@misc{cogprints4150,
  volume    = {117},
  editor    = {Luc Berthouze and Hideki Kozima and Christopher G. Prince and Giulio Sandini and Georgi Stojanov and Giorgio Metta and Christian Balkenius},
  title     = {Binding tactile and visual sensations via unique association by cross-anchoring between double-touching and self-occlusion},
  author    = {Yuichiro Yoshikawa and Koh Hosoda and Minoru Asada},
  publisher = {Lund University Cognitive Studies},
  year      = {2004},
  pages     = {135--138},
  keywords  = {multimodal binding, self-body observation, double-touching, self-occlusion, cross-anchoring Hebbian learning},
  url       = {http://cogprints.org/4150/},
  abstract  = {Binding, i.e., finding the correspondence between sensations in different modalities such as vision and touch, is one of the most fundamental cognitive functions. Without a priori knowledge of this correspondence, binding is regarded as a formidable problem for a robot: it often perceives multiple physical phenomena through its different modal sensors, and must therefore correctly match the foci of attention across modalities that may have multiple correspondences with each other. We suppose that learning a multimodal representation of the body should be the first step toward binding, since the morphological constraints in self-body observation make the binding problem tractable. In perceiving its own body, the robot's multimodal sensations are expected to be constrained so as to form the unique parts of the multiple correspondences that reflect its morphology. In this paper, we propose a method to match the foci of attention in vision and touch through unique association by cross-anchoring the different modalities. Simple experiments show the validity of the proposed method.}
}