@inproceedings{cogprints4145,
  author    = {Prince, Christopher G. and Hollich, George J. and Helder, Nathan A. and Mislivec, Eric J. and Reddy, Anoop and Salunke, Sampanna and Memon, Naveed},
  title     = {Taking Synchrony Seriously: A Perceptual-Level Model of Infant Synchrony Detection},
  booktitle = {Proceedings of the Fourth International Workshop on Epigenetic Robotics},
  editor    = {Berthouze, Luc and Kozima, Hideki and Prince, Christopher G. and Sandini, Giulio and Stojanov, Georgi and Metta, Giorgio and Balkenius, Christian},
  series    = {Lund University Cognitive Studies},
  volume    = {117},
  pages     = {89--96},
  year      = {2004},
  url       = {http://cogprints.org/4145/},
  keywords  = {audio-visual synchrony, gaussian mutual information, infant looking time, computational model},
  abstract  = {Synchrony detection between different sensory and/or motor channels appears critically important for young infant learning and cognitive development. For example, empirical studies demonstrate that audio-visual synchrony aids in language acquisition. In this paper we compare these infant studies with a model of synchrony detection based on the Hershey and Movellan (2000) algorithm augmented with methods for quantitative synchrony estimation. Four infant-model comparisons are presented, using audio-visual stimuli of increasing complexity. While infants and the model showed learning or discrimination with each type of stimuli used, the model was most successful with stimuli comprised of one audio and one visual source, and also with two audio sources and a dynamic-face visual motion source. More difficult for the model were stimuli conditions with two motion sources, and more abstract visual dynamics{--}an oscilloscope instead of a face. Future research should model the developmental pathway of synchrony detection. Normal audio-visual synchrony detection in infants may be experience-dependent (e.g., Bergeson, et al., 2004).},
  internal-note = {Type changed from @misc to @inproceedings; booktitle (EpiRob 2004 proceedings) inferred from editors/series/volume -- verify against the published volume. Former publisher field refiled as series.},
}