<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2019.00344</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Auditory and Somatosensory Interaction in Speech Perception in Children and Adults</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Trudeau-Fisette</surname> <given-names>Pam&#x000E9;la</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/706727/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ito</surname> <given-names>Takayuki</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/112797/overview"/>
</contrib> 
<contrib contrib-type="author">
<name><surname>M&#x000E9;nard</surname> <given-names>Lucie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/542794/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Laboratoire de Phon&#x000E9;tique, Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al</institution>, <addr-line>Montreal, QC</addr-line>, <country>Canada</country></aff>
<aff id="aff2"><sup>2</sup><institution>Centre for Research on Brain, Language and Music</institution>, <addr-line>Montreal, QC</addr-line>, <country>Canada</country></aff>
<aff id="aff3"><sup>3</sup><institution>GIPSA-Lab, CNRS, Grenoble INP, Universit&#x000E9; Grenoble Alpes</institution>, <addr-line>Grenoble</addr-line>, <country>France</country></aff>
<aff id="aff4"><sup>4</sup><institution>Haskins Laboratories, Yale University</institution>, <addr-line>New Haven, CT</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Carmen Moret-Tatay, Catholic University of Valencia San Vicente M&#x000E1;rtir, Spain</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Camila Rosa De Oliveira, Faculdade Meridional (IMED), Brazil; Nuria Senent-Capuz, Catholic University of Valencia San Vicente M&#x000E1;rtir, Spain</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Pam&#x000E9;la Trudeau-Fisette <email>ptrudeaufisette&#x00040;gmail.com</email></corresp>
<fn fn-type="other" id="fn002"><p><bold>Specialty section:</bold> This article was submitted to Speech and Language, a section of the journal Frontiers in Human Neuroscience</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>04</day>
<month>10</month>
<year>2019</year>
</pub-date>
<pub-date pub-type="collection">
<year>2019</year>
</pub-date>
<volume>13</volume>
<elocation-id>344</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>03</month>
<year>2019</year>
</date>
<date date-type="accepted">
<day>18</day>
<month>09</month>
<year>2019</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2019 Trudeau-Fisette, Ito and M&#x000E9;nard.</copyright-statement>
<copyright-year>2019</copyright-year>
<copyright-holder>Trudeau-Fisette, Ito and M&#x000E9;nard</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract><p>Multisensory integration (MSI) allows us to link sensory cues from multiple sources and plays a crucial role in speech development. However, it is not clear whether humans have an innate ability or whether repeated sensory input while the brain is maturing leads to efficient integration of sensory information in speech. We investigated the integration of auditory and somatosensory information in speech processing in a bimodal perceptual task in 15 young adults (age 19&#x02013;30) and 14 children (age 5&#x02013;6). The participants were asked to identify if the perceived target was the sound /e/ or /&#x000F8;/. Half of the stimuli were presented under a unimodal condition with only auditory input. The other stimuli were presented under a bimodal condition with both auditory input and somatosensory input consisting of facial skin stretches provided by a robotic device, which mimics the articulation of the vowel /e/. The results indicate that the effect of somatosensory information on sound categorization was larger in adults than in children. This suggests that integration of auditory and somatosensory information evolves throughout the course of development.</p></abstract>
<kwd-group>
<kwd>multisensory integration</kwd>
<kwd>speech perception</kwd>
<kwd>auditory and somatosensory feedback</kwd>
<kwd>adults</kwd>
<kwd>children</kwd>
<kwd>categorization</kwd>
<kwd>maturation</kwd>
</kwd-group>
<contract-sponsor id="cn001">Social Sciences and Humanities Research Council of Canada<named-content content-type="fundref-id">10.13039/501100000155</named-content></contract-sponsor>
<contract-sponsor id="cn002">Natural Sciences and Engineering Research Council of Canada<named-content content-type="fundref-id">10.13039/501100000038</named-content></contract-sponsor>
<counts>
<fig-count count="3"/>
<table-count count="1"/>
<equation-count count="12"/>
<ref-count count="94"/>
<page-count count="11"/>
<word-count count="8972"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="introduction" id="s1">
<title>Introduction</title>
<p>From our first day of life, we are confronted with multiple sensory inputs such as tastes, smells, and touches. Unconsciously, related inputs are combined into a single input with rich information. Multisensory integration (MSI), also called multimodal integration, is the ability of the brain to assimilate cues from multiple sensory modalities that allows us to benefit from the information from each sense to reduce perceptual ambiguity and ultimately reinforce our perception of the world (Stein and Meredith, <xref ref-type="bibr" rid="B100">1993</xref>; Stein et al., <xref ref-type="bibr" rid="B101">1996</xref>; Robert-Ribes et al., <xref ref-type="bibr" rid="B91">1998</xref>; Molholm et al., <xref ref-type="bibr" rid="B75">2002</xref>). MSI holds a prominent place in the way that information is processed, by shaping how inputs are perceived. This merging of various sensory inputs into common neurons was typically assumed to occur late in the perceptual process stream (Massaro, <xref ref-type="bibr" rid="B633">1999</xref>), but recent studies in neurophysiology have demonstrated that MSI can occur in the early stages of cortical processing, even in brain regions typically associated with lower-level processing of uni-sensory inputs (Macaluso et al., <xref ref-type="bibr" rid="B59">2000</xref>; Foxe et al., <xref ref-type="bibr" rid="B29">2002</xref>; Molholm et al., <xref ref-type="bibr" rid="B75">2002</xref>; Mishra et al., <xref ref-type="bibr" rid="B73">2007</xref>; Raij et al., <xref ref-type="bibr" rid="B89">2010</xref>; Mercier et al., <xref ref-type="bibr" rid="B71">2013</xref>).</p>
<p>While some researchers have suggested that an infant&#x02019;s brain is likely equipped with multisensorial functionality at birth (Bower et al., <xref ref-type="bibr" rid="B14">1970</xref>; Streri and Gentaz, <xref ref-type="bibr" rid="B105">2004</xref>), others have suggested that MSI likely develops over time as a result of experiences (Birch and Lefford, <xref ref-type="bibr" rid="B11">1963</xref>; Yu et al., <xref ref-type="bibr" rid="B121">2010</xref>; Burr and Gori, <xref ref-type="bibr" rid="B16">2011</xref>). Several studies support the latter hypothesis. For example, studies have demonstrated that distinct sensory systems develop at different rates and in different ways, which suggests that several mechanisms are implicated in MSI depending on the type of interactions (Walker-Andrews, <xref ref-type="bibr" rid="B118">1994</xref>; Gori et al., <xref ref-type="bibr" rid="B34">2008</xref>; Burr and Gori, <xref ref-type="bibr" rid="B16">2011</xref>; Dionne-Dostie et al., <xref ref-type="bibr" rid="B23">2015</xref>). For example, researchers have reported that eye-hand coordination, a form of somatovisual interaction, can be observed in infants as young as a week old (Bower et al., <xref ref-type="bibr" rid="B14">1970</xref>), and audiovisual association of phonetic information emerges around 2 months of age (Kuhl and Meltzoff, <xref ref-type="bibr" rid="B55">1982</xref>; Patterson and Werker, <xref ref-type="bibr" rid="B83">2003</xref>), but audiovisual integration in spatial localization behavior does not appear before 8 months of age (Neil et al., <xref ref-type="bibr" rid="B80">2006</xref>).</p>
<p>Ultimately, although it is still unclear whether an innate system enables MSI in humans, data from infants, children, and adults suggest that unimodal and multimodal sensory experiences and brain maturation enable the establishment of efficient integration processing (Rentschler et al., <xref ref-type="bibr" rid="B90">2004</xref>; Krakauer et al., <xref ref-type="bibr" rid="B52">2006</xref>; Neil et al., <xref ref-type="bibr" rid="B80">2006</xref>; Gori et al., <xref ref-type="bibr" rid="B34">2008</xref>; Nardini et al., <xref ref-type="bibr" rid="B78">2008</xref>; Hillock et al., <xref ref-type="bibr" rid="B41">2011</xref>; Stein et al., <xref ref-type="bibr" rid="B102">2014</xref>) and that multisensory tasks in school-aged and younger children are executed through unimodal dominance rather than integration abilities (McGurk and Power, <xref ref-type="bibr" rid="B67">1980</xref>; Hatwell, <xref ref-type="bibr" rid="B38">1987</xref>; Misceo et al., <xref ref-type="bibr" rid="B72">1999</xref>; Burr and Gori, <xref ref-type="bibr" rid="B16">2011</xref>). Moreover, according to the intersensory redundancy hypothesis, perception of multimodal information is only facilitated when information from various sources is redundant, and not when the information is conflicting (Bahrick and Lickliter, <xref ref-type="bibr" rid="B4">2000</xref>, <xref ref-type="bibr" rid="B5">2012</xref>).</p>
<p>Multimodal integration is crucial for speech development. According to the associative view, during infancy, the acoustic features of produced and perceived speech are associated with felt and seen articulatory movements required for their production (Kuhl and Meltzoff, <xref ref-type="bibr" rid="B55">1982</xref>; Patterson and Werker, <xref ref-type="bibr" rid="B83">2003</xref>; Pons et al., <xref ref-type="bibr" rid="B86">2009</xref>; Yeung and Werker, <xref ref-type="bibr" rid="B120">2013</xref>). Once acoustic information and proprioceptive feedback information are strongly linked together, this becomes part of an internal multimodal speech model (Guenther and Perkell, <xref ref-type="bibr" rid="B35">2004</xref>; Tourville and Guenther, <xref ref-type="bibr" rid="B108">2011</xref>; Guenther and Vladusich, <xref ref-type="bibr" rid="B36">2012</xref>).</p>
<p>MSI can sometimes be overlooked in speech perception since speakers frequently have one dominant sensory modality (Hecht and Reiner, <xref ref-type="bibr" rid="B39">2009</xref>; Lametti et al., <xref ref-type="bibr" rid="B57">2012</xref>). However, even though audition is the dominant type of sensory information in speech perception, many researchers have suggested that other sensory modalities also play a role in speech processing (Perrier, <xref ref-type="bibr" rid="B85">1995</xref>; Tremblay et al., <xref ref-type="bibr" rid="B109">2003</xref>; Skipper et al., <xref ref-type="bibr" rid="B97">2007</xref>; Ito et al., <xref ref-type="bibr" rid="B48">2009</xref>; Lametti et al., <xref ref-type="bibr" rid="B57">2012</xref>). The McGurk effect, a classic perceptual illusion resulting from incongruent simultaneous auditory and visual cues about consonants clearly demonstrates that information from multiple sensory channels is unconsciously integrated during speech processing (McGurk and MacDonald, <xref ref-type="bibr" rid="B66">1976</xref>).</p>
<p>In the current study, we examined the integration of auditory and somatosensory interaction in speech perception. Previous research has suggested that to better understand how different types of sensory feedback interact in speech perception, we need to better understand how and when this becomes mature.</p>
<p>Hearing is one of the first sensory modalities to emerge in humans. While still <italic>in utero</italic>, babies can differentiate speech from non-speech and distinguish variability in speech length and intensity (for a review on auditory perception in the fetus, see Lecanuet et al., <xref ref-type="bibr" rid="B58">1995</xref>). After birth, babies are very soon responsive to various rhythmic and intonation sounds (Demany et al., <xref ref-type="bibr" rid="B19">1977</xref>) and can distinguish phonemic features such as voicing, manner, and place of articulation (Eimas et al., <xref ref-type="bibr" rid="B25">1971</xref>). Specific perceptual aspects of one&#x02019;s first language, such as sensitivity to phonemes and phonotactic properties, are refined by the first year of life (Kuhl, <xref ref-type="bibr" rid="B54">1991</xref>). Although auditory abilities become well established in the early years of life, anatomical changes and experiences will guide the development of auditory skills throughout childhood (Arabin, <xref ref-type="bibr" rid="B2">2002</xref>; Turgeon, <xref ref-type="bibr" rid="B111">2011</xref>).</p>
<p>Little is known about the development of oral somatosensory abilities in typically developing children. Yet, some authors have worked on the development of oral stereognosis in children and adults, where stereognosis is the ability to perceive and recognize the form of an object in the absence of visual and auditory information, by using tactile information. In oral stereognosis, the form of an object is recognized by exploring tactile information such as texture, size or spatial properties, in the oral cavity. This is usually evaluated by comparing the ability of children and adults to differentiate or identify small plastic objects in their mouths. Researchers have reported that oral sensory discrimination skills depend on age (McDonald and Aungst, <xref ref-type="bibr" rid="B64">1967</xref>; Dette and Linke, <xref ref-type="bibr" rid="B22">1982</xref>; Gisel and Schwob, <xref ref-type="bibr" rid="B32">1988</xref>). McDonald and Aungst (<xref ref-type="bibr" rid="B64">1967</xref>) showed that 6- to 8-year-old children correctly matched half of the presented forms; 17- to 31-year-old adolescents and adults had perfect scores; and scores declined significantly with age among the 52- to 89-year-olds. Dette and Linke (<xref ref-type="bibr" rid="B22">1982</xref>) found similar results in 3- to 17-year-olds. The effect of age was also found in younger vs. older children. Kumin et al. (<xref ref-type="bibr" rid="B56">1984</xref>) showed that among 4- to 11-year-olds, the older children had significantly better oral stereognosis scores than younger children. Gisel and Schwob (<xref ref-type="bibr" rid="B32">1988</xref>) reported that 7- and 8-year-old children had better identification skills in an oral stereognosis experiment than 5- and 6-year-old children. Interestingly, only the 8-year-old children showed a learning effect, in that they got better scores as the experiment progressed.</p>
<p>To explain this age-related improvement in oral stereognosis, it was suggested that oral stereognosis maturity is achieved when the growth of the oral and facial structures is complete (McDonald and Aungst, <xref ref-type="bibr" rid="B64">1967</xref>; Gisel and Schwob, <xref ref-type="bibr" rid="B32">1988</xref>). This explanation is consistent with vocal tract growth data that shows that while major changes occur in the first 3 years of life (Vorperian et al., <xref ref-type="bibr" rid="B116">1999</xref>), important growth of the pharyngeal region is observed between puberty and adulthood (Fitch and Giedd, <xref ref-type="bibr" rid="B27">1999</xref>) and multidimensional maturity of the vocal tract is not reached until adulthood (Bo&#x000EB; et al., <xref ref-type="bibr" rid="B12">2007</xref>, <xref ref-type="bibr" rid="B13">2008</xref>).</p>
<p>A few recent studies have suggested that there is a link between auditory and somatosensory information in multimodal integration.</p>
<p>Lametti et al. (<xref ref-type="bibr" rid="B57">2012</xref>) proposed that sensory preferences in the specification of speech motor goals could mediate responses to real-time manipulations, which would explain the important variability in compensatory behavior to an auditory manipulation (Purcell and Munhall, <xref ref-type="bibr" rid="B88">2006</xref>; Villacorta et al., <xref ref-type="bibr" rid="B114">2007</xref>; MacDonald et al., <xref ref-type="bibr" rid="B60">2010</xref>). They point out that one&#x02019;s own auditory feedback is not the only reliable source of speech monitoring and, in line with the internal speech model theory, that somatosensory feedback would also be considered in speech motor control. In agreement with this concept, Katseff et al. (<xref ref-type="bibr" rid="B51">2012</xref>) suggested that partial compensation in auditory manipulation of real-time speech could be because both auditory and somatosensory feedback systems monitor speech motor control and therefore, the two systems are competing when large sensory manipulation affects only one of the sensory channels.</p>
<p>A recent study of speech auditory feedback perturbations in blind and sighted speakers supports the latter explanation. It showed that typically developing adults, whose somatosensory goals are narrowed by vision were more likely to tolerate large discrepancies between the expected and produced auditory outcome, whereas blind speakers, whose auditory goals had primacy over somatosensory ones, tolerated larger discrepancies between their expected and produced somatosensory feedback. In this sense, blind speakers were more inclined to adopt unusual articulatory positions to minimize divergences of their auditory goals (Trudeau-Fisette et al., <xref ref-type="bibr" rid="B110">2017</xref>).</p>
<p>Researchers have also suggested that acoustic and somatosensory cues are integrated. As far as we know, Von Schiller (cited in Krueger, <xref ref-type="bibr" rid="B53">1970</xref>; Jousm&#x000E4;ki and Hari, <xref ref-type="bibr" rid="B50">1998</xref>) was the first one to report that sound could modulate touch. Indeed, although he was mainly focused on the interaction between auditory and visual cues, he showed in his 1932 article that auditory stimuli, such as tones and noise bursts, could influence an object&#x02019;s physical perception. Since then, studies have shown how manipulations of acoustic frequencies or even changes in their prevalence can influence the tactile perception of objects, events, and skin deformation such as their perceived smoothness, occurrence, or magnitude (Krueger, <xref ref-type="bibr" rid="B53">1970</xref>; Jousm&#x000E4;ki and Hari, <xref ref-type="bibr" rid="B50">1998</xref>; Guest et al., <xref ref-type="bibr" rid="B37">2002</xref>; H&#x000F6;tting and R&#x000F6;der, <xref ref-type="bibr" rid="B44">2004</xref>; Ito and Ostry, <xref ref-type="bibr" rid="B46">2010</xref>). Multimodal integration was stronger when both perceptual sources were presented simultaneously (Jousm&#x000E4;ki and Hari, <xref ref-type="bibr" rid="B50">1998</xref>; Guest et al., <xref ref-type="bibr" rid="B37">2002</xref>).</p>
<p>This interaction between auditory and tactile channels is also found in the opposite direction, in that somatosensory inputs can influence the perception of sounds. For example, Sch&#x000FC;rmann et al. (<xref ref-type="bibr" rid="B94">2004</xref>) showed that vibrotactile cues can influence the perception of sound loudness. Later, Gick and Derrick (<xref ref-type="bibr" rid="B31">2009</xref>) demonstrated that aerotactile inputs could modulate the perception of a consonant&#x02019;s oral property.</p>
<p>Somatosensory information coming from orofacial areas is somewhat different from that typically intended. Kinesthetic feedback usually refers to information retrieved from position, movement, and receptors in muscles and articulators (Proske and Gandevia, <xref ref-type="bibr" rid="B87">2009</xref>). However, some of the orofacial regions involved in speech production movement are devoid of muscle proprioceptors. Therefore, the somatosensory information guiding our perception and production abilities likely also comes from cutaneous mechanoreceptors (Johansson et al., <xref ref-type="bibr" rid="B49">1988</xref>; Ito and Gomi, <xref ref-type="bibr" rid="B45">2007</xref>; Ito and Ostry, <xref ref-type="bibr" rid="B46">2010</xref>).</p>
<p>Although many studies have reported on the role of somatosensory information derived from orofacial movement in speech production (Tremblay et al., <xref ref-type="bibr" rid="B109">2003</xref>; Nasir and Ostry, <xref ref-type="bibr" rid="B79">2006</xref>; Ito and Ostry, <xref ref-type="bibr" rid="B46">2010</xref>; Feng et al., <xref ref-type="bibr" rid="B26">2011</xref>; Lametti et al., <xref ref-type="bibr" rid="B57">2012</xref>), few studies have reported its role in speech perception.</p>
<p>Researchers recently investigated the contribution of somatosensory information on speech perception mechanisms. Ito et al. (<xref ref-type="bibr" rid="B48">2009</xref>) designed a bimodal perceptual task experiment where they asked participants to identify if the perceived target was the word &#x0201C;head&#x0201D; or &#x0201C;had.&#x0201D; When the acoustic targets (all members of the &#x0201C;head/had&#x0201D; continuum) were perceived simultaneously to a skin manipulation recalling the oral articulatory gestures implicated in the production of the vowel /&#x003F5;/, the identification rate of the target &#x0201C;head&#x0201D; was significantly improved. The researchers also tested different directions of the orofacial muscle manipulation and established that the observed effect was only found if the physical manipulation reflected a movement required in speech production (Ito et al., <xref ref-type="bibr" rid="B48">2009</xref>).</p>
<p>Somatosensory information appears to even be involved in the processing of higher-level perceptual concepts (Ogane et al., <xref ref-type="bibr" rid="B81">2017</xref>). In a similar perceptual task, participants were asked to identify if the perceived acoustic target was &#x0201C;l&#x02019;affiche&#x0201D; (the poster) or &#x0201C;la fiche&#x0201D; (the form). The authors showed that the appropriate temporal positions of somatosensory skin manipulation in the stimulus word, simulating somatosensory inputs concerning the hyperarticulation of either the vowel /a/ or the vowel /i/, could affect the categorization of the lexical target.</p>
<p>Although further study would reinforce these findings, these experiments highlight the fact that the perception of linguistic inputs can be influenced by the manipulation of cutaneous receptors involved in speech motion (Ito et al., <xref ref-type="bibr" rid="B48">2009</xref>, <xref ref-type="bibr" rid="B47">2014</xref>; Ito and Ostry, <xref ref-type="bibr" rid="B46">2010</xref>), and furthermore, attest to a strong link between auditory and somatosensory channels within the multimodal aspect of speech perception in adults.</p>
<p>The fact that sound discrimination is facilitated when sounds are included in the infants&#x02019; babbling register (Vihman, <xref ref-type="bibr" rid="B113">1996</xref>) is surely part of the growing body of evidence that demonstrates how somatosensory information that is derived from speech movement also influences speech perception in young speakers (DePaolis et al., <xref ref-type="bibr" rid="B20">2011</xref>; Bruderer et al., <xref ref-type="bibr" rid="B15">2015</xref>; Werker, <xref ref-type="bibr" rid="B119">2018</xref>). However, to our knowledge, only two studies have investigated how somatosensory feedback is involved in speech perception abilities in children (Yeung and Werker, <xref ref-type="bibr" rid="B120">2013</xref>; Bruderer et al., <xref ref-type="bibr" rid="B15">2015</xref>). In both studies, the researchers manipulated oral somatosensory feedback by constraining tongue or lip movement, thus forcing the adoption of a precise articulatory position. Although MSI continues to evolve until late childhood (Ross et al., <xref ref-type="bibr" rid="B92">2011</xref>), these two experiments in toddlers shed light on how this phenomenon emerges.</p>
<p>In their 2013 article, Yeung and Werker (<xref ref-type="bibr" rid="B120">2013</xref>) reported that when 4- and 5-month-old infants were confronted with incongruent auditory and labial somatosensory cues, they were more likely to fix the visual demonstration corresponding to the vowel perceived through the auditory channel. In contrast, congruent auditory and somatosensory cues did not call for the need to add a corresponding visual representation of the perceived vowel.</p>
<p>Also using a looking-time procedure, Bruderer et al. (<xref ref-type="bibr" rid="B15">2015</xref>) focused on the role of language experience on the integration of somatosensory information. They found that the ability of 6-month-old infants to discriminate between the non-native dental /<graphic xlink:href="fnhum-13-00344-g0004.tif"/>/ and the retroflex /&#x00256;/ Hindi consonant was influenced by the insertion of a teething toy. When the toddlers&#x02019; tongue movements were restrained, they showed no evidence of phonetic contrast discrimination of tongue tip position. As shown by Ito et al. (<xref ref-type="bibr" rid="B48">2009</xref>), the effect of somatosensory cues was only observed if the perturbed articulator would have been involved in the production of the sound that was heard.</p>
<p>While these two studies mainly focused on perceptual discrimination rather than categorical representation of speech, they suggest that proprioceptive information resulting from static articulatory perturbation plays an important role in speech perception mechanisms in toddlers and that the phenomenon of multimodal integration in the perception-production speech model starts early in life. The authors suggested that, even at a very young age, babies can recognize that information can come from multiple sources and they react differently when the sensory sources are compatible. However, it is still unknown when children begin to integrate various sensory sources to treat them as a single sensory source.</p>
<p>In the current study, we aimed to investigate how dynamic somatosensory information from orofacial cutaneous receptors is integrated in speech processing in children compared to adults. Based on previous research, we hypothesized that: (1) when somatosensory inputs are presented simultaneously with auditory inputs, this affects their phonemic categorization; (2) auditory and somatosensory integration is stronger in adults than in children; and (3) MSI is facilitated when both types of sensory feedback are consistent.</p>
</sec>
<sec sec-type="materials and methods" id="s2">
<title>Materials and Methods</title>
<sec id="s2-1">
<title>Participants</title>
<p>We recruited 15 young adults (aged 19&#x02013;30), including eight females. We also recruited 21 children (aged 4&#x02013;6) and after excluding seven children due to equipment malfunction (1), non-completion (2), or inability to understand the task (4), this left 14 children (aged 5&#x02013;6), including 10 females, for the data analysis. Five to six years of age is a particularly interesting window since children master all phonemes of their native language. However, they have not yet entered the fluent reading stage, during which explicit teaching of reading has been shown to alter multimodal perception (Horlyck et al., <xref ref-type="bibr" rid="B43">2012</xref>).</p>
<p>All participants were native speakers of Canadian French and were tested for pure-tone detection threshold using an adaptive method (DT &#x0003C; 25 dB HL at 250, 500, 1,000, 2,000, 4,000 and 8,000 Hz). None of the participants reported having speech or language impairments. The research protocol was approved by the Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al&#x02019;s Institutional Review Board (no 2015-05-4.2) and all participants (or the children&#x02019;s parents) gave written informed consent. The number of participants was limited due to the age of the children and the length of the task (3 different tasks were executed on the same day).</p>
</sec>
<sec id="s2-2">
<title>Experimental Procedure</title>
<p>As in the task used by Ito et al. (<xref ref-type="bibr" rid="B48">2009</xref>), the participants were asked to identify the vowel they perceived and were asked to choose between /e/ and /&#x000F8;/. Based on M&#x000E9;nard and Boe (<xref ref-type="bibr" rid="B68">2004</xref>), the auditory stimulus consisted of 10 members of a synthesized /e&#x02013;&#x000F8;/ continuum generated using the Maeda model (see <xref ref-type="table" rid="T1">Table 1</xref>). This continuum was created such that the first four formants were equally distributed from those corresponding to the natural endpoint tokens of /e/ and /&#x000F8;/. To ensure that the children understood the difference between the two vocalic choices, the vowel /e/ was represented by an image of a fairy (/e/ as in f&#x000E9;e) and the vowel /&#x000F8;/ was represented by an image of a fire (/&#x000F8;/ as in feu). Since we wanted to minimize large head movements during the experiment, the children were asked to point out the image corresponding to their answers. Both images were placed in front of them at shoulder level, three feet away from each other on the horizontal plane. The adults were able to use the keyboard without looking at it and they used the right and left arrows to indicate their responses.</p>
<table-wrap id="T1" position="float">
<label>Table 1</label>
<caption><p>Formant and bandwidth values of the synthesized stimuli used in the perceptual task.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" colspan="5">Formant values</th>
<th align="center" colspan="5">Bandwidths values</th>
</tr>
<tr>
<th/>
<th align="center">F1</th>
<th align="center">F2</th>
<th align="center">F3</th>
<th align="center">F4</th>
<th align="center">F5</th>
<th align="center">B1</th>
<th align="center">B2</th>
<th align="center">B3</th>
<th align="center">B4</th>
<th align="center">B5</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Auditory stimuli</td>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td align="left">1</td>
<td align="center">364</td>
<td align="center">1,922</td>
<td align="center">2,509</td>
<td align="center">3,550</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">2</td>
<td align="center">364</td>
<td align="center">1,892</td>
<td align="center">2,469</td>
<td align="center">3,500</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">3</td>
<td align="center">364</td>
<td align="center">1,862</td>
<td align="center">2,429</td>
<td align="center">3,450</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">4</td>
<td align="center">364</td>
<td align="center">1,832</td>
<td align="center">2,389</td>
<td align="center">3,400</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">5</td>
<td align="center">364</td>
<td align="center">1,802</td>
<td align="center">2,349</td>
<td align="center">3,350</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">6</td>
<td align="center">364</td>
<td align="center">1,772</td>
<td align="center">2,309</td>
<td align="center">3,300</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">7</td>
<td align="center">364</td>
<td align="center">1,742</td>
<td align="center">2,269</td>
<td align="center">3,250</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">8</td>
<td align="center">364</td>
<td align="center">1,712</td>
<td align="center">2,229</td>
<td align="center">3,200</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">9</td>
<td align="center">364</td>
<td align="center">1,682</td>
<td align="center">2,189</td>
<td align="center">3,150</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
<tr>
<td align="left">10</td>
<td align="center">364</td>
<td align="center">1,652</td>
<td align="center">2,149</td>
<td align="center">3,100</td>
<td align="center">4,000</td>
<td align="center">48</td>
<td align="center">55</td>
<td align="center">60</td>
<td align="center">50</td>
<td align="center">100</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> shows the experimental set-up for the facial skin stretch perturbations. The participants were seated with their backs to a Phantom 1.0 device (SensAble Technologies) and they wore headphones (Sennheiser HD 380 pro). This small unit, composed of a robotic arm to which a wire is attached, allows for minor lateral skin manipulation at the side of the mouth, where small plastic tabs (2 mm &#x000D7; 3 mm), located on the ends of the wire, were placed with double-sided tape. The robotic arm was programed to ensure that when a four Newton flexion force was administered it led to a 10- to 15-mm lateral skin stretch.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Experimental set up for facial skin stretch perturbations (reproduced with permission from Ito and Ostry, <xref ref-type="bibr" rid="B46">2010</xref>).</p></caption>
<graphic xlink:href="fnhum-13-00344-g0001.tif"/>
</fig>
<p>When this facial skin stretch is applied lateral to the oral angle in the backward direction as shown in the figure, it mimics the articulation associated with the production of the unrounded vowel /e/. Therefore, auditory and somatosensory feedback was either congruent (with /e/-like auditory inputs) or incongruent (with /&#x000F8;/-like auditory inputs). As stated earlier, cutaneous receptors found within the labial area provide speech-related kinesthetic information (Ito and Gomi, <xref ref-type="bibr" rid="B45">2007</xref>). Since the skin manipulation was programed to be perceived at the same time as the auditory stimuli, it was possible to investigate the contribution of the somatosensory system to the perceptual processing of the speech targets.</p>
<p>The auditory stimuli were presented in 20 blocks of 10 trials each. Within each block, all members of the 10-step continuum were presented in a random order. For half of the trials, only the auditory stimulus was presented (unimodal condition). For the other half of the trials, a facial skin manipulation was also applied (bimodal condition). Alternate blocks of unimodal and bimodal conditions were presented to the participants. In total, 200 perceptual judgments were collected, 100 in the auditory-only condition and 100 in the combined auditory and skin-stretch condition.</p>
</sec>
<sec id="s2-3">
<title>Data Analysis</title>
<p>For each participant, stimulus, and condition, we calculated the percentage of /e/ responses. The experiment was closely monitored, and the responses in trials where a short pause was requested by the participant were excluded from the analysis. In doing so, we sought to eliminate categorical judgments for which the participants were no longer in a position to properly respond to the task (fewer than 1.1% and 0.2% of all responses were excluded for children and adults, respectively). These perceptual scores were then fitted onto a logistic regression model (Probit model) to obtain psychometric functions from which the labeling slopes and 50% crossover boundaries were computed. The value of the slope corresponds to the sharpness of the categorization (the lower the value, the more distinct the categorization), while the boundary value indicates the location of the categorical boundary between the two vowel targets (the higher the value, the more toward /&#x000F8;/ the frontier). Using the lme4 package in R, we carried out a linear mixed-effects model (Baayen et al., <xref ref-type="bibr" rid="B3">2008</xref>) for both the steepness of the slopes and the category boundaries in which group (adult or children) and condition (unimodal or bimodal) were specified as fixed factors and individual participant was defined as a random factor.</p>
<p>Each given answer (5,800 perceptual judgments collected from 29 participants) was fitted into a linear mixed-effects model where fixed factors included stimuli (the 10-step continuum), group (adult or children), and condition (unimodal or bimodal), and the random factor was the individual participant. The mean categorization of the first and last two stimuli was also compared. Once again, the averages of the given answers (116 mean perceptual judgments collected from 29 participants) were fitted into a linear mixed-effects model where the fixed variables included stimuli (head stimuli or tail stimuli), group (adult or children), and condition (unimodal or bimodal) and where the random variable was the individual participant. Finally, independent <italic>t</italic>-tests were carried out in order to compare variability in responses between both experimental groups and conditions. In both cases, Kolmogorov&#x02013;Smirnov tests indicated that categorizations followed a normal distribution.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<p>The overall percentage of /e/ responses for each stimulus is shown in <xref ref-type="fig" rid="F2">Figure 2</xref>. The data were averaged across speakers, within both groups. <xref ref-type="fig" rid="F3">Figure 3</xref> displays the values for the labeling slope (distinctiveness of the vowels&#x02019; categorization) and 50% crossover boundary (location of the categorical frontier) averaged across experimental conditions and groups. As can be seen in both figures, regardless of the experimental condition, the children had greater variations in overall responses compared to the adults, which was confirmed in an independent <italic>t</italic>-test (<italic>t</italic><sub>(38)</sub> = 2.792, <italic>p</italic> &#x0003C; 0.01).</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Percent identification of the vowel [e] for stimuli on the [e&#x02013;&#x000F8;] continuum, in both experimental conditions, for both groups. Error bars indicate standard errors.</p></caption>
<graphic xlink:href="fnhum-13-00344-g0002.tif"/>
</fig>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Psychometric functions of labeling slope and 50% crossover boundary, in both experimental conditions, for both groups. Error bars indicate standard errors. *<italic>p</italic> &#x0003C; 0.05; **<italic>p</italic> &#x0003C; 0.01; ***<italic>p</italic> &#x0003C; 0.001.</p></caption>
<graphic xlink:href="fnhum-13-00344-g0003.tif"/>
</fig>
<sec id="s3-1">
<title>Psychometric Functions</title>
<sec id="s3-1-1">
<title>Labeling Slope Results</title>
<p>The linear mixed-effects model revealed a significant main effect of group on the steepness of the slope (<inline-formula><mml:math id="M1"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 23.549, <italic>p</italic> &#x0003C; 0.001), indicating that there was more categorical perception in adults than in children (see <xref ref-type="fig" rid="F2">Figure 2</xref>, black lines and <xref ref-type="fig" rid="F3">Figure 3</xref>, left-hand part of the graph).</p>
<p>Although no effect of condition as a main effect was observed (<inline-formula><mml:math id="M2"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 3.618, <italic>p</italic> &#x0003E; 0.05), a significant interaction between group and condition was found (<inline-formula><mml:math id="M3"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 4.956, <italic>p</italic> &#x0003C; 0.05). <italic>Post hoc</italic> analysis revealed that in the bimodal condition the slope of the labeling function was more abrupt for the adults (<italic>z</italic> = &#x02212;3.153, <italic>p</italic> &#x0003C; 0.01) but not for the children, suggesting that the skin stretch condition led to a more categorical identification of the stimuli in adults only.</p>
</sec>
<sec id="s3-1-2">
<title>The 50% Crossover Boundary Results</title>
<p>A linear mixed-effects model analysis carried out on the 50% crossover boundaries revealed a single main effect of condition (<inline-formula><mml:math id="M4"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 9.245, <italic>p</italic> &#x0003C; 0.01). For both groups, the skin stretch perturbation led to a displacement of the 50% crossover boundary. In the bimodal condition (A+SS), the boundary was located closer to /&#x000F8;/ than in the unimodal condition (A). This result is consistent with the expected effect of the skin stretch perturbation; more stimuli were perceived as /e/ than /&#x000F8;/. No effect of group, either as a main effect or in interaction with condition, was found. The results are presented in <xref ref-type="fig" rid="F2">Figure 2</xref> and in <xref ref-type="fig" rid="F3">Figure 3</xref>, in the right-hand part of the graphs.</p>
</sec>
</sec>
<sec id="s3-2">
<title>Categorical Judgments</title>
<p>A linear mixed-effects model analysis performed on the categorical judgments revealed that in addition to the expected main effect of stimuli (<inline-formula><mml:math id="M5"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 3652.4, <italic>p</italic> &#x0003C; 0.001), there were significant effects of group (<inline-formula><mml:math id="M6"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 4.586, <italic>p</italic> &#x0003C; 0.05) and condition (<inline-formula><mml:math id="M7"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 15.736, <italic>p</italic> &#x0003C; 0.001), suggesting that children and adults did not categorize the stimuli in a similar manner and that both experimental conditions prompted different categorization. Moreover, a significant interaction of group and stimuli (<inline-formula><mml:math id="M8"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 144.52, <italic>p</italic> &#x0003C; 0.001) revealed that irrespective of the experimental condition, some auditory stimuli were categorized differently by the two groups.</p>
<p><italic>Post hoc</italic> tests revealed that whether a skin stretch manipulation was applied or not, stimulus 7 (A: <italic>z</italic> = &#x02212;3.795, <italic>p</italic> &#x0003C; 0.01; A+SS: <italic>z</italic> = &#x02212;4.648, <italic>p</italic> &#x0003C; 0.01), 8 (A: <italic>z</italic> = &#x02212;3.445, <italic>p</italic> &#x0003C; 0.05; A+SS: <italic>z</italic> = &#x02212;3.544, <italic>p</italic> &#x0003C; 0.01) and 9 (A: <italic>z</italic> = &#x02212;3.179, <italic>p</italic> &#x0003C; 0.05; A+SS: <italic>z</italic> = &#x02212;4.347, <italic>p</italic> &#x0003C; 0.01) were more systematically identified as /&#x000F8;/ by the adults than by the children. While no other two-way interactions were found, a significant three-way interaction of group, condition, and stimuli was observed (<inline-formula><mml:math id="M9"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>4</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 117.26, <italic>p</italic> &#x0003C; 0.001), suggesting that, for some specific stimuli, the skin stretch condition affected the perceptual judgment of both groups in a different manner.</p>
<p>First, it was found that the skin stretch manipulation had a greater effect on stimulus 6, in children only (<italic>z</italic> = &#x02212;3.251, <italic>p</italic> &#x0003C; 0.05). For this group, the skin stretch condition caused a 15.8% increase of /e/ labeling on stimulus 6. For the adults, the addition of somatosensory cues only led to a 3.3% increase in /e/ categorization.</p>
<p>Although less expected, the skin stretch manipulation also led to some perceptual changes at the endpoint of the auditory continuum. As shown in <xref ref-type="fig" rid="F2">Figure 2</xref>, stimulus 2 (<italic>z</italic> = 3.053, <italic>p</italic> &#x0003C; 0.05) and stimulus 10 (<italic>z</italic> = &#x02212;3.734, <italic>p</italic> &#x0003C; 0.01) were labeled differently by the two groups, but only in the bimodal condition. In fact, stimulus 2 (an /e/-like stimulus) was more likely to be identified as an /e/ by the adults in the experimental condition. In contrast, children were less inclined to label it so. As for stimulus 10 (an /&#x000F8;/-like stimulus), the addition of somatosensory inputs decreased the correct identification rate in children only. In adults, although it barely affected their categorical judgments, the skin stretch manipulation mimicking the articulatory gestures of the vowel /e/ resulted in an increase of /&#x000F8;/ labeling, as if it had a reverse effect.</p>
<p>Last, a comparison of mean categorizations of the first and last two stimuli revealed a main effect of stimuli (<inline-formula><mml:math id="M10"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 313.52, <italic>p</italic> &#x0003C; 0.001) and a significant interaction of group and stimuli (<inline-formula><mml:math id="M11"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 36.260, <italic>p</italic> &#x0003C; 0.001). More importantly, it also revealed a three-way interaction of group, condition, and stimuli (<inline-formula><mml:math id="M12"><mml:msubsup><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mn>4</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = 37.474, <italic>p</italic> &#x0003C; 0.001). <italic>Post hoc</italic> tests indicated that those endpoint stimuli of the continuum were categorized differently by the two groups, but only when a skin stretch manipulation was applied. In agreement with previous results, in the skin stretch condition, children labeled more /e/-like stimuli as /&#x000F8;/ (<italic>z</italic> = 3.434, <italic>p</italic> &#x0003C; 0.05), and more /&#x000F8;/-like stimuli as /e/ (<italic>z</italic> = &#x02212;4.139, <italic>p</italic> &#x0003C; 0.01).</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>This study aimed to investigate how auditory and somatosensory information is integrated in speech processing by school-aged children and adults, by testing three hypotheses.</p>
<p>As hypothesized, the overall perceptual categorization of the auditory stimuli was affected by the addition of somatosensory manipulations. The results for psychometric functions and categorical judgments revealed that auditory stimuli perceived simultaneously with skin stretch manipulations were labeled differently than when they were perceived on their own. Sounds were more perceived as /e/ when they were accompanied by the proprioceptive modification.</p>
<p>The second hypothesis that auditory and somatosensory integration would be greater in adults than in children was also confirmed. As shown in <xref ref-type="fig" rid="F2">Figures 2</xref>, <xref ref-type="fig" rid="F3">3</xref>, orofacial manipulation affected the position of the 50% crossover boundary of both groups; when backward skin stretches were perceived simultaneously with the auditory stimulus, it increased its probability of being identified as an /e/. This impact of skin stretch manipulation on the value corresponding to the 50th percentile was also reported in Ito et al.&#x02019;s (<xref ref-type="bibr" rid="B48">2009</xref>) experiment. However, bimodal presentation of auditory and somatosensory inputs affected the steepness of the slope in adults only. <xref ref-type="fig" rid="F2">Figure 2</xref> also shows that adult participants were more likely to label /e/-like stimuli as /e/ in the bimodal condition. Since negligible changes were observed for /&#x000F8;/-like stimuli, it led to a more categorical boundary between the two acoustic vocalic targets. This difference in the integration patterns between children and adults suggests that linkage of specific somatosensory inputs with a corresponding speech sound evolves with age.</p>
<p>The third hypothesis that MSI would be stronger when auditory and somatosensory information was congruent was confirmed in adults but not in children. Only adults&#x02019; perception was facilitated when both sources of sensory information were consistent. In children, a decrease in the correct identification rate resulted from the bimodal presentation when auditory and proprioceptive inputs were compatible. Moreover, while adults seemed not to be affected by the /e/-like skin stretches when auditory stimuli were alongside the prototypical /&#x000F8;/ vocalic sound (see <xref ref-type="fig" rid="F2">Figure 2</xref>), children&#x02019;s categorization was influenced even when sensory channels were clearly contrasting, as if the bimodal presentation of vocalic targets blurred the children&#x02019;s categorization abilities. Moreover, though somatosensory information mostly affected specific stimuli in adults, its effect in children was more widely distributed along the auditory continuum. These last observations support our second hypothesis that MSI is more strongly defined in adults.</p>
<p>As many have suggested, MSI continues to develop during childhood (e.g., Ross et al., <xref ref-type="bibr" rid="B92">2011</xref>; Dionne-Dostie et al., <xref ref-type="bibr" rid="B23">2015</xref>). The fact that young children are influenced by somatosensory inputs in a different manner than adults could, therefore, be due to their underdeveloped MSI abilities. Related findings have been reported for audiovisual integration (McGurk and MacDonald, <xref ref-type="bibr" rid="B66">1976</xref>; Massaro, <xref ref-type="bibr" rid="B63">1984</xref>; Desjardins et al., <xref ref-type="bibr" rid="B21">1997</xref>). It has also been demonstrated that the influence of visual articulators in audition is weaker in school-aged children than in adults.</p>
<p>In agreement with the concept that MSI continues to develop during childhood, the differences observed between the two groups of perceivers could also be explained by the fact that different sensory systems develop at different rates and in different ways. In that sense, it has also been found that school-aged children were not only less likely to perceive a perceptual illusion resulting from incongruent auditory and visual inputs, but they also had poorer results in the identification of unimodal visual targets (Massaro, <xref ref-type="bibr" rid="B63">1984</xref>).</p>
<p>Studies of the development of somatosensory abilities also support this concept. As established earlier, oral sensory acuity continues to mature until adolescence (McDonald and Aungst, <xref ref-type="bibr" rid="B64">1967</xref>; Dette and Linke, <xref ref-type="bibr" rid="B22">1982</xref>; Holst-Wolf et al., <xref ref-type="bibr" rid="B42">2016</xref>). The young participants who were 5&#x02013;6 years of age in the current study may have had underdeveloped proprioceptive systems, which may have caused their less clearly defined categorization of bimodal presentations.</p>
<p>It is generally accepted that auditory discrimination is poorer and more variable in children than in adults (Buss et al., <xref ref-type="bibr" rid="B17">2009</xref>; MacPherson and Akeroyd, <xref ref-type="bibr" rid="B62">2014</xref>), and children&#x02019;s lower psychometric scores are often related to poorer attention (Moore et al., <xref ref-type="bibr" rid="B76">2008</xref>).</p>
<p>MSI requires sustained attention, and researchers have suggested that poor psychometric scores in children might be related to an attentional bias between the recruited senses in children vs. adults (Spence and McDonald, <xref ref-type="bibr" rid="B99">2004</xref>; Alsius et al., <xref ref-type="bibr" rid="B1">2005</xref>; Barutchu et al., <xref ref-type="bibr" rid="B7">2009</xref>). For example, Barutchu et al. (<xref ref-type="bibr" rid="B7">2009</xref>) observed a decline in multisensory facilitation when auditory inputs were presented with a reduced signal-to-noise ratio. They suggested that the increased level of difficulty in performing the audiovisual detection task under high noise condition may be responsible for the degraded integrative processes.</p>
<p>If this attention bias might explain some of the between-group performance differences found when /e/-like somatosensory inputs were presented with /&#x000F8;/-like auditory inputs (high level of difficulty), it would not justify differences between children and adults when the auditory and somatosensory channels agreed. The children showed decreased multisensory ability when both sensory inputs were compatible. Since difficulty level was reduced when multiple sensory sources were compatible, we should only have observed confusion in the children&#x02019;s categorization when auditory and somatosensory information was incongruent. According to the intersensory redundancy hypothesis, MSI should be improved when information from multiple sources is redundant. Indeed, Bahrick and Lickliter (<xref ref-type="bibr" rid="B4">2000</xref>) suggested that concordance of multiple signals would guide attention and even help learning (Barutchu et al., <xref ref-type="bibr" rid="B8">2010</xref>). In the current study, this multisensory facilitation was only found in the adult participants.</p>
<p>This latter observation and the fact that no significant differences in variability were found across experimental conditions make it difficult to link the dissimilar patterns of MSI found between the two groups to an attention bias in children. However, finding a greater variability in MSI in children in both conditions, combined with their distinct psychometric and categorical scores provides support for the concept that perceptual systems in school-aged children are not yet fully shaped, which prevents them from attaining adult-like categorization scores.</p>
<p>As speech processing is multisensory and 5- to 6-year-olds have already experienced it, it is not surprising that some differences, even typical MSI ones, were found between the two experimental conditions in children. Since even very young children recognize that various speech sensory feedback can be compatible&#x02014;or not (Patterson and Werker, <xref ref-type="bibr" rid="B83">2003</xref>; Yeung and Werker, <xref ref-type="bibr" rid="B120">2013</xref>; Bruderer et al., <xref ref-type="bibr" rid="B15">2015</xref>; Werker, <xref ref-type="bibr" rid="B119">2018</xref>), the different behavioral patterns observed in this study suggest that some form of multimodal processing exists in school-aged children, but complete maturation of the sensory systems is needed to achieve adult-like MSI.</p>
</sec>
<sec sec-type="conclusion" id="s5">
<title>Conclusion</title>
<p>When somatosensory input was added to auditory stimuli, it affected the categorization of stimuli at the edge of the categorical boundary for both children and adults. However, while the oral skin stretch manipulation had a defining effect on phonemic categories in adults, it seemed to have a blurring effect in children, particularly on the prototypical auditory stimuli. Overall, our results suggest that since adults have fully developed sensory channels and more experiences in MSI, they have stronger auditory and somatosensory integration than children.</p>
<p>Although longitudinal observations are not possible, two supplementary experiments with these participants have been conducted to further investigate how MSI takes place in speech processing in school-aged children and adults. These focus on the role of visual and auditory feedback.</p>
</sec>
<sec id="s6">
<title>Data Availability Statement</title>
<p>The datasets generated for this study are available on request to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Ethics Statement</title>
<p>This study was carried out in accordance with the recommendations of &#x0201C;Comit&#x000E9; institutionnel d&#x02019;&#x000E9;thique de la recherche avec des &#x000EA;tres humains (CIERH) de l&#x02019;Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al [UQAM; Institutional review board for research ethics with humans of the Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al (UQAM)]&#x0201D; with written informed consent from all subjects or their parent (for minors). All subjects or their parent gave written informed consent in accordance with the Declaration of Helsinki. The protocol was approved by the &#x0201C;Comit&#x000E9; institutionnel d&#x02019;&#x000E9;thique de la recherche avec des &#x000EA;tres humains (CIERH) de l&#x02019;Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al [UQAM; Institutional review board for research ethics with humans of the Universit&#x000E9; du Qu&#x000E9;bec &#x000E0; Montr&#x000E9;al (UQAM)]&#x0201D;.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>PT-F, TI and LM contributed to the conception and design of the study. PT-F collected data, organized the database and performed the statistical analysis (all under LM&#x02019;s guidance). PT-F wrote the first draft of the manuscript. PT-F and LM were involved in subsequent drafts of the manuscript. PT-F, TI and LM contributed to manuscript revision, read and approved the submitted version.</p>
</sec>
<sec id="s9">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
</body>
<back>
<ack>
<p>We are especially grateful to David Ostry for his immense generosity toward this project and to Marlene Busko for copyediting the article. Special thoughts for Bilal Alchalabi and Camille Vidou for their precious help.</p>
</ack>
<fn-group>
<fn fn-type="financial-disclosure">
<p><bold>Funding.</bold> This work was funded by the Social Sciences and Humanities Research Council of Canada (both Canadian Graduate Scholarships&#x02014;a Doctoral program and an Insight grant) and the Natural Sciences and Engineering Research Council of Canada (a Discovery grant).</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alsius</surname> <given-names>A.</given-names></name> <name><surname>Navarra</surname> <given-names>J.</given-names></name> <name><surname>Campbell</surname> <given-names>R.</given-names></name> <name><surname>Soto-Faraco</surname> <given-names>S.</given-names></name></person-group> (<year>2005</year>). <article-title>Audiovisual integration of speech falters under high attention demands</article-title>. <source>Curr. Biol.</source> <volume>15</volume>, <fpage>839</fpage>&#x02013;<lpage>843</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2005.03.046</pub-id><pub-id pub-id-type="pmid">15886102</pub-id></citation></ref>
<ref id="B2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arabin</surname> <given-names>B.</given-names></name></person-group> (<year>2002</year>). <article-title>Music during pregnancy</article-title>. <source>Ultrasound Obstet. Gynecol</source> <volume>20</volume>, <fpage>425</fpage>&#x02013;<lpage>430</lpage>. <pub-id pub-id-type="doi">10.1046/j.1469-0705.2002.00844.x</pub-id><pub-id pub-id-type="pmid">12423477</pub-id></citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baayen</surname> <given-names>R. H.</given-names></name> <name><surname>Davidson</surname> <given-names>D. J.</given-names></name> <name><surname>Bates</surname> <given-names>D. M.</given-names></name></person-group> (<year>2008</year>). <article-title>Mixed-effects modeling with crossed random effects for subjects and items</article-title>. <source>J. Mem. Lang.</source> <volume>59</volume>, <fpage>390</fpage>&#x02013;<lpage>412</lpage>. <pub-id pub-id-type="doi">10.1016/j.jml.2007.12.005</pub-id></citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bahrick</surname> <given-names>L. E.</given-names></name> <name><surname>Lickliter</surname> <given-names>R.</given-names></name></person-group> (<year>2000</year>). <article-title>Intersensory redundancy guides attentional selectivity and perceptual learning in infancy</article-title>. <source>Dev. Psychol.</source> <volume>36</volume>, <fpage>190</fpage>&#x02013;<lpage>201</lpage>. <pub-id pub-id-type="doi">10.1037//0012-1649.36.2.190</pub-id><pub-id pub-id-type="pmid">10749076</pub-id></citation></ref>
<ref id="B5"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Bahrick</surname> <given-names>L. E.</given-names></name> <name><surname>Lickliter</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). &#x0201C;<article-title>The role of intersensory redundancy in early perceptual, cognitive and social development</article-title>,&#x0201D; in <source>Multisensory Development</source>, eds <person-group person-group-type="editor"><name><surname>Bremner</surname> <given-names>A.</given-names></name> <name><surname>Lewkowicz</surname> <given-names>D. J.</given-names></name> <name><surname>Spence</surname> <given-names>C.</given-names></name></person-group> (<publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>), <fpage>183</fpage>&#x02013;<lpage>205</lpage>.</citation></ref>
<ref id="B7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barutchu</surname> <given-names>A.</given-names></name> <name><surname>Crewther</surname> <given-names>D. P.</given-names></name> <name><surname>Crewther</surname> <given-names>S. G.</given-names></name></person-group> (<year>2009</year>). <article-title>The race that precedes coactivation: development of multisensory facilitation in children</article-title>. <source>Dev. Sci.</source> <volume>12</volume>, <fpage>464</fpage>&#x02013;<lpage>473</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-7687.2008.00782.x</pub-id><pub-id pub-id-type="pmid">19371371</pub-id></citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barutchu</surname> <given-names>A.</given-names></name> <name><surname>Danaher</surname> <given-names>J.</given-names></name> <name><surname>Crewther</surname> <given-names>S. G.</given-names></name> <name><surname>Innes-Brown</surname> <given-names>H.</given-names></name> <name><surname>Shivdasani</surname> <given-names>M. N.</given-names></name> <name><surname>Paolini</surname> <given-names>A. G.</given-names></name></person-group> (<year>2010</year>). <article-title>Audiovisual integration in noise by children and adults</article-title>. <source>J. Exp. Child Psychol.</source> <volume>105</volume>, <fpage>38</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1016/j.jecp.2009.08.005</pub-id><pub-id pub-id-type="pmid">19822327</pub-id></citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Birch</surname> <given-names>H. G.</given-names></name> <name><surname>Lefford</surname> <given-names>A.</given-names></name></person-group> (<year>1963</year>). <article-title>Intersensory development in children</article-title>. <source>Monogr. Soc. Res. Child Dev.</source> <volume>28</volume>, <fpage>1</fpage>&#x02013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.2307/1165681</pub-id><pub-id pub-id-type="pmid">14058560</pub-id></citation></ref>
<ref id="B12"><citation citation-type="web"><person-group person-group-type="author"><name><surname>Bo&#x000EB;</surname> <given-names>L.-J.</given-names></name> <name><surname>Granat</surname> <given-names>J.</given-names></name> <name><surname>Badin</surname> <given-names>P.</given-names></name> <name><surname>Autesserre</surname> <given-names>D.</given-names></name> <name><surname>Pochic</surname> <given-names>D.</given-names></name> <name><surname>Zga</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2007</year>). &#x0201C;<article-title>Skull and vocal tract growth from newborn to adult</article-title>,&#x0201D; in <source>7th International Seminar on Speech Production, ISSP7</source> (<publisher-loc>Ubatuba, Brazil</publisher-loc>), <fpage>533</fpage>&#x02013;<lpage>536</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://hal.archives-ouvertes.fr/hal-00167610">https://hal.archives-ouvertes.fr/hal-00167610</ext-link>. Accessed March 15, 2019.</citation></ref>
<ref id="B13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bo&#x000EB;</surname> <given-names>L.-J.</given-names></name> <name><surname>M&#x000E9;nard</surname> <given-names>L.</given-names></name> <name><surname>Serkhane</surname> <given-names>J.</given-names></name> <name><surname>Birkholz</surname> <given-names>P.</given-names></name> <name><surname>Kr&#x000F6;ger</surname> <given-names>B.</given-names></name> <name><surname>Badin</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>La croissance de l&#x02019;instrument vocal: contr&#x000F4;le, mod&#x000E9;lisation, potentialit&#x000E9;s acoustiques et cons&#x000E9;quences perceptives</article-title>. <source>Revue Fran&#x000E7;aise de Linguistique Appliqu&#x000E9;ee</source> <volume>13</volume>, <fpage>59</fpage>&#x02013;<lpage>80</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.cairn.info/revue-francaise-de-linguistique-appliquee-2008-2-page-59.htm">https://www.cairn.info/revue-francaise-de-linguistique-appliquee-2008-2-page-59.htm</ext-link>. Accessed March 15, 2019.</citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bower</surname> <given-names>T. G.</given-names></name> <name><surname>Broughton</surname> <given-names>J. M.</given-names></name> <name><surname>Moore</surname> <given-names>M. K.</given-names></name></person-group> (<year>1970</year>). <article-title>The coordination of visual and tactual input in infants</article-title>. <source>Percept. Psychophys.</source> <volume>8</volume>, <fpage>51</fpage>&#x02013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.3758/bf03208933</pub-id></citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bruderer</surname> <given-names>A. G.</given-names></name> <name><surname>Danielson</surname> <given-names>D. K.</given-names></name> <name><surname>Kandhadai</surname> <given-names>P.</given-names></name> <name><surname>Werker</surname> <given-names>J. F.</given-names></name></person-group> (<year>2015</year>). <article-title>Sensorimotor influences on speech perception in infancy</article-title>. <source>Proc. Natl. Acad. Sci. U S A</source> <volume>112</volume>, <fpage>13531</fpage>&#x02013;<lpage>13536</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1508631112</pub-id><pub-id pub-id-type="pmid">26460030</pub-id></citation></ref>
<ref id="B16"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Burr</surname> <given-names>D.</given-names></name> <name><surname>Gori</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). &#x0201C;<article-title>Multisensory integration develops late in humans</article-title>,&#x0201D; in <source>The Neural Bases of Multisensory Processes</source>, eds <person-group person-group-type="editor"><name><surname>Murray</surname> <given-names>M. M.</given-names></name> <name><surname>Wallace</surname> <given-names>M. T.</given-names></name></person-group> (<publisher-loc>Boca Raton, FL</publisher-loc>: <publisher-name>CRC Press</publisher-name>), <fpage>810</fpage>.</citation></ref>
<ref id="B17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Buss</surname> <given-names>E.</given-names></name> <name><surname>Hall</surname> <given-names>J. W.</given-names></name> <name><surname>Grose</surname> <given-names>J. H.</given-names></name></person-group> (<year>2009</year>). <article-title>Psychometric functions for pure tone intensity discrimination: slope differences in school-aged children and adults</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>125</volume>, <fpage>1050</fpage>&#x02013;<lpage>1058</lpage>. <pub-id pub-id-type="doi">10.1121/1.3050273</pub-id><pub-id pub-id-type="pmid">19206879</pub-id></citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Demany</surname> <given-names>L.</given-names></name> <name><surname>McKenzie</surname> <given-names>B.</given-names></name> <name><surname>Vurpillot</surname> <given-names>E.</given-names></name></person-group> (<year>1977</year>). <article-title>Rhythm perception in early infancy</article-title>. <source>Nature</source> <volume>266</volume>, <fpage>718</fpage>&#x02013;<lpage>719</lpage>. <pub-id pub-id-type="doi">10.1038/266718a0</pub-id><pub-id pub-id-type="pmid">876350</pub-id></citation></ref>
<ref id="B20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>DePaolis</surname> <given-names>R. A.</given-names></name> <name><surname>Vihman</surname> <given-names>M. M.</given-names></name> <name><surname>Keren-Portnoy</surname> <given-names>T.</given-names></name></person-group> (<year>2011</year>). <article-title>Do production patterns influence the processing of speech in prelinguistic infants?</article-title> <source>Infant Behav. Dev.</source> <volume>34</volume>, <fpage>590</fpage>&#x02013;<lpage>601</lpage>. <pub-id pub-id-type="doi">10.1016/j.infbeh.2011.06.005</pub-id><pub-id pub-id-type="pmid">21774986</pub-id></citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desjardins</surname> <given-names>R. N.</given-names></name> <name><surname>Rogers</surname> <given-names>J.</given-names></name> <name><surname>Werker</surname> <given-names>J. F.</given-names></name></person-group> (<year>1997</year>). <article-title>An exploration of why preschoolers perform differently than do adults in audiovisual speech perception tasks</article-title>. <source>J. Exp. Child Psychol.</source> <volume>66</volume>, <fpage>85</fpage>&#x02013;<lpage>110</lpage>. <pub-id pub-id-type="doi">10.1006/jecp.1997.2379</pub-id><pub-id pub-id-type="pmid">9226935</pub-id></citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dette</surname> <given-names>M.</given-names></name> <name><surname>Linke</surname> <given-names>P. G.</given-names></name></person-group> (<year>1982</year>). <article-title>The development of oral and manual stereognosis in children from 3 to 10 years old (Die Entwicklung der oralen und manuellen Stereognose bei Kindern im Alter von 3 bis 10 Jahren)</article-title>. <source>Stomatologie</source> <volume>32</volume>, <fpage>269</fpage>&#x02013;<lpage>274</lpage>.</citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dionne-Dostie</surname> <given-names>E.</given-names></name> <name><surname>Paquette</surname> <given-names>N.</given-names></name> <name><surname>Lassonde</surname> <given-names>M.</given-names></name> <name><surname>Gallagher</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Multisensory integration and child neurodevelopment</article-title>. <source>Brain Sci.</source> <volume>5</volume>, <fpage>32</fpage>&#x02013;<lpage>57</lpage>. <pub-id pub-id-type="doi">10.3390/brainsci5010032</pub-id><pub-id pub-id-type="pmid">25679116</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eimas</surname> <given-names>P. D.</given-names></name> <name><surname>Siqueland</surname> <given-names>E. R.</given-names></name> <name><surname>Jusczyk</surname> <given-names>P.</given-names></name> <name><surname>Vigorito</surname> <given-names>J.</given-names></name></person-group> (<year>1971</year>). <article-title>Speech perception in infants</article-title>. <source>Science</source> <volume>171</volume>, <fpage>303</fpage>&#x02013;<lpage>306</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://home.fau.edu/lewkowic/web/Eimas%20infant%20speech%20discrim%20Science%201971.pdf">http://home.fau.edu/lewkowic/web/Eimas%20infant%20speech%20discrim%20Science%201971.pdf</ext-link>. Accessed February 1, 2019.</citation></ref>
<ref id="B26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>Y.</given-names></name> <name><surname>Gracco</surname> <given-names>V. L.</given-names></name> <name><surname>Max</surname> <given-names>L.</given-names></name></person-group> (<year>2011</year>). <article-title>Integration of auditory and somatosensory error signals in the neural control of speech movements</article-title>. <source>J. Neurophysiol.</source> <volume>106</volume>, <fpage>667</fpage>&#x02013;<lpage>679</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00638.2010</pub-id><pub-id pub-id-type="pmid">21562187</pub-id></citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fitch</surname> <given-names>W. T.</given-names></name> <name><surname>Giedd</surname> <given-names>J.</given-names></name></person-group> (<year>1999</year>). <article-title>Morphology and development of the human vocal tract: a study using magnetic resonance imaging</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>106</volume>, <fpage>1511</fpage>&#x02013;<lpage>1522</lpage>. <pub-id pub-id-type="doi">10.1121/1.427148</pub-id><pub-id pub-id-type="pmid">10489707</pub-id></citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Foxe</surname> <given-names>J. J.</given-names></name> <name><surname>Wylie</surname> <given-names>G. R.</given-names></name> <name><surname>Martinez</surname> <given-names>A.</given-names></name> <name><surname>Schroeder</surname> <given-names>C. E.</given-names></name> <name><surname>Javitt</surname> <given-names>D. C.</given-names></name> <name><surname>Guilfoyle</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Auditory-somatosensory multisensory processing in auditory association cortex: an fMRI study</article-title>. <source>J. Neurophysiol.</source> <volume>88</volume>, <fpage>540</fpage>&#x02013;<lpage>543</lpage>. <pub-id pub-id-type="doi">10.1152/jn.2002.88.1.540</pub-id><pub-id pub-id-type="pmid">12091578</pub-id></citation></ref>
<ref id="B31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gick</surname> <given-names>B.</given-names></name> <name><surname>Derrick</surname> <given-names>D.</given-names></name></person-group> (<year>2009</year>). <article-title>Aero-tactile integration in speech perception</article-title>. <source>Nature</source> <volume>462</volume>, <fpage>502</fpage>&#x02013;<lpage>504</lpage>. <pub-id pub-id-type="doi">10.1038/nature08572</pub-id><pub-id pub-id-type="pmid">19940925</pub-id></citation></ref>
<ref id="B32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gisel</surname> <given-names>E. G.</given-names></name> <name><surname>Schwob</surname> <given-names>H.</given-names></name></person-group> (<year>1988</year>). <article-title>Oral form discrimination in normal 5- to 8-year-old children: an adjunct to an eating assessment</article-title>. <source>Occup. Ther. J. Res.</source> <volume>8</volume>, <fpage>195</fpage>&#x02013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1177/153944928800800403</pub-id></citation></ref>
<ref id="B34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gori</surname> <given-names>M.</given-names></name> <name><surname>Del Viva</surname> <given-names>M.</given-names></name> <name><surname>Sandini</surname> <given-names>G.</given-names></name> <name><surname>Burr</surname> <given-names>D. C.</given-names></name></person-group> (<year>2008</year>). <article-title>Young children do not integrate visual and haptic form information</article-title>. <source>Curr. Biol.</source> <volume>18</volume>, <fpage>694</fpage>&#x02013;<lpage>698</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2008.04.036</pub-id><pub-id pub-id-type="pmid">18450446</pub-id></citation></ref>
<ref id="B35"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Guenther</surname> <given-names>F. H.</given-names></name> <name><surname>Perkell</surname> <given-names>J. S.</given-names></name></person-group> (<year>2004</year>). &#x0201C;<article-title>A neural model of speech production and its application to studies of the role of auditory feedback in speech</article-title>,&#x0201D; in <source>Speech Motor Control in Normal and Disordered Speech</source>, eds <person-group person-group-type="editor"><name><surname>Maassen</surname> <given-names>R.</given-names></name> <name><surname>Kent</surname> <given-names>R. D.</given-names></name> <name><surname>Peters</surname> <given-names>H.</given-names></name> <name><surname>van Lieshout</surname> <given-names>P. H.</given-names></name> <name><surname>Hulstijn</surname> <given-names>W.</given-names></name></person-group> (<publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>), <fpage>29</fpage>&#x02013;<lpage>49</lpage>.</citation></ref>
<ref id="B36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guenther</surname> <given-names>F. H.</given-names></name> <name><surname>Vladusich</surname> <given-names>T.</given-names></name></person-group> (<year>2012</year>). <article-title>A neural theory of speech acquisition and production</article-title>. <source>J. Neurolinguistics</source> <volume>25</volume>, <fpage>408</fpage>&#x02013;<lpage>422</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneuroling.2009.08.006</pub-id><pub-id pub-id-type="pmid">22711978</pub-id></citation></ref>
<ref id="B37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guest</surname> <given-names>S.</given-names></name> <name><surname>Catmur</surname> <given-names>C.</given-names></name> <name><surname>Lloyd</surname> <given-names>D.</given-names></name> <name><surname>Spence</surname> <given-names>C.</given-names></name></person-group> (<year>2002</year>). <article-title>Audiotactile interactions in roughness perception</article-title>. <source>Exp. Brain Res.</source> <volume>146</volume>, <fpage>161</fpage>&#x02013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-002-1164-z</pub-id><pub-id pub-id-type="pmid">12195518</pub-id></citation></ref>
<ref id="B38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hatwell</surname> <given-names>Y.</given-names></name></person-group> (<year>1987</year>). <article-title>Motor and cognitive functions of the hand in infancy and childhood</article-title>. <source>Int. J. Behav. Dev.</source> <volume>10</volume>, <fpage>509</fpage>&#x02013;<lpage>526</lpage>. <pub-id pub-id-type="doi">10.1177/016502548701000409</pub-id></citation></ref>
<ref id="B39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hecht</surname> <given-names>D.</given-names></name> <name><surname>Reiner</surname> <given-names>M.</given-names></name></person-group> (<year>2009</year>). <article-title>Sensory dominance in combinations of audio, visual and haptic stimuli</article-title>. <source>Exp. Brain Res.</source> <volume>193</volume>, <fpage>307</fpage>&#x02013;<lpage>314</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-008-1626-z</pub-id><pub-id pub-id-type="pmid">18985327</pub-id></citation></ref>
<ref id="B41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hillock</surname> <given-names>A. R.</given-names></name> <name><surname>Powers</surname> <given-names>A. R.</given-names></name> <name><surname>Wallace</surname> <given-names>M. T.</given-names></name></person-group> (<year>2011</year>). <article-title>Binding of sights and sounds: age-related changes in multisensory temporal processing</article-title>. <source>Neuropsychologia</source> <volume>49</volume>, <fpage>461</fpage>&#x02013;<lpage>467</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2010.11.041</pub-id><pub-id pub-id-type="pmid">21134385</pub-id></citation></ref>
<ref id="B42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holst-Wolf</surname> <given-names>J. M.</given-names></name> <name><surname>Yeh</surname> <given-names>I.-L.</given-names></name> <name><surname>Konczak</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Development of proprioceptive acuity in typically developing children: normative data on forearm position sense</article-title>. <source>Front. Hum. Neurosci.</source> <volume>10</volume>:<fpage>436</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00436</pub-id><pub-id pub-id-type="pmid">27621702</pub-id></citation></ref>
<ref id="B43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Horlyck</surname> <given-names>S.</given-names></name> <name><surname>Reid</surname> <given-names>A.</given-names></name> <name><surname>Burnham</surname> <given-names>D.</given-names></name></person-group> (<year>2012</year>). <article-title>The relationship between learning to read and language-specific speech perception: maturation versus experience</article-title>. <source>Sci. Stud. Read.</source> <volume>16</volume>, <fpage>218</fpage>&#x02013;<lpage>239</lpage>. <pub-id pub-id-type="doi">10.1080/10888438.2010.546460</pub-id></citation></ref>
<ref id="B44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>H&#x000F6;tting</surname> <given-names>K.</given-names></name> <name><surname>R&#x000F6;der</surname> <given-names>B.</given-names></name></person-group> (<year>2004</year>). <article-title>Hearing cheats touch, but less in congenitally blind than in sighted individuals</article-title>. <source>Psychol. Sci.</source> <volume>15</volume>, <fpage>60</fpage>&#x02013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1111/j.0963-7214.2004.01501010.x</pub-id><pub-id pub-id-type="pmid">14717833</pub-id></citation></ref>
<ref id="B45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ito</surname> <given-names>T.</given-names></name> <name><surname>Gomi</surname> <given-names>H.</given-names></name></person-group> (<year>2007</year>). <article-title>Cutaneous mechanoreceptors contribute to the generation of a cortical reflex in speech</article-title>. <source>Neuroreport</source> <volume>18</volume>, <fpage>907</fpage>&#x02013;<lpage>910</lpage>. <pub-id pub-id-type="doi">10.1097/WNR.0b013e32810f2dfb</pub-id><pub-id pub-id-type="pmid">17515799</pub-id></citation></ref>
<ref id="B46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ito</surname> <given-names>T.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2010</year>). <article-title>Somatosensory contribution to motor learning due to facial skin deformation</article-title>. <source>J. Neurophysiol.</source> <volume>104</volume>, <fpage>1230</fpage>&#x02013;<lpage>1238</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00199.2010</pub-id><pub-id pub-id-type="pmid">20592121</pub-id></citation></ref>
<ref id="B47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ito</surname> <given-names>T.</given-names></name> <name><surname>Gracco</surname> <given-names>V. L.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2014</year>). <article-title>Temporal factors affecting somatosensory-auditory interactions in speech processing</article-title>. <source>Front. Psychol.</source> <volume>5</volume>:<fpage>1198</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2014.01198</pub-id><pub-id pub-id-type="pmid">25452733</pub-id></citation></ref>
<ref id="B48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ito</surname> <given-names>T.</given-names></name> <name><surname>Tiede</surname> <given-names>M.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2009</year>). <article-title>Somatosensory function in speech perception</article-title>. <source>Proc. Natl. Acad. Sci. U S A</source> <volume>106</volume>, <fpage>1245</fpage>&#x02013;<lpage>1248</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0810063106</pub-id><pub-id pub-id-type="pmid">19164569</pub-id></citation></ref>
<ref id="B49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Johansson</surname> <given-names>R. S.</given-names></name> <name><surname>Trulsson</surname> <given-names>M.</given-names></name> <name><surname>Olsson</surname> <given-names>K. A.</given-names></name> <name><surname>Abbs</surname> <given-names>J. H.</given-names></name></person-group> (<year>1988</year>). <article-title>Mechanoreceptive afferent activity in the infraorbital nerve in man during speech and chewing movements</article-title>. <source>Exp. Brain Res.</source> <volume>72</volume>, <fpage>209</fpage>&#x02013;<lpage>214</lpage>. <pub-id pub-id-type="doi">10.1007/bf00248519</pub-id><pub-id pub-id-type="pmid">3169190</pub-id></citation></ref>
<ref id="B50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jousm&#x000E4;ki</surname> <given-names>V.</given-names></name> <name><surname>Hari</surname> <given-names>R.</given-names></name></person-group> (<year>1998</year>). <article-title>Parchment-skin illusion: sound-biased touch</article-title>. <source>Curr. Biol.</source> <volume>8</volume>:<fpage>R190</fpage>. <pub-id pub-id-type="doi">10.1016/s0960-9822(98)70120-4</pub-id><pub-id pub-id-type="pmid">9512426</pub-id></citation></ref>
<ref id="B51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Katseff</surname> <given-names>S.</given-names></name> <name><surname>Houde</surname> <given-names>J. F.</given-names></name> <name><surname>Johnson</surname> <given-names>K.</given-names></name></person-group> (<year>2012</year>). <article-title>Partial compensation for altered auditory feedback: a tradeoff with somatosensory feedback?</article-title> <source>Lang. Speech</source> <volume>55</volume>, <fpage>295</fpage>&#x02013;<lpage>308</lpage>. <pub-id pub-id-type="doi">10.1177/0023830911417802</pub-id><pub-id pub-id-type="pmid">22783636</pub-id></citation></ref>
<ref id="B52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krakauer</surname> <given-names>J. W.</given-names></name> <name><surname>Mazzoni</surname> <given-names>P.</given-names></name> <name><surname>Ghazizadeh</surname> <given-names>A.</given-names></name> <name><surname>Ravindran</surname> <given-names>R.</given-names></name> <name><surname>Shadmehr</surname> <given-names>R.</given-names></name></person-group> (<year>2006</year>). <article-title>Generalization of motor learning depends on the history of prior action</article-title>. <source>PLoS Biol.</source> <volume>4</volume>:<fpage>e316</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pbio.0040316</pub-id><pub-id pub-id-type="pmid">16968135</pub-id></citation></ref>
<ref id="B53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krueger</surname> <given-names>L. E.</given-names></name></person-group> (<year>1970</year>). <article-title>David Katz&#x02019;s Der Aufbau der Tastwelt (The World of Touch): a synopsis</article-title>. <source>Percept. Psychophys.</source> <volume>7</volume>, <fpage>337</fpage>&#x02013;<lpage>341</lpage>. <pub-id pub-id-type="doi">10.3758/BF03208659</pub-id></citation></ref>
<ref id="B54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kuhl</surname> <given-names>P. K.</given-names></name></person-group> (<year>1991</year>). <article-title>Human adults and human infants show a perceptual magnet effect for the prototypes of speech categories, monkeys do not</article-title>. <source>Percept. Psychophys.</source> <volume>50</volume>, <fpage>93</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.3758/bf03212211</pub-id><pub-id pub-id-type="pmid">1945741</pub-id></citation></ref>
<ref id="B55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kuhl</surname> <given-names>P.</given-names></name> <name><surname>Meltzoff</surname> <given-names>A.</given-names></name></person-group> (<year>1982</year>). <article-title>The bimodal perception of speech in infancy</article-title>. <source>Science</source> <volume>218</volume>, <fpage>1138</fpage>&#x02013;<lpage>1141</lpage>. <pub-id pub-id-type="doi">10.1126/science.7146899</pub-id><pub-id pub-id-type="pmid">7146899</pub-id></citation></ref>
<ref id="B56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kumin</surname> <given-names>L. B.</given-names></name> <name><surname>Saltysiak</surname> <given-names>E. B.</given-names></name> <name><surname>Bell</surname> <given-names>K.</given-names></name> <name><surname>Forget</surname> <given-names>K.</given-names></name> <name><surname>Goodman</surname> <given-names>M. S.</given-names></name> <name><surname>Goytisolo</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>1984</year>). <article-title>Relationships of oral stereognostic ability to age and sex of children</article-title>. <source>Percept. Mot. Skills</source> <volume>59</volume>, <fpage>123</fpage>&#x02013;<lpage>126</lpage>. <pub-id pub-id-type="doi">10.2466/pms.1984.59.1.123</pub-id><pub-id pub-id-type="pmid">6493925</pub-id></citation></ref>
<ref id="B57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lametti</surname> <given-names>D. R.</given-names></name> <name><surname>Nasir</surname> <given-names>S. M.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2012</year>). <article-title>Sensory preference in speech production revealed by simultaneous alteration of auditory and somatosensory feedback</article-title>. <source>J. Neurosci.</source> <volume>32</volume>, <fpage>9351</fpage>&#x02013;<lpage>9358</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.0404-12.2012</pub-id><pub-id pub-id-type="pmid">22764242</pub-id></citation></ref>
<ref id="B58"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Lecanuet</surname> <given-names>J.-P.</given-names></name> <name><surname>Granier-Deferre</surname> <given-names>C.</given-names></name> <name><surname>Busnel</surname> <given-names>M.-C.</given-names></name></person-group> (<year>1995</year>). &#x0201C;<article-title>Human fetal auditory perception</article-title>,&#x0201D; in <source>Fetal Development: A Psychobiological Perspective</source>, eds <person-group person-group-type="editor"><name><surname>Lecanuet</surname> <given-names>J.-P.</given-names></name> <name><surname>Fifer</surname> <given-names>W. P.</given-names></name> <name><surname>Krasnegor</surname> <given-names>N. A.</given-names></name> <name><surname>Smotherman</surname> <given-names>W. P.</given-names></name></person-group> (<publisher-loc>Hillsdale, NJ</publisher-loc>: <publisher-name>Lawrence Erlbaum Associates</publisher-name>), <fpage>512</fpage>.</citation></ref>
<ref id="B59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Macaluso</surname> <given-names>E.</given-names></name> <name><surname>Frith</surname> <given-names>C. D.</given-names></name> <name><surname>Driver</surname> <given-names>J.</given-names></name></person-group> (<year>2000</year>). <article-title>Modulation of human visual cortex by crossmodal spatial attention</article-title>. <source>Science</source> <volume>289</volume>, <fpage>1206</fpage>&#x02013;<lpage>1208</lpage>. <pub-id pub-id-type="doi">10.1126/science.289.5482.1206</pub-id><pub-id pub-id-type="pmid">10947990</pub-id></citation></ref>
<ref id="B60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacDonald</surname> <given-names>E. N.</given-names></name> <name><surname>Goldberg</surname> <given-names>R.</given-names></name> <name><surname>Munhall</surname> <given-names>K. G.</given-names></name></person-group> (<year>2010</year>). <article-title>Compensations in response to real-time formant perturbations of different magnitudes</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>127</volume>, <fpage>1059</fpage>&#x02013;<lpage>1068</lpage>. <pub-id pub-id-type="doi">10.1121/1.3278606</pub-id><pub-id pub-id-type="pmid">20136227</pub-id></citation></ref>
<ref id="B62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacPherson</surname> <given-names>A.</given-names></name> <name><surname>Akeroyd</surname> <given-names>M. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Variations in the slope of the psychometric functions for speech intelligibility: a systematic survey</article-title>. <source>Trends Hear.</source> <volume>18</volume>:<fpage>2331216514537722</fpage>. <pub-id pub-id-type="doi">10.1177/2331216514537722</pub-id><pub-id pub-id-type="pmid">24906905</pub-id></citation></ref>
<ref id="B63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Massaro</surname> <given-names>D. W.</given-names></name></person-group> (<year>1984</year>). <article-title>Children&#x02019;s perception of visual and auditory speech</article-title>. <source>Child Dev.</source> <volume>55</volume>, <fpage>1777</fpage>&#x02013;<lpage>1788</lpage>. <pub-id pub-id-type="pmid">6510054</pub-id></citation></ref>
<ref id="B633"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Massaro</surname> <given-names>D. W.</given-names></name></person-group> (<year>1999</year>). <article-title>Speech reading: illusion or window into pattern recognition</article-title>. <source>Trends Cogn. Sci.</source> <volume>3</volume>, <fpage>310</fpage>&#x02013;<lpage>317</lpage>. <pub-id pub-id-type="doi">10.1016/S1364-6613(99)01360-1</pub-id><pub-id pub-id-type="pmid">10431508</pub-id></citation></ref>
<ref id="B64"><citation citation-type="book"><person-group person-group-type="author"><name><surname>McDonald</surname> <given-names>E. T.</given-names></name> <name><surname>Aungst</surname> <given-names>L. F.</given-names></name></person-group> (<year>1967</year>). &#x0201C;<article-title>Studies in oral sensorimotor function</article-title>,&#x0201D; in <source>Symposium on Oral Sensation and Perception</source>, ed. <person-group person-group-type="editor"><name><surname>Bosma</surname> <given-names>J. F.</given-names></name></person-group> (<publisher-loc>Springfield, IL</publisher-loc>: <publisher-name>Charles C. Thomas</publisher-name>), <fpage>202</fpage>&#x02013;<lpage>220</lpage>.</citation></ref>
<ref id="B66"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McGurk</surname> <given-names>H.</given-names></name> <name><surname>MacDonald</surname> <given-names>J.</given-names></name></person-group> (<year>1976</year>). <article-title>Hearing lips and seeing voices</article-title>. <source>Nature</source> <volume>264</volume>, <fpage>746</fpage>&#x02013;<lpage>748</lpage>. <pub-id pub-id-type="doi">10.1038/264746a0</pub-id><pub-id pub-id-type="pmid">1012311</pub-id></citation></ref>
<ref id="B67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McGurk</surname> <given-names>H.</given-names></name> <name><surname>Power</surname> <given-names>R. P.</given-names></name></person-group> (<year>1980</year>). <article-title>Intermodal coordination in young children: vision and touch</article-title>. <source>Dev. Psychol.</source> <volume>16</volume>, <fpage>679</fpage>&#x02013;<lpage>680</lpage>. <pub-id pub-id-type="doi">10.1037/0012-1649.16.6.679</pub-id></citation></ref>
<ref id="B68"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>M&#x000E9;nard</surname> <given-names>L.</given-names></name> <name><surname>Boe</surname> <given-names>L.</given-names></name></person-group> (<year>2004</year>). <article-title>L&#x02019;&#x000E9;mergence du syst&#x000E8;me phonologique chez l&#x02019;enfant : l&#x02019;apport de la mod&#x000E9;lisation articulatoire</article-title>. <source>Can. J. Linguist.</source> <volume>49</volume>, <fpage>155</fpage>&#x02013;<lpage>174</lpage>. <pub-id pub-id-type="doi">10.1017/S0008413100000785</pub-id></citation></ref>
<ref id="B71"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mercier</surname> <given-names>M. R.</given-names></name> <name><surname>Foxe</surname> <given-names>J. J.</given-names></name> <name><surname>Fiebelkorn</surname> <given-names>I. C.</given-names></name> <name><surname>Butler</surname> <given-names>J. S.</given-names></name> <name><surname>Schwartz</surname> <given-names>T. H.</given-names></name> <name><surname>Molholm</surname> <given-names>S.</given-names></name></person-group> (<year>2013</year>). <article-title>Auditory-driven phase reset in visual cortex: human electrocorticography reveals mechanisms of early multisensory integration</article-title>. <source>Neuroimage</source> <volume>79</volume>, <fpage>19</fpage>&#x02013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.04.060</pub-id><pub-id pub-id-type="pmid">23624493</pub-id></citation></ref>
<ref id="B72"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Misceo</surname> <given-names>G. F.</given-names></name> <name><surname>Hershberger</surname> <given-names>W. A.</given-names></name> <name><surname>Mancini</surname> <given-names>R. L.</given-names></name></person-group> (<year>1999</year>). <article-title>Haptic estimates of discordant visual-haptic size vary developmentally</article-title>. <source>Percept. Psychophys.</source> <volume>61</volume>, <fpage>608</fpage>&#x02013;<lpage>614</lpage>. <pub-id pub-id-type="doi">10.3758/bf03205533</pub-id><pub-id pub-id-type="pmid">10370331</pub-id></citation></ref>
<ref id="B73"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname> <given-names>J.</given-names></name> <name><surname>Martinez</surname> <given-names>A.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name> <name><surname>Hillyard</surname> <given-names>S. A.</given-names></name></person-group> (<year>2007</year>). <article-title>Early cross-modal interactions in auditory and visual cortex underlie a sound-induced visual illusion</article-title>. <source>J. Neurosci.</source> <volume>27</volume>, <fpage>4120</fpage>&#x02013;<lpage>4131</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4912-06.2007</pub-id><pub-id pub-id-type="pmid">17428990</pub-id></citation></ref>
<ref id="B75"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Molholm</surname> <given-names>S.</given-names></name> <name><surname>Ritter</surname> <given-names>W.</given-names></name> <name><surname>Murray</surname> <given-names>M. M.</given-names></name> <name><surname>Javitt</surname> <given-names>D. C.</given-names></name> <name><surname>Schroeder</surname> <given-names>C. E.</given-names></name> <name><surname>Foxe</surname> <given-names>J. J.</given-names></name></person-group> (<year>2002</year>). <article-title>Multisensory auditory-visual interactions during early sensory processing in humans: a high-density electrical mapping study</article-title>. <source>Brain Res. Cogn. Brain Res.</source> <volume>14</volume>, <fpage>115</fpage>&#x02013;<lpage>128</lpage>. <pub-id pub-id-type="doi">10.1016/s0926-6410(02)00066-6</pub-id><pub-id pub-id-type="pmid">12063135</pub-id></citation></ref>
<ref id="B76"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moore</surname> <given-names>D. R.</given-names></name> <name><surname>Ferguson</surname> <given-names>M. A.</given-names></name> <name><surname>Halliday</surname> <given-names>L. F.</given-names></name> <name><surname>Riley</surname> <given-names>A.</given-names></name></person-group> (<year>2008</year>). <article-title>Frequency discrimination in children: perception, learning and attention</article-title>. <source>Hear. Res.</source> <volume>238</volume>, <fpage>147</fpage>&#x02013;<lpage>154</lpage>. <pub-id pub-id-type="doi">10.1016/j.heares.2007.11.013</pub-id><pub-id pub-id-type="pmid">18222053</pub-id></citation></ref>
<ref id="B78"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nardini</surname> <given-names>M.</given-names></name> <name><surname>Jones</surname> <given-names>P.</given-names></name> <name><surname>Bedford</surname> <given-names>R.</given-names></name> <name><surname>Braddick</surname> <given-names>O.</given-names></name></person-group> (<year>2008</year>). <article-title>Development of cue integration in human navigation</article-title>. <source>Curr. Biol.</source> <volume>18</volume>, <fpage>689</fpage>&#x02013;<lpage>693</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2008.04.021</pub-id><pub-id pub-id-type="pmid">18450447</pub-id></citation></ref>
<ref id="B79"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nasir</surname> <given-names>S. M.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2006</year>). <article-title>Somatosensory precision in speech production</article-title>. <source>Curr. Biol.</source> <volume>16</volume>, <fpage>1918</fpage>&#x02013;<lpage>1923</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2006.07.069</pub-id><pub-id pub-id-type="pmid">17027488</pub-id></citation></ref>
<ref id="B80"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neil</surname> <given-names>P. A.</given-names></name> <name><surname>Chee-Ruiter</surname> <given-names>C.</given-names></name> <name><surname>Scheier</surname> <given-names>C.</given-names></name> <name><surname>Lewkowicz</surname> <given-names>D. J.</given-names></name> <name><surname>Shimojo</surname> <given-names>S.</given-names></name></person-group> (<year>2006</year>). <article-title>Development of multisensory spatial integration and perception in humans</article-title>. <source>Dev. Sci.</source> <volume>9</volume>, <fpage>454</fpage>&#x02013;<lpage>464</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-7687.2006.00512.x</pub-id><pub-id pub-id-type="pmid">16911447</pub-id></citation></ref>
<ref id="B81"><citation citation-type="web"><person-group person-group-type="author"><name><surname>Ogane</surname> <given-names>R.</given-names></name> <name><surname>Schwartz</surname> <given-names>J.-L.</given-names></name> <name><surname>Ito</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). &#x0201C;<article-title>Somatosensory information affects word segmentation and perception of lexical information</article-title>,&#x0201D; in <source>(Poster) Presented at the 2017 Society for the Neurobiology of Language conference in Baltimore, MD</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://hal.archives-ouvertes.fr/hal-01658527">https://hal.archives-ouvertes.fr/hal-01658527</ext-link>. Accessed March 05, 2019.</citation></ref>
<ref id="B83"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Patterson</surname> <given-names>M. L.</given-names></name> <name><surname>Werker</surname> <given-names>J. F.</given-names></name></person-group> (<year>2003</year>). <article-title>Two-month-old infants match phonetic information in lips and voice</article-title>. <source>Dev. Sci.</source> <volume>6</volume>, <fpage>191</fpage>&#x02013;<lpage>196</lpage>. <pub-id pub-id-type="doi">10.1111/1467-7687.00271</pub-id></citation></ref>
<ref id="B85"><citation citation-type="web"><person-group person-group-type="author"><name><surname>Perrier</surname> <given-names>P.</given-names></name></person-group> (<year>1995</year>). <article-title>Control and representations in speech production</article-title>. <source>ZAS Papers in Linguistics</source> <volume>40</volume>, <fpage>109</fpage>&#x02013;<lpage>132</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://hal.archives-ouvertes.fr/hal-00430387/document">https://hal.archives-ouvertes.fr/hal-00430387/document</ext-link>. Accessed March 05, 2019.</citation></ref>
<ref id="B86"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pons</surname> <given-names>F.</given-names></name> <name><surname>Lewkowicz</surname> <given-names>D. J.</given-names></name> <name><surname>Soto-Faraco</surname> <given-names>S.</given-names></name> <name><surname>Sebasti&#x000E1;n-Gall&#x000E9;s</surname> <given-names>N.</given-names></name></person-group> (<year>2009</year>). <article-title>Narrowing of intersensory speech perception in infancy</article-title>. <source>Proc. Natl. Acad. Sci. U S A</source> <volume>106</volume>, <fpage>10598</fpage>&#x02013;<lpage>10602</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0904134106</pub-id><pub-id pub-id-type="pmid">19541648</pub-id></citation></ref>
<ref id="B87"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Proske</surname> <given-names>U.</given-names></name> <name><surname>Gandevia</surname> <given-names>S. C.</given-names></name></person-group> (<year>2009</year>). <article-title>The kinaesthetic senses</article-title>. <source>J. Physiol.</source> <volume>587</volume>, <fpage>4139</fpage>&#x02013;<lpage>4146</lpage>. <pub-id pub-id-type="doi">10.1113/jphysiol.2009.175372</pub-id><pub-id pub-id-type="pmid">19581378</pub-id></citation></ref>
<ref id="B88"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Purcell</surname> <given-names>D. W.</given-names></name> <name><surname>Munhall</surname> <given-names>K. G.</given-names></name></person-group> (<year>2006</year>). <article-title>Adaptive control of vowel formant frequency: evidence from real-time formant manipulation</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>120</volume>, <fpage>966</fpage>&#x02013;<lpage>977</lpage>. <pub-id pub-id-type="doi">10.1121/1.2217714</pub-id><pub-id pub-id-type="pmid">16938984</pub-id></citation></ref>
<ref id="B89"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Raij</surname> <given-names>T.</given-names></name> <name><surname>Ahveninen</surname> <given-names>J.</given-names></name> <name><surname>Lin</surname> <given-names>F. H.</given-names></name> <name><surname>Witzel</surname> <given-names>T.</given-names></name> <name><surname>J&#x000E4;&#x000E4;skel&#x000E4;inen</surname> <given-names>I. P.</given-names></name> <name><surname>Letham</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Onset timing of cross-sensory activations and multisensory interactions in auditory and visual sensory cortices</article-title>. <source>Eur. J. Neurosci.</source> <volume>31</volume>, <fpage>1772</fpage>&#x02013;<lpage>1782</lpage>. <pub-id pub-id-type="doi">10.1111/j.1460-9568.2010.07213.x</pub-id><pub-id pub-id-type="pmid">20584181</pub-id></citation></ref>
<ref id="B90"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rentschler</surname> <given-names>I.</given-names></name> <name><surname>J&#x000FC;ttner</surname> <given-names>M.</given-names></name> <name><surname>Osman</surname> <given-names>E.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>A.</given-names></name> <name><surname>Caelli</surname> <given-names>T.</given-names></name></person-group> (<year>2004</year>). <article-title>Development of configural 3D object recognition</article-title>. <source>Behav. Brain Res.</source> <volume>149</volume>, <fpage>107</fpage>&#x02013;<lpage>111</lpage>. <pub-id pub-id-type="doi">10.1016/s0166-4328(03)00194-3</pub-id><pub-id pub-id-type="pmid">14739015</pub-id></citation></ref>
<ref id="B91"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Robert-Ribes</surname> <given-names>J.</given-names></name> <name><surname>Schwartz</surname> <given-names>J.-L.</given-names></name> <name><surname>Lallouache</surname> <given-names>T.</given-names></name> <name><surname>Escudier</surname> <given-names>P.</given-names></name></person-group> (<year>1998</year>). <article-title>Complementarity and synergy in bimodal speech: auditory, visual and audio-visual identification of French oral vowels in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>103</volume>, <fpage>3677</fpage>&#x02013;<lpage>3689</lpage>. <pub-id pub-id-type="doi">10.1121/1.423069</pub-id><pub-id pub-id-type="pmid">9637049</pub-id></citation></ref>
<ref id="B92"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ross</surname> <given-names>L. A.</given-names></name> <name><surname>Molholm</surname> <given-names>S.</given-names></name> <name><surname>Blanco</surname> <given-names>D.</given-names></name> <name><surname>Gomez-Ramirez</surname> <given-names>M.</given-names></name> <name><surname>Saint-amour</surname> <given-names>D.</given-names></name> <name><surname>Foxe</surname> <given-names>J. J.</given-names></name></person-group> (<year>2011</year>). <article-title>The development of multisensory speech perception continues into the late childhood years</article-title>. <source>Eur. J. Neurosci.</source> <volume>33</volume>, <fpage>2329</fpage>&#x02013;<lpage>2337</lpage>. <pub-id pub-id-type="doi">10.1111/j.1460-9568.2011.07685.x</pub-id><pub-id pub-id-type="pmid">21615556</pub-id></citation></ref>
<ref id="B94"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sch&#x000FC;rmann</surname> <given-names>M.</given-names></name> <name><surname>Caetano</surname> <given-names>G.</given-names></name> <name><surname>Jousm&#x000E4;ki</surname> <given-names>V.</given-names></name> <name><surname>Hari</surname> <given-names>R.</given-names></name></person-group> (<year>2004</year>). <article-title>Hands help hearing: facilitatory audiotactile interaction at low sound-intensity levels</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>115</volume>, <fpage>830</fpage>&#x02013;<lpage>832</lpage>. <pub-id pub-id-type="doi">10.1121/1.1639909</pub-id><pub-id pub-id-type="pmid">15000194</pub-id></citation></ref>
<ref id="B97"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Skipper</surname> <given-names>J. I.</given-names></name> <name><surname>Van Wassenhove</surname> <given-names>V.</given-names></name> <name><surname>Nusbaum</surname> <given-names>H. C.</given-names></name> <name><surname>Small</surname> <given-names>S. L.</given-names></name></person-group> (<year>2007</year>). <article-title>Hearing lips and seeing voices: how cortical areas supporting speech production mediate audiovisual speech perception</article-title>. <source>Cereb. Cortex</source> <volume>17</volume>, <fpage>2387</fpage>&#x02013;<lpage>2399</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhl147</pub-id><pub-id pub-id-type="pmid">17218482</pub-id></citation></ref>
<ref id="B99"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Spence</surname> <given-names>C.</given-names></name> <name><surname>McDonald</surname> <given-names>J.</given-names></name></person-group> (<year>2004</year>). &#x0201C;<article-title>The cross-modal consequences of the exogenous spatial orienting of attention</article-title>,&#x0201D; in <source>The Handbook of Multisensory Processes</source>, eds <person-group person-group-type="editor"><name><surname>Calvert</surname> <given-names>G. A.</given-names></name> <name><surname>Spence</surname> <given-names>C.</given-names></name> <name><surname>Stein</surname> <given-names>B. E.</given-names></name></person-group> (<publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>), <fpage>3</fpage>&#x02013;<lpage>25</lpage>.</citation></ref>
<ref id="B101"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>B. E.</given-names></name> <name><surname>London</surname> <given-names>N.</given-names></name> <name><surname>Wilkinson</surname> <given-names>L. K.</given-names></name> <name><surname>Price</surname> <given-names>D. D.</given-names></name></person-group> (<year>1996</year>). <article-title>Enhancement of perceived visual intensity by auditory stimuli: a psychophysical analysis</article-title>. <source>J. Cogn. Neurosci.</source> <volume>8</volume>, <fpage>497</fpage>&#x02013;<lpage>506</lpage>. <pub-id pub-id-type="doi">10.1162/jocn.1996.8.6.497</pub-id><pub-id pub-id-type="pmid">23961981</pub-id></citation></ref>
<ref id="B100"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>B. E.</given-names></name> <name><surname>Meredith</surname> <given-names>M. A.</given-names></name></person-group> (<year>1993</year>). <source>The Merging of the Senses.</source> <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>.</citation></ref>
<ref id="B102"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>B. E.</given-names></name> <name><surname>Stanford</surname> <given-names>T. R.</given-names></name> <name><surname>Rowland</surname> <given-names>B. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Development of multisensory integration from the perspective of the individual neuron</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>15</volume>, <fpage>520</fpage>&#x02013;<lpage>535</lpage>. <pub-id pub-id-type="doi">10.1038/nrn3742</pub-id><pub-id pub-id-type="pmid">25158358</pub-id></citation></ref>
<ref id="B105"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Streri</surname> <given-names>A.</given-names></name> <name><surname>Gentaz</surname> <given-names>E.</given-names></name></person-group> (<year>2004</year>). <article-title>Cross-modal recognition of shape from hand to eyes and handedness in human newborns</article-title>. <source>Neuropsychologia</source> <volume>42</volume>, <fpage>1365</fpage>&#x02013;<lpage>1369</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2004.02.012</pub-id><pub-id pub-id-type="pmid">15193944</pub-id></citation></ref>
<ref id="B108"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tourville</surname> <given-names>J. A.</given-names></name> <name><surname>Guenther</surname> <given-names>F. H.</given-names></name></person-group> (<year>2011</year>). <article-title>The DIVA model: a neural theory of speech acquisition and production</article-title>. <source>Lang. Cogn. Process.</source> <volume>26</volume>, <fpage>952</fpage>&#x02013;<lpage>981</lpage>. <pub-id pub-id-type="doi">10.1080/01690960903498424</pub-id><pub-id pub-id-type="pmid">23667281</pub-id></citation></ref>
<ref id="B109"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tremblay</surname> <given-names>S.</given-names></name> <name><surname>Shiller</surname> <given-names>D. M.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2003</year>). <article-title>Somatosensory basis of speech production</article-title>. <source>Nature</source> <volume>423</volume>, <fpage>866</fpage>&#x02013;<lpage>869</lpage>. <pub-id pub-id-type="doi">10.1038/nature01710</pub-id><pub-id pub-id-type="pmid">12815431</pub-id></citation></ref>
<ref id="B110"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Trudeau-Fisette</surname> <given-names>P.</given-names></name> <name><surname>Tiede</surname> <given-names>M.</given-names></name> <name><surname>M&#x000E9;nard</surname> <given-names>L.</given-names></name></person-group> (<year>2017</year>). <article-title>Compensations to auditory feedback perturbations in congenitally blind and sighted speakers: acoustic and articulatory data</article-title>. <source>PLoS One</source> <volume>12</volume>:<fpage>e0180300</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0180300</pub-id><pub-id pub-id-type="pmid">28678819</pub-id></citation></ref>
<ref id="B111"><citation citation-type="web"><person-group person-group-type="author"><name><surname>Turgeon</surname> <given-names>C.</given-names></name></person-group> (<year>2011</year>). <article-title>Mesure du d&#x000E9;veloppement de la capacit&#x000E9; de discrimination auditive et visuelle chez des personnes malentendantes porteuses d&#x02019;un implant cochl&#x000E9;aire</article-title>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://papyrus.bib.umontreal.ca/xmlui/handle/1866/6091">https://papyrus.bib.umontreal.ca/xmlui/handle/1866/6091</ext-link>. Accessed February 01, 2019.</citation></ref>
<ref id="B113"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Vihman</surname> <given-names>M. M.</given-names></name></person-group> (<year>1996</year>). <source>Phonological Development: The Origins of Language in the Child.</source> <publisher-loc>Oxford, England</publisher-loc>: <publisher-name>Blackwell</publisher-name>.</citation></ref>
<ref id="B114"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villacorta</surname> <given-names>V. M.</given-names></name> <name><surname>Perkell</surname> <given-names>J. S.</given-names></name> <name><surname>Guenther</surname> <given-names>F. H.</given-names></name></person-group> (<year>2007</year>). <article-title>Sensorimotor adaptation to feedback perturbations of vowel acoustics and its relation to perception</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>122</volume>, <fpage>2306</fpage>&#x02013;<lpage>2319</lpage>. <pub-id pub-id-type="doi">10.1121/1.2773966</pub-id><pub-id pub-id-type="pmid">17902866</pub-id></citation></ref>
<ref id="B116"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vorperian</surname> <given-names>H. K.</given-names></name> <name><surname>Kent</surname> <given-names>R. D.</given-names></name> <name><surname>Gentry</surname> <given-names>L. R.</given-names></name> <name><surname>Yandell</surname> <given-names>B. S.</given-names></name></person-group> (<year>1999</year>). <article-title>Magnetic resonance imaging procedures to study the concurrent anatomic development of vocal tract structures: preliminary results</article-title>. <source>Int. J. Pediatr. Otorhinolaryngol.</source> <volume>49</volume>, <fpage>197</fpage>&#x02013;<lpage>206</lpage>. <pub-id pub-id-type="doi">10.1016/s0165-5876(99)00208-6</pub-id><pub-id pub-id-type="pmid">10519699</pub-id></citation></ref>
<ref id="B118"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Walker-Andrews</surname> <given-names>A.</given-names></name></person-group> (<year>1994</year>). &#x0201C;<article-title>Taxonomy for intermodal relations</article-title>,&#x0201D; in <source>The Development of Intersensory Perception: Comparative Perspectives</source>, eds <person-group person-group-type="editor"><name><surname>Lewkowicz</surname> <given-names>D. J.</given-names></name> <name><surname>Lickliter</surname> <given-names>R.</given-names></name></person-group> (<publisher-loc>Hillsdale, NJ</publisher-loc>: <publisher-name>Lawrence Erlbaum Associates</publisher-name>), <fpage>39</fpage>&#x02013;<lpage>56</lpage>.</citation></ref>
<ref id="B119"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Werker</surname> <given-names>J. F.</given-names></name></person-group> (<year>2018</year>). <article-title>Perceptual beginnings to language acquisition</article-title>. <source>Appl. Psycholinguist.</source> <volume>39</volume>, <fpage>703</fpage>&#x02013;<lpage>728</lpage>. <pub-id pub-id-type="doi">10.1017/s014271641800022x</pub-id></citation></ref>
<ref id="B120"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yeung</surname> <given-names>H. H.</given-names></name> <name><surname>Werker</surname> <given-names>J. F.</given-names></name></person-group> (<year>2013</year>). <article-title>Lip movements affect infants&#x02019; audiovisual speech perception</article-title>. <source>Psychol. Sci.</source> <volume>24</volume>, <fpage>603</fpage>&#x02013;<lpage>612</lpage>. <pub-id pub-id-type="doi">10.1177/0956797612458802</pub-id><pub-id pub-id-type="pmid">23538910</pub-id></citation></ref>
<ref id="B121"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>L.</given-names></name> <name><surname>Rowland</surname> <given-names>B. A.</given-names></name> <name><surname>Stein</surname> <given-names>B. E.</given-names></name></person-group> (<year>2010</year>). <article-title>Initiating the development of multisensory integration by manipulating sensory experience</article-title>. <source>J. Neurosci.</source> <volume>30</volume>, <fpage>4904</fpage>&#x02013;<lpage>4913</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.5575-09.2010</pub-id><pub-id pub-id-type="pmid">20371810</pub-id></citation></ref>
</ref-list>
</back>
</article>