<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Aging Neurosci.</journal-id>
<journal-title>Frontiers in Aging Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Aging Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1663-4365</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnagi.2023.1151652</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Aging Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Aging effect of cross-modal interactions during audiovisual detection and discrimination by behavior and ERPs</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Ren</surname> <given-names>Yanna</given-names></name><xref rid="aff1" ref-type="aff"><sup>1</sup></xref><xref rid="fn0001" ref-type="author-notes"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/898479/overview"/>
</contrib>
<contrib contrib-type="author"><name><surname>Li</surname> <given-names>Yan</given-names></name><xref rid="aff1" ref-type="aff"><sup>1</sup></xref><xref rid="fn0001" ref-type="author-notes"><sup>&#x2020;</sup></xref>
</contrib>
<contrib contrib-type="author"><name><surname>Xu</surname> <given-names>Zhihan</given-names></name><xref rid="aff2" ref-type="aff"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1433450/overview"/>
</contrib>
<contrib contrib-type="author"><name><surname>Luo</surname> <given-names>Rui</given-names></name><xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author"><name><surname>Qian</surname> <given-names>Runqi</given-names></name><xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author"><name><surname>Duan</surname> <given-names>Jieping</given-names></name><xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author"><name><surname>Yang</surname> <given-names>Jiajia</given-names></name><xref rid="aff3" ref-type="aff"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/163354/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Yang</surname> <given-names>Weiping</given-names></name><xref rid="aff4" ref-type="aff"><sup>4</sup></xref><xref rid="c001" ref-type="corresp"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/514017/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Psychology, College of Humanities and Management, Guizhou University of Traditional Chinese Medicine</institution>, <addr-line>Guiyang</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Foreign Language, Ningbo University of Technology</institution>, <addr-line>Ningbo</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Applied Brain Science Lab Interdisciplinary Science and Engineering in Health Systems, Okayama University</institution>, <addr-line>Okayama</addr-line>, <country>Japan</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Psychology, Faculty of Education, Hubei University</institution>, <addr-line>Wuhan</addr-line>, <country>China</country></aff>
<author-notes>
<fn id="fn0002" fn-type="edited-by">
<p>Edited by: Hans Colonius, University of Oldenburg, Germany</p>
</fn>
<fn id="fn0003" fn-type="edited-by">
<p>Reviewed by: Rodolfo Sol&#x00ED;s-Vivanco, Manuel Velasco Su&#x00E1;rez National Institute of Neurology and Neurosurgery, Mexico; Bin Wang, Taiyuan University of Technology, China</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Weiping Yang, <email>swywp@163.com</email></corresp>
<fn id="fn0001" fn-type="equal">
<p><sup>&#x2020;</sup>These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>26</day>
<month>04</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>15</volume>
<elocation-id>1151652</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>01</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>04</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2023 Ren, Li, Xu, Luo, Qian, Duan, Yang and Yang.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Ren, Li, Xu, Luo, Qian, Duan, Yang and Yang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Numerous studies have shown that aging greatly affects audiovisual integration; however, it is still unclear when the aging effect occurs, and its neural mechanism has yet to be fully elucidated.</p>
</sec>
<sec>
<title>Methods</title>
<p>We assessed the audiovisual integration (AVI) of older (<italic>n</italic>&#x2009;=&#x2009;40) and younger (<italic>n</italic>&#x2009;=&#x2009;45) adults using simple meaningless stimulus detection and discrimination tasks. The results showed that the response was significantly faster and more accurate for younger adults than for older adults in both the detection and discrimination tasks. The AVI was comparable for older and younger adults during stimulus detection (9.37% vs. 9.43%); however, the AVI was lower for older than for younger adults during stimulus discrimination (9.48% vs. 13.08%) behaviorally. The electroencephalography (EEG) analysis showed that comparable AVI amplitude was found at 220&#x2013;240&#x2009;ms for both groups during stimulus detection and discrimination, but there was no significant difference between brain regions for older adults but a higher AVI amplitude in the right posterior for younger adults. Additionally, a significant AVI was found for younger adults in 290&#x2013;310&#x2009;ms but was absent for older adults during stimulus discrimination. Furthermore, significant AVI was found in the left anterior and right anterior at 290&#x2013;310&#x2009;ms for older adults but in the central, right posterior and left posterior for younger adults.</p>
</sec>
<sec>
<title>Discussion</title>
<p>These results suggested that the aging effect of AVI occurred in multiple stages, but the attenuated AVI mainly occurred in the later discriminating stage attributed to attention deficit.</p>
</sec>
</abstract>
<kwd-group>
<kwd>aging</kwd>
<kwd>audiovisual integration (AVI)</kwd>
<kwd>ERP</kwd>
<kwd>race model</kwd>
<kwd>older adults</kwd>
</kwd-group>
<contract-num rid="cn2">32260198</contract-num>
<contract-num rid="cn2">31800932</contract-num>
<contract-num rid="cn2">31700973</contract-num>
<contract-num rid="cn3">22NDQN280YB</contract-num>
<contract-sponsor id="cn1">Science and Technology Planning Project of Guizhou Province</contract-sponsor>
<contract-sponsor id="cn2">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content></contract-sponsor>
<contract-sponsor id="cn3">Zhejiang Provincial Philosophy and Social Sciences Planning Project</contract-sponsor>
<counts>
<fig-count count="4"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="51"/>
<page-count count="9"/>
<word-count count="7107"/>
</counts>
</article-meta>
</front>
<body>
<sec id="sec1" sec-type="intro">
<label>1.</label>
<title>Introduction</title>
<p>Vision and audition are two important sense organs, and the merging of visual information and auditory information can occur automatically, which is called audiovisual integration (AVI; <xref ref-type="bibr" rid="ref42">Stein and Meredith, 1993</xref>; <xref ref-type="bibr" rid="ref39">Scheliga et al., 2022</xref>). Integration of available auditory and visual information from the complex outside environment assists individuals in accurately perceiving the outside world, and numerous studies have found that the response to multisensory audiovisual information is faster and more accurate than that to uni-sensory auditory information or visual information across the lifespan (<xref ref-type="bibr" rid="ref2">Brandwein et al., 2011</xref>; <xref ref-type="bibr" rid="ref36">Ren et al., 2020c</xref>). Aging is a major global issue, and the proportion of the elderly population is increasing yearly. Aging is associated with declines in various functions, including vision, audition, tactile, olfaction and gustation (<xref ref-type="bibr" rid="ref16">Isaev et al., 2019</xref>), and older adults are more dependent on information merging from different sensory modalities (<xref ref-type="bibr" rid="ref13">Grady, 2012</xref>; <xref ref-type="bibr" rid="ref10">Freiherr et al., 2013</xref>). Therefore, how elderly individuals integrate valuable information from visual and auditory sensory modalities has become a hot topic of age-related cognition, which is a key factor in the development of cognitive interventions (<xref ref-type="bibr" rid="ref36">Ren et al., 2020c</xref>).</p>
<p>Using meaningful semantic stimuli, including visual colorful circles and auditory color naming, <xref ref-type="bibr" rid="ref21">Laurienti et al. (2006)</xref> first reported that the AVI was enhanced for older adults compared with younger adults in behavior (<xref ref-type="bibr" rid="ref21">Laurienti et al., 2006</xref>). According to their results, they proposed an assumption that older adults might establish compensatory mechanisms during multisensory audiovisual processing to ease uni-sensory functional decline. To clarify the neural mechanism for the enhanced AVI, Diaconescu et al. investigated the aging effect of AVI during processing meaningful semantic audiovisual stimuli using MEG (<xref ref-type="bibr" rid="ref7">Diaconescu et al., 2013</xref>), and they found posterior parietal and medial prefrontal activity in charge of the age-related AVI. Specifically, preferential activity in posterior parietal and medial prefrontal regions responded to multisensory audiovisual stimuli between 150 and 300&#x2009;ms, and increased activity in inferior parietal and medial prefrontal regions 100&#x2009;ms after stimulus onset in older adults only. In the aforementioned studies, semantic stimulus material was applied, which induced perceptual AVI processing and semantic processing. It is difficult to disentangle the aging effect on the AVI or high-level semantic processing. In addition, semantic meaning (<xref ref-type="bibr" rid="ref8">Doehrmann and Naumer, 2008</xref>; <xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>) and task complexity (<xref ref-type="bibr" rid="ref29">Pronina et al., 2022</xref>) greatly modulate AVI processing, and investigation of the AVI of meaningless stimulus material is necessary to uncover the aging effect of AVI.</p>
<p><xref ref-type="bibr" rid="ref27">Peiffer et al. (2007)</xref> and <xref ref-type="bibr" rid="ref32">Ren et al. (2020a</xref>,<xref ref-type="bibr" rid="ref35">b</xref>,<xref ref-type="bibr" rid="ref36">c)</xref> investigated the aging effect of AVI using meaningless stimulus materials behaviorally to eliminate most high-order cognitive processing, but conflicting results were obtained (<xref ref-type="bibr" rid="ref27">Peiffer et al., 2007</xref>; <xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>). In the study by Peiffer et al., the visual stimulus was green light emitting diodes and the auditory stimulus was white noise, and a simple detection task that required participants to respond when detecting any auditory or visual signal was conducted (<xref ref-type="bibr" rid="ref27">Peiffer et al., 2007</xref>). They reported a higher AVI for older adults than for younger adults, consistent with <xref ref-type="bibr" rid="ref21">Laurienti et al. (2006)</xref>. However, Ren et al. designed a discrimination task that instructed participants to identify target signals (a white-black checkerboard that contained two black dots in the white board, and white noise) from nontarget signals (white-black checkerboard and pure tone), and found reduced AVI for older adults compared with younger adults (<xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>). The detection of information in the external world is the most basic ingredient of perception because it merely contains a behavioral judgment about the presence or absence of something regardless of its identity or properties that are necessary for discrimination tasks (<xref ref-type="bibr" rid="ref28">Pennartz, 2015</xref>). Compared with the detection task, the discrimination task requires higher cognitive processing and is also the basic ingredient of perception (<xref ref-type="bibr" rid="ref41">Spotorno et al., 2016</xref>). 
Considering the importance of detection and discrimination in human life, it is necessary to clarify whether the aging effect mainly occurred at the detection level or in a relatively higher discrimination process.</p>
<p>Although there is a mass of recent studies concerning age-related AVI, they mainly focused on the interaction between AVI with attention or spatiotemporal synchronism (<xref ref-type="bibr" rid="ref48">Wang et al., 2017</xref>; <xref ref-type="bibr" rid="ref34">Ren et al., 2018</xref>, <xref ref-type="bibr" rid="ref32">2020a</xref>, <xref ref-type="bibr" rid="ref33">2022</xref>; <xref ref-type="bibr" rid="ref47">Wang et al., 2018</xref>), and they found that compared with younger adults, additional brain networks were recruited and higher brain functional connectivity was evoked during AVI for older adults. It remains unclear when the aging effect occurs during AVI. In the current study, older and younger adults were instructed to perform meaningless auditory and visual signal detection tasks and discrimination tasks during EEG recording. This allowed us to answer two overarching research questions. First, when does the aging effect begin to influence AVI? Second, what is the neural mechanism underlying the aging effect on AVI?</p>
</sec>
<sec id="sec2" sec-type="methods">
<label>2.</label>
<title>Methods</title>
<sec id="sec3">
<label>2.1.</label>
<title>Participants</title>
<p>Forty-five older adults and 45 younger adults were recruited to participate in the study. All participants were paid for their time, and 40 older adults (55&#x2013;75&#x2009;years old, mean age&#x2009;&#x00B1;&#x2009;SD, 58.9&#x2009;&#x00B1;&#x2009;4.4) and 45 younger adults (18&#x2013;23&#x2009;years old, mean age&#x2009;&#x00B1;&#x2009;SD, 19.9&#x2009;&#x00B1;&#x2009;1.1) completed the experiment successfully. Three of the older adults were unable to complete the discrimination task, and the accuracy of two older adults was lower than 60%; therefore, the data of the five older adults were excluded from further analysis. All of the older adults were recruited from Guiyang City, and all of the younger adults were college students and graduate students of Guizhou University of Traditional Chinese Medicine. Participants who took drugs related to mental illness were excluded. All participants had normal hearing and normal or corrected-to-normal vision and were naive about the purpose of the experiment. Vision was examined by a Chinese Eye Chart, and audition was examined by Pure-tone Audiometry. The mini-mental state examination (MMSE) scores and Montreal cognitive assessment (MoCA) scores were greater than or equal to 26 (<xref ref-type="bibr" rid="ref3">Bravo and H&#x00E9;bert, 1997</xref>; <xref ref-type="bibr" rid="ref18">Jia et al., 2021</xref>). Additionally, all participants provided written informed consent before the experiment, which had previously been approved by the Second Affiliated Hospital of Guizhou University of Traditional Chinese Medicine.</p>
</sec>
<sec id="sec4">
<label>2.2.</label>
<title>Stimuli and procedure</title>
<sec id="sec5">
<label>2.2.1.</label>
<title>Detection task</title>
<p>The visual stimulus (V) is 10% contrast 1.5 spatial frequency Gabor, including horizontal Gabor and vertical Gabor. The auditory stimulus (A) is a sinusoidal tone, including 1,000 and 500&#x2009;Hz. The audiovisual stimulus (AV) is the combination of 10% contrast 1.5 spatial frequency vertical Gabor and 1,000&#x2009;Hz sinusoidal tone and of 10% contrast 1.5 spatial frequency horizontal Gabor and 500&#x2009;Hz sinusoidal tone. No other combination of A stimulus and V stimulus was used in the current study. Participants were instructed to perform the experiment in a dimly lit, electrically shielded and sound-attenuated room (laboratory room, Guizhou University of Traditional Chinese Medicine, China). All V stimuli were presented on the center of the monitor with a gray background (RGB: 192, 192, and 192) in front of the participant (60&#x2009;cm), and A stimuli were presented through speakers located centrally on the back of the monitor at 60&#x2009;dB (10&#x2009;ms of rise or fall cosine gate). The experiment began with a fixation &#x201C;+&#x201D; at the center of the screen for 3,000&#x2009;ms (<xref rid="fig1" ref-type="fig">Figure 1A</xref>). Then, the A, V, and AV stimuli were presented randomly for 100&#x2009;ms with a random interstimulus interval (ISI) of 1,800&#x2013;3,000&#x2009;ms. The participants were instructed to press the left button of the mouse to respond to all stimuli they perceived as rapidly and as accurately as possible.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Schematic depiction of the experimental design. An example of a possible sequence of audiovisual stimuli and visual stimuli in the detection task <bold>(A)</bold> and a possible sequence of audiovisual stimuli in the discrimination task <bold>(B)</bold>. ISI, interstimulus interval.</p>
</caption>
<graphic xlink:href="fnagi-15-1151652-g001.tif"/>
</fig>
</sec>
<sec id="sec6">
<label>2.2.2.</label>
<title>Discrimination task</title>
<p>The presentation of A, V, and AV stimuli was the same as that in the detection task but in a different response mode. In the stimulus discrimination task, the participant was instructed to respond only to vertical Gabor, 1,000&#x2009;Hz sinusoidal tone, and combination of vertical Gabor and 1,000&#x2009;Hz sinusoidal tone (target); however, the response was withheld for horizontal Gabor, 500&#x2009;Hz sinusoidal tone, and the combination of horizontal Gabor and 500&#x2009;Hz sinusoidal tone (non-target; <xref rid="fig1" ref-type="fig">Figure 1B</xref>). The sequence of the detection task and discrimination task was random for each participant. There were 300 trials for each task, including the A, V, and AV target trials 20 times each and the A, V, and AV non-target trials 80 times each. Each task lasted for 12&#x2009;min, which was divided into two sessions with a self-timed break.</p>
</sec>
</sec>
<sec id="sec7">
<label>2.3.</label>
<title>Data collection</title>
<p>The stimuli presentation and behavioral data collection were controlled using E-prime 3.0 software (Psychology Software Tolls, Inc., Pittsburgh, PA, USA). The EEG signals were recorded using the BrainVision actiCHamp Plus system (Brain Products GmbH, Gilching, Germany) through 32 Ag/AgCl electrodes mounted on an electrode cap (actiCAP GmbH, Herrsching, Germany). The vertical eye movements and eye blinks were measured by acquiring EOG data from an electrode placed approximately 1&#x2009;cm below the subject&#x2019;s left eye (VEOG), and the horizontal eye movements were measured by acquiring the EOG signal from one electrode placed approximately 1&#x2009;cm from the outer canthi of the left eye (HEOG). The reference electrode was Fz, and the impedance was maintained below 5&#x2009;k&#x03A9;. The raw signals were digitized using a sample frequency of 1,000&#x2009;Hz, and all data were stored digitally for off-line analysis.</p>
</sec>
<sec id="sec8">
<label>2.4.</label>
<title>Data analysis</title>
<sec id="sec9">
<label>2.4.1.</label>
<title>Behavioral data</title>
<sec id="sec10">
<label>2.4.1.1.</label>
<title>Hit rate and response time</title>
<p>The hit rate is the percentage of correct responses (the response time falls within the average time period &#x00B1;2.5 SD) relative to the total number of target stimuli. The hit rates and response times (RTs) were computed separately for each participant and then submitted to a 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;3 (stimulus type: A, V, AV) analysis of variance (ANOVA) (Greenhouse&#x2013;Geisser corrections with corrected degrees of freedom). The statistical analysis was conducted using IBM SPSS statistic 22.0 (IBM Corp., Armonk, NY, USA), the statistical significance level was set at <italic>p</italic>&#x2009;&#x2264;&#x2009;0.05, and the effect size (&#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>) estimates were reported.</p>
</sec>
<sec id="sec11">
<label>2.4.1.2.</label>
<title>Race model</title>
<p>As in our previous study on the interaction between attentional load and AVI (<xref ref-type="bibr" rid="ref30">Ren et al., 2021</xref>, <xref ref-type="bibr" rid="ref31">2023</xref>), the occurrence of AVI was assessed using a race model by cumulative distribution functions (CDFs; <xref ref-type="bibr" rid="ref24">Miller, 1982</xref>, <xref ref-type="bibr" rid="ref25">1986</xref>). <italic>P</italic><sub>A</sub>, <italic>P</italic><sub>V</sub>, and <italic>P</italic><sub>AV</sub> are the probability of responding within a given timeframe in a unimodal auditory trial, unimodal visual trial, and bimodal audiovisual trial, respectively. The race model (<italic>P</italic><sub>RM</sub>) is a statistical prediction model [<italic>P</italic><sub>RM</sub> =&#x2009;(<italic>P</italic><sub>A</sub> + <italic>P</italic><sub>V</sub>)&#x2009;&#x2212; <italic>P</italic><sub>A</sub> &#x00D7; <italic>P</italic><sub>V</sub>], and <xref ref-type="bibr" rid="ref24">Miller (1982</xref>, <xref ref-type="bibr" rid="ref25">1986)</xref> proposed that <italic>P</italic><sub>AV</sub> will never exceed P<sub>RM</sub>. If <italic>P</italic><sub>AV</sub> is significantly greater than P<sub>RM</sub>, the interaction between auditory stimulus and visual stimulus is considered to occur. To assess the amount of AVI in various conditions, a difference probability curve was generated by subtracting a subject&#x2019;s race model CDF from his or her AV CDF in each 10-ms bin (<xref ref-type="bibr" rid="ref22">Laurienti et al., 2004</xref>, <xref ref-type="bibr" rid="ref21">2006</xref>; <xref ref-type="bibr" rid="ref27">Peiffer et al., 2007</xref>; <xref ref-type="bibr" rid="ref15">Hugenschmidt et al., 2009</xref>). The peak of the difference probability curve (peak benefit) was computed separately for each participant in each condition to assess the amount of AVI. 
The time point of peak benefit was defined as the peak latency, and the time interval at which a significant difference occurred between the AV CDF and the race model CDFs was defined as the time window of AVI, which was used to assess when the AVI occurred together with peak latency.</p>
</sec>
</sec>
<sec id="sec12">
<label>2.4.2.</label>
<title>EEG data</title>
<p>The EEG data were imported and processed with MATLAB R2013b (MathWorks, Inc., Natick, MA, United States) with the freely available EEGLAB toolboxes<xref rid="fn0004" ref-type="fn">
<sup>1</sup></xref> (Swartz Center for Computational Neuroscience, La Jolla, CA, United States). The EEG data were positioned according to the 32-channel montage of the international 10/20 system, and only the EEG signals elicited by vertical Gabor, 1,000&#x2009;Hz sinusoidal tone, and combination of vertical Gabor and 1,000&#x2009;Hz sinusoidal tone were analyzed. The two electrodes monitoring eye movement (HEOG and VEOG) were deleted, and then, the data were rereferenced to the bilateral mastoid electrodes (TP9 and TP10). The original reference data were recovered to Fz. The remaining continuous EEG data were bandpass filtered from 1 to 40 Hz during recordings at a sampling rate of 1,000&#x2009;Hz. For the detection task, the data were divided into epochs with 400 time points (100&#x2009;ms prestimulus and 300&#x2009;ms poststimulus points) and 700 time points (100&#x2009;ms prestimulus and 700&#x2009;ms poststimulus points) for the discrimination task. Then, an independent component analysis (ICA) was used to remove artifacts from the data, including eye artifacts, frequency interference, muscle artifacts, head movement, and electrocardiographic activity (<xref ref-type="bibr" rid="ref23">Makeig et al., 1997</xref>; <xref ref-type="bibr" rid="ref20">Jung et al., 2001</xref>; <xref ref-type="bibr" rid="ref6">Delorme and Makeig, 2004</xref>). Subsequently, baseline corrections were made based on the 100&#x2009;ms to 0&#x2009;ms prestimulus interval data from the ICA-corrected data. The data were then averaged for each stimulus type, following digital filtering with a bandpass filter of 0.01&#x2013;40&#x2009;Hz, and the grand-averaged data were obtained across all participants for each stimulus type in each electrode. 
The AVI was calculated according to the previous studies, which have reported that audiovisual integration could be assessed by the difference in amplitude [ERP<sub>AV</sub> &#x2013; ERP<sub>(A&#x2009;+&#x2009;V)</sub>] between the sum of the event related potential (ERP) waves of the unimodal visual and unimodal auditory stimuli ERP<sub>(A&#x2009;+&#x2009;V)</sub> and the ERP waves of the bimodal stimuli ERP<sub>AV</sub> (<xref ref-type="bibr" rid="ref11">Giard and Peronnet, 1999</xref>; <xref ref-type="bibr" rid="ref43">Talsma et al., 2007</xref>).</p>
<p>According to previous studies (<xref ref-type="bibr" rid="ref7">Diaconescu et al., 2013</xref>; <xref ref-type="bibr" rid="ref51">Yang et al., 2022</xref>), five regions of interest (ROIs) were selected: left anterior (F3, FC5, and FC1), right anterior (F4, FC6, and FC2), central (C3, Cz, and C4), left posterior (P3, CP5, and CP1), and right posterior (P4, CP6, and CP2). To measure the AVI diversity between groups in each task, statistical analysis was conducted in the following steps. First, pointwise running <italic>t</italic>-tests between ERP<sub>(A&#x2009;+&#x2009;V)</sub> and ERP<sub>AV</sub> were applied. If 20 or more consecutive points were significant (20 points&#x2009;=&#x2009;20&#x2009;ms, criterion <italic>p</italic>&#x2009;&#x003C;&#x2009;0.050), we defined that the AVI occurred (<xref ref-type="bibr" rid="ref14">Guthrie and Buchwald, 1991</xref>; <xref ref-type="bibr" rid="ref40">Senkowski et al., 2007</xref>). Second, in each ROI, the amplitudes of [ERP<sub>AV</sub> &#x2013; ERP<sub>(A&#x2009;+&#x2009;V)</sub>] across each significant time interval were averaged. Finally, the mean amplitudes were submitted to 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;5 (ROIs: left anterior, right anterior, central, left posterior right posterior) ANOVA (Greenhouse&#x2013;Geisser corrections with corrected degrees of freedom). The statistical analysis was conducted using IBM SPSS statistic 22.0 (IBM Corp., Armonk, NY, USA), the statistical significance level was set at <italic>p</italic>&#x2009;&#x2264;&#x2009;0.05, and the effect size (&#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>) estimates were reported.</p>
</sec>
</sec>
</sec>
<sec id="sec13" sec-type="results">
<label>3.</label>
<title>Results</title>
<sec id="sec14">
<label>3.1.</label>
<title>Hit rate and response time</title>
<sec id="sec15">
<label>3.1.1.</label>
<title>Detection task</title>
<p>The mean hit rate and RTs are shown in <xref rid="tab1" ref-type="table">Table 1</xref>. A 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;3 (stimulus type: A, V, AV) ANOVA of hit rate found a significant main effect of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;6.632, <italic>p</italic>&#x2009;=&#x2009;0.012, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.074], indicating a higher hit rate for younger adults than for older adults, and a main effect of stimulus type [<italic>F</italic>(2, 166)&#x2009;=&#x2009;32.039, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.279], indicating a higher hit rate to the AV stimulus than to the V stimulus and A stimulus (AV&#x2009;&#x003E;&#x2009;A&#x2009;&#x003E;&#x2009;V, all <italic>ps</italic> &#x2264;&#x2009;0.006). There was no significant interaction between group and stimulus type [<italic>F</italic>(2, 166)&#x2009;=&#x2009;2.200, <italic>p</italic>&#x2009;=&#x2009;0.121, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.026]. ANOVA of RTs revealed that there was a main effect of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;193.620, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.700], indicating a faster response for younger adults than for older adults. In addition, the main effect of stimulus type was also significant [<italic>F</italic>(2, 166)&#x2009;=&#x2009;89.247, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.455], indicating a faster response to the AV stimulus than to the A stimulus and V stimulus (V vs. A, <italic>p</italic>&#x2009;=&#x2009;0.159). However, no significant interaction between group and stimulus type was found [<italic>F</italic>(2, 166)&#x2009;=&#x2009;0.444, <italic>p</italic>&#x2009;=&#x2009;0.587, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.005].</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>The mean response time and hit rate with standard deviation (mean&#x2009;&#x00B1;&#x2009;SD) for visual, auditory and audiovisual stimuli in the detection task and discrimination tasks.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" valign="top" colspan="3">Detection task</th>
<th align="center" valign="top" colspan="3">Discrimination task</th>
</tr>
<tr>
<th/>
<th align="center" valign="top">V</th>
<th align="center" valign="top">A</th>
<th align="center" valign="top">AV</th>
<th align="center" valign="top">V</th>
<th align="center" valign="top">A</th>
<th align="center" valign="top">AV</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom" colspan="7">
<bold>Response time (ms)</bold>
</td>
</tr>
<tr>
<td align="left" valign="bottom">Older</td>
<td align="center" valign="bottom">472&#x2009;&#x00B1;&#x2009;91</td>
<td align="center" valign="bottom">504&#x2009;&#x00B1;&#x2009;147</td>
<td align="center" valign="bottom">407&#x2009;&#x00B1;&#x2009;107</td>
<td align="center" valign="bottom">615&#x2009;&#x00B1;&#x2009;67</td>
<td align="center" valign="bottom">636&#x2009;&#x00B1;&#x2009;106</td>
<td align="center" valign="bottom">550&#x2009;&#x00B1;&#x2009;81</td>
</tr>
<tr>
<td align="left" valign="bottom">Younger</td>
<td align="center" valign="bottom">396&#x2009;&#x00B1;&#x2009;60</td>
<td align="center" valign="bottom">410&#x2009;&#x00B1;&#x2009;96</td>
<td align="center" valign="bottom">338&#x2009;&#x00B1;&#x2009;69</td>
<td align="center" valign="bottom">554&#x2009;&#x00B1;&#x2009;63</td>
<td align="center" valign="bottom">544&#x2009;&#x00B1;&#x2009;80</td>
<td align="center" valign="bottom">457&#x2009;&#x00B1;&#x2009;64</td>
</tr>
<tr>
<td align="left" valign="bottom" colspan="7">
<bold>Hit rate (%)</bold>
</td>
</tr>
<tr>
<td align="left" valign="bottom">Older</td>
<td align="center" valign="bottom">88&#x2009;&#x00B1;&#x2009;8</td>
<td align="center" valign="bottom">92&#x2009;&#x00B1;&#x2009;8</td>
<td align="center" valign="bottom">96&#x2009;&#x00B1;&#x2009;3</td>
<td align="center" valign="bottom">92&#x2009;&#x00B1;&#x2009;9</td>
<td align="center" valign="bottom">92&#x2009;&#x00B1;&#x2009;8</td>
<td align="center" valign="bottom">97&#x2009;&#x00B1;&#x2009;4</td>
</tr>
<tr>
<td align="left" valign="bottom">Younger</td>
<td align="center" valign="bottom">93&#x2009;&#x00B1;&#x2009;4</td>
<td align="center" valign="bottom">94&#x2009;&#x00B1;&#x2009;5</td>
<td align="center" valign="bottom">96&#x2009;&#x00B1;&#x2009;2</td>
<td align="center" valign="bottom">93&#x2009;&#x00B1;&#x2009;6</td>
<td align="center" valign="bottom">95&#x2009;&#x00B1;&#x2009;4</td>
<td align="center" valign="bottom">97&#x2009;&#x00B1;&#x2009;2</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>A, auditory stimulus; V, visual stimulus; AV, audiovisual stimulus.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec16">
<label>3.1.2.</label>
<title>Discrimination task</title>
<p>A 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;3 (stimulus type: A, V, AV) ANOVA revealed that there was a significant main effect of stimulus type on hit rate [<italic>F</italic>(2, 166)&#x2009;=&#x2009;16.483, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.166], indicating a higher hit rate for the AV stimulus than for the V stimulus and A stimulus (V vs. A, <italic>p</italic>&#x2009;=&#x2009;0.211). No significant main effect of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;2.753, <italic>p</italic>&#x2009;=&#x2009;0.201, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.032] or interaction between group and stimulus type [<italic>F</italic>(2, 166)&#x2009;=&#x2009;0.671, <italic>p</italic>&#x2009;=&#x2009;0.483, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.008] was found. ANOVA also revealed that there was a main effect of group on RT [<italic>F</italic>(1, 83)&#x2009;=&#x2009;32.483, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.281], indicating a faster response in younger adults than in older adults, and a significant main effect of stimulus type on RT [<italic>F</italic>(2, 166)&#x2009;=&#x2009;87.855, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.514], indicating a faster response to the AV stimulus than to the V stimulus and A stimulus (V vs. A, <italic>p</italic>&#x2009;=&#x2009;0.999). Additionally, the interaction between group and stimulus type was also found to have a significant effect on RT [<italic>F</italic>(2, 166)&#x2009;=&#x2009;3.735, <italic>p</italic>&#x2009;=&#x2009;0.036, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.043]. Further <italic>post hoc</italic> analysis was applied. The pairwise comparison for group found that the response of younger adults was faster than that of older adults to all stimuli. The pairwise comparison for stimulus found that the response to the AV stimulus was faster than to the A stimulus and V stimulus for both younger and older adults (all <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), but no significant difference was found between the V stimulus and A stimulus for both older (<italic>p</italic>&#x2009;=&#x2009;0.203) and younger (<italic>p</italic>&#x2009;=&#x2009;0.999) adults.</p>
</sec>
</sec>
<sec id="sec17">
<label>3.2.</label>
<title>Race model</title>
<p>The analysis for RTs using the race model revealed significant AVI in both younger and older adults in the detection task and discrimination tasks (<xref rid="fig2" ref-type="fig">Figure 2</xref>). The independent <italic>t</italic> test revealed that the AVI of older adults was comparable to that of younger adults in the detection task (9.37% vs. 9.43%, <italic>t<sub>83</sub></italic>&#x2009;=&#x2009;0.698, <italic>p</italic>&#x2009;=&#x2009;0.488, <xref rid="fig2" ref-type="fig">Figure 2A</xref>) but significantly lower than that of younger adults in the discrimination task (9.48% vs. 13.08%, <italic>t<sub>83</sub></italic>&#x2009;=&#x2009;&#x2212;2.952, <italic>p</italic>&#x2009;=&#x2009;0.034, <xref rid="fig2" ref-type="fig">Figure 2B</xref>).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Probability difference between audiovisual CDFs and race model CDFs for older and younger adults in detection <bold>(A)</bold> and discrimination <bold>(B)</bold> tasks.</p>
</caption>
<graphic xlink:href="fnagi-15-1151652-g002.tif"/>
</fig>
</sec>
<sec id="sec18">
<label>3.3.</label>
<title>EEG data</title>
<sec id="sec19">
<label>3.3.1.</label>
<title>Detection task</title>
<p>To remove the influence of action potential on ERP components, only a 400-ms time interval (100&#x2009;ms prestimulus and 300&#x2009;ms poststimulus points) was analyzed in the detection task. Pointwise running <italic>t</italic> tests revealed that significant AVI occurred at 220&#x2013;240&#x2009;ms (<xref rid="fig3" ref-type="fig">Figure 3</xref>). 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;5 (ROIs: left anterior, right anterior, central, left posterior, right posterior) ANOVA revealed a significant main effect of ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;15.652, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.133], indicating a higher amplitude in the right posterior than in the other ROIs. Additionally, there was a significant interaction between group and ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;12.709, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.159]. The <italic>post hoc</italic> analysis for group showed that for older adults, no significant difference was found between ROIs. However, for younger adults, the amplitude in the right posterior was significantly higher than the others (all <italic>ps</italic>&#x2009;&#x003C;&#x2009;0.001) and higher in the left posterior and central regions than in the right anterior and left anterior regions (all <italic>ps</italic>&#x2009;&#x2265;&#x2009;0.152). There was no significant difference between the left posterior and central (<italic>p</italic>&#x2009;&#x003E;&#x2009;0.999), but there was a significantly higher amplitude in the right anterior than in the left anterior (<italic>p</italic>&#x2009;=&#x2009;0.033). The <italic>post hoc</italic> analysis for ROIs found higher amplitude in the left anterior and right anterior (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.028) but lower amplitude in the right posterior (<italic>p</italic>&#x2009;=&#x2009;0.038) for older adults than for younger adults; however, no significant difference was found between older and younger adults in the central (<italic>p</italic>&#x2009;=&#x2009;0.205) and left posterior (<italic>p</italic>&#x2009;=&#x2009;0.690). In addition, there was no significant main effect of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;1.652, <italic>p</italic>&#x2009;=&#x2009;0.202, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.020].</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Grand-averaged event-related potentials and topography map of audiovisual integration for older and younger adults in the time window of 220&#x2013;240&#x2009;ms in the detection task. Grand-averaged event-related potentials of the left anterior are the mean amplitudes of FC5, F3, and FC1; right anterior are the mean amplitudes of FC6, F4, and FC2; central are the mean amplitudes of C3, Cz, and C4; left posterior are the mean amplitudes of CP5, P3, and CP1; and right posterior are the mean amplitudes of CP6, P4, and CP2. The time interval where audiovisual integration occurred is marked with gray squares in the ERP waves, and the darker the color (the larger the absolute value) on the topographic map, the stronger the audiovisual integration.</p>
</caption>
<graphic xlink:href="fnagi-15-1151652-g003.tif"/>
</fig>
</sec>
<sec id="sec20">
<label>3.3.2.</label>
<title>Discrimination task</title>
<p>To further investigate whether the aging effect occurred in the late cognitive processing stage, a 700-ms time interval (100&#x2009;ms prestimulus and 600&#x2009;ms poststimulus points) was analyzed in the discrimination task. Pointwise running <italic>t</italic> tests revealed that significant AVI occurred at 220&#x2013;240, 290&#x2013;310, and 400&#x2013;420&#x2009;ms. In each AVI time interval, the mean amplitudes were submitted to a 2 (group: older, younger)&#x2009;&#x00D7;&#x2009;5 (ROIs: left anterior, right anterior, central, left posterior, right posterior) ANOVA.</p>
<sec id="sec21">
<label>3.3.2.1.</label>
<title>220&#x2013;240&#x2009;ms</title>
<p>Similar to that in the detection task, there was a significant main effect of ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;8.604, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.094], indicating higher amplitude in the right posterior and central regions than in other ROIs (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.017). No significant main effect of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;1.003, <italic>p</italic>&#x2009;=&#x2009;0.319, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.012] or interaction between group and ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;1.448, <italic>p</italic>&#x2009;=&#x2009;0.234, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.017] was found.</p>
</sec>
<sec id="sec22">
<label>3.3.2.2.</label>
<title>290&#x2013;310&#x2009;ms</title>
<p>There were significant main effects of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;4.494, <italic>p</italic>&#x2009;=&#x2009;0.037, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.051] and ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;9.793, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.106], indicating higher amplitudes for younger adults than for older adults, and higher amplitudes in the left anterior, right anterior and central regions than in the left posterior and right posterior regions. Additionally, there was a significant interaction between group and ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;3.957, <italic>p</italic>&#x2009;=&#x2009;0.014, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.046]. The <italic>post hoc</italic> analysis for group showed that there was no significant difference between ROIs for older adults (all <italic>ps</italic>&#x2009;&#x2265;&#x2009;0.324). For younger adults, the amplitude was higher in the left anterior and central regions than in the other ROIs (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.009) and higher in the right anterior and left posterior than in the right posterior (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.003); however, no significant difference was found between left anterior and central (all <italic>ps</italic>&#x2009;&#x003E;&#x2009;0.999) or between right anterior and left posterior (all <italic>ps</italic>&#x2009;&#x003E;&#x2009;0.999). The <italic>post hoc</italic> analysis for ROIs showed higher amplitudes for younger than for older adults in the left anterior (<italic>p</italic>&#x2009;=&#x2009;0.014), central (<italic>p</italic>&#x2009;=&#x2009;0.010) and right posterior (<italic>p</italic>&#x2009;=&#x2009;0.009) but comparable amplitudes in the right anterior (<italic>p</italic>&#x2009;=&#x2009;0.404) and left posterior (<italic>p</italic>&#x2009;=&#x2009;0.412).</p>
</sec>
<sec id="sec23">
<label>3.3.2.3.</label>
<title>400&#x2013;420&#x2009;ms</title>
<p>There were no significant main effects of group [<italic>F</italic>(1, 83)&#x2009;=&#x2009;2.260, <italic>p</italic>&#x2009;=&#x2009;0.137, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.027] and ROIs [<italic>F</italic>(4, 332)&#x2009;=&#x2009;2.854, <italic>p</italic>&#x2009;=&#x2009;0.057, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.033]; however, the interaction between group and ROIs was significant [<italic>F</italic>(4, 332)&#x2009;=&#x2009;25.879, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, &#x03B7;<italic>
<sub>P</sub>
</italic><sup>2</sup>&#x2009;=&#x2009;0.238]. The <italic>post hoc</italic> analysis showed that for the older group, there was a higher amplitude in the left anterior and right anterior than in other ROIs (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.030) and a higher amplitude in the central region than in the left posterior (<italic>p</italic>&#x2009;=&#x2009;0.001) and right posterior (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), but there was no significant difference between the left anterior and right anterior (<italic>p</italic>&#x2009;&#x003E;&#x2009;0.999) or between the left posterior and right posterior (<italic>p</italic>&#x2009;&#x003E;&#x2009;0.999). For younger adults, the amplitude was higher in the central, left posterior and right posterior than in the right anterior and left anterior (all <italic>ps</italic>&#x2009;&#x2264;&#x2009;0.011); however, there was no significant difference among the central, left posterior and right posterior (all <italic>ps</italic>&#x2009;&#x2265;&#x2009;0.208) or between the right anterior and left anterior (<italic>p</italic>&#x2009;&#x003E;&#x2009;0.999). The <italic>post hoc</italic> analysis for ROIs showed higher amplitudes in the left anterior (<italic>p</italic>&#x2009;=&#x2009;0.001) and right anterior (<italic>p</italic>&#x2009;=&#x2009;0.001) but lower amplitudes in the left posterior (<italic>p</italic>&#x2009;=&#x2009;0.039) and right posterior (<italic>p</italic>&#x2009;=&#x2009;0.026) for younger adults than for older adults, but there was no significant difference in the central region between older and younger adults (<italic>p</italic>&#x2009;=&#x2009;0.236; <xref rid="fig4" ref-type="fig">Figure 4</xref>).</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Grand-averaged event-related potentials <bold>(A)</bold> and topography map of audiovisual integration <bold>(B)</bold> for older and younger adults at intervals of 220&#x2013;240, 290&#x2013;310, and 400&#x2013;420&#x2009;ms in the discrimination task. Grand-averaged event-related potentials of the left anterior are the mean amplitudes of FC5, F3, and FC1; right anterior are the mean amplitudes of FC6, F4, and FC2; central are the mean amplitudes of C3, Cz, and C4; left posterior are the mean amplitudes of CP5, P3, and CP1; and right posterior are the mean amplitudes of CP6, P4, and CP2. The time interval where audiovisual integration occurred is marked with gray squares in the ERP waves, and the darker the color (the larger the absolute value) on the topographic map, the stronger the audiovisual integration.</p>
</caption>
<graphic xlink:href="fnagi-15-1151652-g004.tif"/>
</fig>
</sec>
</sec>
</sec>
</sec>
<sec id="sec24" sec-type="discussions">
<label>4.</label>
<title>Discussion</title>
<p>The aim of the current study was to investigate when the aging effect occurred during auditory and visual merging processing and its neural mechanism using a detection task and discrimination task. The results found that during stimulus detection, no significant AVI difference was found between older and younger adults behaviorally; however, a higher AVI (220&#x2013;240&#x2009;ms) was found in the left posterior for younger adults, but there was no significant difference between brain regions for older adults. AVI was lower for older adults than for younger adults during stimulus discrimination, and attenuated AVI mainly occurred in the 290&#x2013;310&#x2009;ms time interval.</p>
<sec id="sec25">
<label>4.1.</label>
<title>Comparable AVI in the stimulus detection stage</title>
<p>Inconsistent with Peiffer et al.&#x2019;s study (<xref ref-type="bibr" rid="ref27">Peiffer et al., 2007</xref>), our present study found that the AVI was comparable between older and younger adults in the detection task. There is general age-related slowing in uni-sensory and multisensory responses (<xref ref-type="bibr" rid="ref26">Paige and Gutchess, 2017</xref>; <xref ref-type="bibr" rid="ref1">Anderson, 2019</xref>; <xref ref-type="bibr" rid="ref19">Jones and Noppeney, 2021</xref>), even in simple reaction times (<xref ref-type="bibr" rid="ref4">Cliff et al., 2013</xref>); however, Peiffer et al. reported no significant difference in uni-sensory responses but a faster multisensory response for older adults than for younger adults. When the AVI was calculated, the appearance of enhanced AVI for older adults was observed, which might be an epiphenomenon and a unique report (<xref ref-type="bibr" rid="ref27">Peiffer et al., 2007</xref>). Consistent with numerous previous studies, the response to uni-sensory and multisensory stimuli was slower for older adults than for younger adults in the current study (<xref ref-type="bibr" rid="ref21">Laurienti et al., 2006</xref>; <xref ref-type="bibr" rid="ref13">Grady, 2012</xref>; <xref ref-type="bibr" rid="ref7">Diaconescu et al., 2013</xref>; <xref ref-type="bibr" rid="ref36">Ren et al., 2020c</xref>), which further led to delayed AVI (<xref ref-type="bibr" rid="ref21">Laurienti et al., 2006</xref>; <xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>, <xref ref-type="bibr" rid="ref30">2021</xref>). However, we first reported that the quantification of AVI was equivalent for the two age groups during simple meaningless stimulus detection.</p>
<p>In addition, consistent with the behavioral results, the current ERP analysis also showed no significant difference in the AVI amplitudes between older and younger adults at 220&#x2013;240&#x2009;ms, but further pairwise comparison showed no significant difference between ROIs for older adults but a higher AVI amplitude in the right posterior than the others for younger adults. With aging, brain structural and functional variables have been reported extensively, focusing on the core construct of compensatory scaffolding (<xref ref-type="bibr" rid="ref12">Goh and Park, 2009</xref>; <xref ref-type="bibr" rid="ref37">Reuter-Lorenz and Park, 2014</xref>). Studies have found that different from that for younger adults, the older adults recruited traditional unimodal information processing brain regions (<xref ref-type="bibr" rid="ref34">Ren et al., 2018</xref>) and associated brain region (<xref ref-type="bibr" rid="ref7">Diaconescu et al., 2013</xref>; <xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>) to process bimodal audiovisual information by reducing lateralization. Together with the behavioral and EEG results, we proposed that although there was no obvious diversity in behavioral expression, different neural representations occurred (<xref ref-type="bibr" rid="ref12">Goh and Park, 2009</xref>; <xref ref-type="bibr" rid="ref37">Reuter-Lorenz and Park, 2014</xref>), specifically reduced lateralization (<xref ref-type="bibr" rid="ref10">Freiherr et al., 2013</xref>; <xref ref-type="bibr" rid="ref36">Ren et al., 2020c</xref>) and shifted AVI regions (<xref ref-type="bibr" rid="ref5">Davis et al., 2007</xref>; <xref ref-type="bibr" rid="ref32">Ren et al., 2020a</xref>). However, considering the low spatial resolution of EEG studies, further fMRI studies are needed.</p>
</sec>
<sec id="sec26">
<label>4.2.</label>
<title>Lower AVI in the stimulus discrimination stage</title>
<p>Consistent with previous studies, the AVI was lower for older adults than for younger adults during the discrimination of meaningless auditory and visual stimuli (<xref ref-type="bibr" rid="ref50">Wu et al., 2012</xref>; <xref ref-type="bibr" rid="ref35">Ren et al., 2020b</xref>, <xref ref-type="bibr" rid="ref30">2021</xref>). Attention is a complex system in the brain that involves several different brain regions and is mainly divided into three separate but interrelated networks: alerting, orienting, and executive control. <xref ref-type="bibr" rid="ref49">Williams et al. (2016)</xref> investigated the attention network using the attention network test (ANT) during EEG recording and found that older adults showed reduced alerting but did not differ from younger adults in orienting or executive control (<xref ref-type="bibr" rid="ref17">Ishigami et al., 2015</xref>; <xref ref-type="bibr" rid="ref49">Williams et al., 2016</xref>). The AVI was higher in the attended condition than in the unattended condition (<xref ref-type="bibr" rid="ref45">Talsma and Woldorff, 2005</xref>; <xref ref-type="bibr" rid="ref43">Talsma et al., 2007</xref>, <xref ref-type="bibr" rid="ref44">2010</xref>; <xref ref-type="bibr" rid="ref46">Tang et al., 2016</xref>); therefore, attention decline might be the most likely factor in the reduced AVI for older adults. Additionally, as in the stimulus detection task, there was no significant difference in the AVI amplitude between older and younger adults during 220&#x2013;240&#x2009;ms; however, AVI-related brain regions were different.</p>
<p>Furthermore, the AVI occurred for younger adults but was absent for older adults during 290&#x2013;310&#x2009;ms, and the AVI amplitude was higher in the left anterior and right anterior for older adults but in the central, left posterior and right posterior for younger adults during 400&#x2013;420&#x2009;ms. These results indicated that the attenuated AVI for older adults might be attributed to information processing in 290&#x2013;310&#x2009;ms, which mainly involved the N2 component. In discrimination tasks, the no-go N2 in the anterior was shown to reflect response inhibition (<xref ref-type="bibr" rid="ref9">Folstein and Van Petten, 2008</xref>), and older adults have an inhibition deficit in the go/no-go task (<xref ref-type="bibr" rid="ref38">Rey-Mermet et al., 2018</xref>). Therefore, it is reasonable that the AVI was reduced for older adults in the discrimination task, and we further proposed that the aging effect of AVI occurred as early as 220&#x2013;240&#x2009;ms, but the attenuated AVI mainly occurred in the later discriminating process at 290&#x2013;310&#x2009;ms.</p>
<p>In conclusion, there was a significant aging effect during AVI in multiple stages, but the older adults retained the ability to merge cross-modal information during the simple detection task attributed to the adaptive compensation mechanism. During stimulus discrimination, the AVI was attenuated, and it mainly occurred in the later discriminating stage at 290&#x2013;310&#x2009;ms, which was attributed to the attention suppression deficit.</p>
</sec>
</sec>
<sec id="sec27" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="sec28">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by Second Affiliated Hospital of Guizhou University of Traditional Chinese Medicine. The patients/participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="sec29">
<title>Author contributions</title>
<p>YR and YL conceived and designed the experiments. RL, RQ, and JD collected the data. ZX and YR analyzed the data. YR wrote the draft manuscript with feedback from JY and WY. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="sec30" sec-type="funding-information">
<title>Funding</title>
<p>This study was partially supported by the Science and Technology Planning Project of Guizhou Province [QianKeHeJiChu-ZK (2021) General 120], the National Natural Science Foundation of China (32260198, 31800932, 31700973), and the Zhejiang Provincial Philosophy and Social Sciences Planning Project (22NDQN280YB).</p>
</sec>
<sec id="conf1" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as potential conflicts of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<ref-list>
<title>References</title>
<ref id="ref1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anderson</surname> <given-names>N. D.</given-names></name>
</person-group> (<year>2019</year>). <article-title>Cognitive neuroscience of aging</article-title>. <source>J. Gerontol.: Series B</source> <volume>74</volume>, <fpage>1083</fpage>&#x2013;<lpage>1085</lpage>. doi: <pub-id pub-id-type="doi">10.1093/geronb/gbz078</pub-id></citation>
</ref>
<ref id="ref2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brandwein</surname> <given-names>A. B.</given-names></name> <name><surname>Foxe</surname> <given-names>J. J.</given-names></name> <name><surname>Russo</surname> <given-names>N. N.</given-names></name> <name><surname>Altschuler</surname> <given-names>T. S.</given-names></name> <name><surname>Gomes</surname> <given-names>H.</given-names></name> <name><surname>Molholm</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>The development of audiovisual multisensory integration across childhood and early adolescence: a high-density electrical mapping study</article-title>. <source>Cereb. Cortex</source> <volume>21</volume>, <fpage>1042</fpage>&#x2013;<lpage>1055</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhq170</pub-id>, PMID: <pub-id pub-id-type="pmid">20847153</pub-id></citation>
</ref>
<ref id="ref3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bravo</surname> <given-names>G.</given-names></name> <name><surname>H&#x00E9;bert</surname> <given-names>R.</given-names></name></person-group> (<year>1997</year>). <article-title>Age-and education-specific reference values for the mini-mental and modified mini-mental state examinations derived from a non-demented elderly population</article-title>. <source>Int. J. Geriatr. Psychiatry</source> <volume>12</volume>, <fpage>1008</fpage>&#x2013;<lpage>1018</lpage>. doi: <pub-id pub-id-type="doi">10.1002/(sici)1099-1166(199710)12:10&#x003C;1008::aid-gps676&#x003E;3.0.co;2-a</pub-id>, PMID: <pub-id pub-id-type="pmid">9395933</pub-id></citation>
</ref>
<ref id="ref4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cliff</surname> <given-names>M.</given-names></name> <name><surname>Joyce</surname> <given-names>D. W.</given-names></name> <name><surname>Lamar</surname> <given-names>M.</given-names></name> <name><surname>Dannhauser</surname> <given-names>T.</given-names></name> <name><surname>Tracy</surname> <given-names>D. K.</given-names></name> <name><surname>Shergill</surname> <given-names>S. S.</given-names></name></person-group> (<year>2013</year>). <article-title>Aging effects on functional auditory and visual processing using fMRI with variable sensory loading</article-title>. <source>Cortex</source> <volume>49</volume>, <fpage>1304</fpage>&#x2013;<lpage>1313</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2012.04.003</pub-id>, PMID: <pub-id pub-id-type="pmid">22578707</pub-id></citation>
</ref>
<ref id="ref5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname> <given-names>S. W.</given-names></name> <name><surname>Dennis</surname> <given-names>N. A.</given-names></name> <name><surname>Daselaar</surname> <given-names>S. M.</given-names></name> <name><surname>Fleck</surname> <given-names>M. S.</given-names></name> <name><surname>Cabeza</surname> <given-names>R.</given-names></name></person-group> (<year>2007</year>). <article-title>Que PASA? The posterior-anterior shift in aging</article-title>. <source>Cereb. Cortex</source> <volume>18</volume>, <fpage>1201</fpage>&#x2013;<lpage>1209</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhm155</pub-id>, PMID: <pub-id pub-id-type="pmid">17925295</pub-id></citation>
</ref>
<ref id="ref6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Delorme</surname> <given-names>A.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name></person-group> (<year>2004</year>). <article-title>EEGLAB: an open source toolbox for analysis of single-trial EEG dynamics including independent component analysis</article-title>. <source>J. Neurosci. Methods</source> <volume>134</volume>, <fpage>9</fpage>&#x2013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jneumeth.2003.10.009</pub-id>, PMID: <pub-id pub-id-type="pmid">15102499</pub-id></citation>
</ref>
<ref id="ref7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Diaconescu</surname> <given-names>A. O.</given-names></name> <name><surname>Hasher</surname> <given-names>L.</given-names></name> <name><surname>McIntosh</surname> <given-names>A. R.</given-names></name></person-group> (<year>2013</year>). <article-title>Visual dominance and multisensory integration changes with age</article-title>. <source>NeuroImage</source> <volume>65</volume>, <fpage>152</fpage>&#x2013;<lpage>166</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.09.057</pub-id>, PMID: <pub-id pub-id-type="pmid">23036447</pub-id></citation>
</ref>
<ref id="ref8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Doehrmann</surname> <given-names>O.</given-names></name> <name><surname>Naumer</surname> <given-names>M. J.</given-names></name></person-group> (<year>2008</year>). <article-title>Semantics and the multisensory brain: how meaning modulates processes of audio-visual integration</article-title>. <source>Brain Res.</source> <volume>1242</volume>, <fpage>136</fpage>&#x2013;<lpage>150</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.brainres.2008.03.071</pub-id>, PMID: <pub-id pub-id-type="pmid">18479672</pub-id></citation>
</ref>
<ref id="ref9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Folstein</surname> <given-names>J. R.</given-names></name> <name><surname>Van Petten</surname> <given-names>C.</given-names></name></person-group> (<year>2008</year>). <article-title>Influence of cognitive control and mismatch on the N2 component of the ERP: A review</article-title>. <source>Psychophysiology</source> <volume>45</volume>, <fpage>152</fpage>&#x2013;<lpage>170</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.2007.00602.x</pub-id>, PMID: <pub-id pub-id-type="pmid">17850238</pub-id></citation>
</ref>
<ref id="ref10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Freiherr</surname> <given-names>J.</given-names></name> <name><surname>Lundstr&#x00F6;m</surname> <given-names>J.</given-names></name> <name><surname>Habel</surname> <given-names>U.</given-names></name> <name><surname>Reetz</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Multisensory integration mechanisms during aging</article-title>. <source>Front. Hum. Neurosci.</source> <volume>7</volume>:<fpage>863</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2013.00863</pub-id>, PMID: <pub-id pub-id-type="pmid">24379773</pub-id></citation>
</ref>
<ref id="ref11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Giard</surname> <given-names>M. H.</given-names></name> <name><surname>Peronnet</surname> <given-names>F.</given-names></name></person-group> (<year>1999</year>). <article-title>Auditory-visual integration during multimodal object recognition in humans: a behavioral and electrophysiological study</article-title>. <source>J. Cogn. Neurosci.</source> <volume>11</volume>, <fpage>473</fpage>&#x2013;<lpage>490</lpage>. doi: <pub-id pub-id-type="doi">10.1162/089892999563544</pub-id>, PMID: <pub-id pub-id-type="pmid">10511637</pub-id></citation>
</ref>
<ref id="ref12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goh</surname> <given-names>J. O.</given-names></name> <name><surname>Park</surname> <given-names>D. C.</given-names></name></person-group> (<year>2009</year>). <article-title>Neuroplasticity and cognitive aging: the scaffolding theory of aging and cognition</article-title>. <source>Restor. Neurol. Neurosci.</source> <volume>27</volume>, <fpage>391</fpage>&#x2013;<lpage>403</lpage>. doi: <pub-id pub-id-type="doi">10.3233/rnn-2009-0493</pub-id>, PMID: <pub-id pub-id-type="pmid">19847066</pub-id></citation>
</ref>
<ref id="ref13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Grady</surname> <given-names>C. L.</given-names></name>
</person-group> (<year>2012</year>). <article-title>The cognitive neuroscience of ageing</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>13</volume>, <fpage>491</fpage>&#x2013;<lpage>505</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn3256</pub-id>, PMID: <pub-id pub-id-type="pmid">22714020</pub-id></citation>
</ref>
<ref id="ref14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guthrie</surname> <given-names>D.</given-names></name> <name><surname>Buchwald</surname> <given-names>J. S.</given-names></name></person-group> (<year>1991</year>). <article-title>Significance testing of difference potentials</article-title>. <source>Psychophysiology</source> <volume>28</volume>, <fpage>240</fpage>&#x2013;<lpage>244</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.1991.tb00417.x</pub-id></citation>
</ref>
<ref id="ref15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hugenschmidt</surname> <given-names>C. E.</given-names></name> <name><surname>Mozolic</surname> <given-names>J. L.</given-names></name> <name><surname>Laurienti</surname> <given-names>P. J.</given-names></name></person-group> (<year>2009</year>). <article-title>Suppression of multisensory integration by modality-specific attention in aging</article-title>. <source>Neuroreport</source> <volume>20</volume>, <fpage>349</fpage>&#x2013;<lpage>353</lpage>. doi: <pub-id pub-id-type="doi">10.1097/WNR.0b013e328323ab07</pub-id>, PMID: <pub-id pub-id-type="pmid">19218871</pub-id></citation>
</ref>
<ref id="ref16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isaev</surname> <given-names>N. K.</given-names></name> <name><surname>Stelmashook</surname> <given-names>E. V.</given-names></name> <name><surname>Genrikhs</surname> <given-names>E. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Neurogenesis and brain aging</article-title>. <source>Rev. Neurosci.</source> <volume>30</volume>, <fpage>573</fpage>&#x2013;<lpage>580</lpage>. doi: <pub-id pub-id-type="doi">10.1515/revneuro-2018-0084</pub-id></citation>
</ref>
<ref id="ref17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ishigami</surname> <given-names>Y.</given-names></name> <name><surname>Eskes</surname> <given-names>G. A.</given-names></name> <name><surname>Tyndall</surname> <given-names>A. V.</given-names></name> <name><surname>Longman</surname> <given-names>R. S.</given-names></name> <name><surname>Drogos</surname> <given-names>L. L.</given-names></name> <name><surname>Poulin</surname> <given-names>M. J.</given-names></name></person-group> (<year>2015</year>). <article-title>The attention network test-interaction (ANT-I): reliability and validity in healthy older adults</article-title>. <source>Exp. Brain Res.</source> <volume>234</volume>, <fpage>815</fpage>&#x2013;<lpage>827</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00221-015-4493-4</pub-id>, PMID: <pub-id pub-id-type="pmid">26645310</pub-id></citation>
</ref>
<ref id="ref18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jia</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Huang</surname> <given-names>F.</given-names></name> <name><surname>Du</surname> <given-names>W.</given-names></name> <name><surname>Jiang</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>A comparison of the mini-mental state examination (MMSE) with the Montreal cognitive assessment (MoCA) for mild cognitive impairment screening in Chinese middle-aged and older population: a cross-sectional study</article-title>. <source>BMC Psychiatry</source> <volume>21</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.1186/s12888-021-03495-6</pub-id></citation>
</ref>
<ref id="ref19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jones</surname> <given-names>S. A.</given-names></name> <name><surname>Noppeney</surname> <given-names>U.</given-names></name></person-group> (<year>2021</year>). <article-title>Ageing and multisensory integration: a review of the evidence, and a computational perspective</article-title>. <source>Cortex</source> <volume>138</volume>, <fpage>1</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2021.02.001</pub-id></citation>
</ref>
<ref id="ref20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jung</surname> <given-names>T.-P.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name> <name><surname>Westerfield</surname> <given-names>M.</given-names></name> <name><surname>Townsend</surname> <given-names>J.</given-names></name> <name><surname>Courchesne</surname> <given-names>E.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name></person-group> (<year>2001</year>). <article-title>Analysis and visualization of single-trial event-related potentials</article-title>. <source>Hum. Brain Mapp.</source> <volume>14</volume>, <fpage>166</fpage>&#x2013;<lpage>185</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.1050</pub-id>, PMID: <pub-id pub-id-type="pmid">11559961</pub-id></citation>
</ref>
<ref id="ref21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laurienti</surname> <given-names>P. J.</given-names></name> <name><surname>Burdette</surname> <given-names>J. H.</given-names></name> <name><surname>Maldjian</surname> <given-names>J. A.</given-names></name> <name><surname>Wallace</surname> <given-names>M. T.</given-names></name></person-group> (<year>2006</year>). <article-title>Enhanced multisensory integration in older adults</article-title>. <source>Neurobiol. Aging</source> <volume>27</volume>, <fpage>1155</fpage>&#x2013;<lpage>1163</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2005.05.024</pub-id></citation>
</ref>
<ref id="ref22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laurienti</surname> <given-names>P. J.</given-names></name> <name><surname>Kraft</surname> <given-names>R. A.</given-names></name> <name><surname>Maldjian</surname> <given-names>J. A.</given-names></name> <name><surname>Burdette</surname> <given-names>J. H.</given-names></name> <name><surname>Wallace</surname> <given-names>M. T.</given-names></name></person-group> (<year>2004</year>). <article-title>Semantic congruence is a critical factor in multisensory behavioral performance</article-title>. <source>Exp. Brain Res.</source> <volume>158</volume>, <fpage>405</fpage>&#x2013;<lpage>414</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00221-004-1913-2</pub-id>, PMID: <pub-id pub-id-type="pmid">15221173</pub-id></citation>
</ref>
<ref id="ref23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Makeig</surname> <given-names>S.</given-names></name> <name><surname>Jung</surname> <given-names>T.-P.</given-names></name> <name><surname>Bell</surname> <given-names>A. J.</given-names></name> <name><surname>Ghahremani</surname> <given-names>D.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name></person-group> (<year>1997</year>). <article-title>Blind separation of auditory event-related brain responses into independent components</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>94</volume>, <fpage>10979</fpage>&#x2013;<lpage>10984</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.94.20.10979</pub-id>, PMID: <pub-id pub-id-type="pmid">9380745</pub-id></citation>
</ref>
<ref id="ref24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miller</surname> <given-names>J.</given-names></name>
</person-group> (<year>1982</year>). <article-title>Divided attention: evidence for coactivation with redundant signals</article-title>. <source>Cogn. Psychol.</source> <volume>14</volume>, <fpage>247</fpage>&#x2013;<lpage>279</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0010-0285(82)90010-X</pub-id>, PMID: <pub-id pub-id-type="pmid">7083803</pub-id></citation>
</ref>
<ref id="ref25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miller</surname> <given-names>J.</given-names></name>
</person-group> (<year>1986</year>). <article-title>Timecourse of coactivation in bimodal divided attention</article-title>. <source>Percept. Psychophys.</source> <volume>40</volume>, <fpage>331</fpage>&#x2013;<lpage>343</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03203025</pub-id>, PMID: <pub-id pub-id-type="pmid">3786102</pub-id></citation>
</ref>
<ref id="ref26">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Paige</surname> <given-names>L. E.</given-names></name> <name><surname>Gutchess</surname> <given-names>A. H.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>Cognitive neuroscience of aging</article-title>&#x201D; in <source>Encyclopedia of Geropsychology</source>. ed. <person-group person-group-type="editor">
<name><surname>Pachana</surname> <given-names>N.</given-names></name>
</person-group> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>)</citation>
</ref>
<ref id="ref27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peiffer</surname> <given-names>A. M.</given-names></name> <name><surname>Mozolic</surname> <given-names>J. L.</given-names></name> <name><surname>Hugenschmidt</surname> <given-names>C. E.</given-names></name> <name><surname>Laurienti</surname> <given-names>P. J.</given-names></name></person-group> (<year>2007</year>). <article-title>Age-related multisensory enhancement in a simple audiovisual detection task</article-title>. <source>Neuroreport</source> <volume>18</volume>, <fpage>1077</fpage>&#x2013;<lpage>1081</lpage>. doi: <pub-id pub-id-type="doi">10.1097/WNR.0b013e3281e72ae7</pub-id></citation>
</ref>
<ref id="ref28">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pennartz</surname> <given-names>C. M. A.</given-names></name>
</person-group> (<year>2015</year>). <source>The Brain&#x2019;s Representational Power</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>The MIT Press</publisher-name></citation>
</ref>
<ref id="ref29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pronina</surname> <given-names>M. V.</given-names></name> <name><surname>Ponomarev</surname> <given-names>V. A.</given-names></name> <name><surname>Kropotov</surname> <given-names>Y. D.</given-names></name></person-group> (<year>2022</year>). <article-title>Effect of task complexity on the post-movement Beta synchronization in the sensorimotor cortex</article-title>. <source>J. Evol. Biochem. Physiol.</source> <volume>58</volume>, <fpage>1905</fpage>&#x2013;<lpage>1913</lpage>. doi: <pub-id pub-id-type="doi">10.1134/S0022093022060199</pub-id></citation>
</ref>
<ref id="ref30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Hou</surname> <given-names>Y.</given-names></name> <name><surname>Huang</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>F.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Ren</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Sustained auditory Attentional load decreases audiovisual integration in older and younger adults</article-title>. <source>Neural Plast.</source> <volume>2021</volume>, <fpage>4516133</fpage>&#x2013;<lpage>4516110</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2021/4516133</pub-id>, PMID: <pub-id pub-id-type="pmid">34221001</pub-id></citation>
</ref>
<ref id="ref31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Z.</given-names></name> <name><surname>Luo</surname> <given-names>R.</given-names></name> <name><surname>Ping</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Sustained visual attentional load modulates audiovisual integration in older and younger adults</article-title>. <source>i-Perception</source> <volume>14</volume>:<fpage>20416695231157348</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20416695231157348</pub-id>, PMID: <pub-id pub-id-type="pmid">36845028</pub-id></citation>
</ref>
<ref id="ref32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name></person-group> (<year>2020a</year>). <article-title>Age-related shifts in theta oscillatory activity during audio-visual integration regardless of visual Attentional load</article-title>. <source>Front. Aging Neurosci.</source> <volume>12</volume>:<fpage>571950</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnagi.2020.571950</pub-id>, PMID: <pub-id pub-id-type="pmid">33192463</pub-id></citation>
</ref>
<ref id="ref33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Zhao</surname> <given-names>N.</given-names></name> <name><surname>Hou</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Ren</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Auditory attentional load attenuates age-related audiovisual integration: an EEG study</article-title>. <source>Neuropsychologia</source> <volume>174</volume>:<fpage>108346</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2022.108346</pub-id>, PMID: <pub-id pub-id-type="pmid">35973479</pub-id></citation>
</ref>
<ref id="ref34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name> <name><surname>Tang</surname> <given-names>X.</given-names></name> <name><surname>Wu</surname> <given-names>F.</given-names></name> <name><surname>Wu</surname> <given-names>Q.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Comparison for younger and older adults: stimulus temporal asynchrony modulates audiovisual integration</article-title>. <source>Int. J. Psychophysiol.</source> <volume>124</volume>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2017.12.004</pub-id>, PMID: <pub-id pub-id-type="pmid">29248668</pub-id></citation>
</ref>
<ref id="ref35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Z.</given-names></name> <name><surname>Lu</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name></person-group> (<year>2020b</year>). <article-title>Stimulus specific to age-related audio-visual integration in discrimination tasks</article-title>. <source>i-Perception</source> <volume>11</volume>, <fpage>1</fpage>&#x2013;<lpage>14</lpage>. doi: <pub-id pub-id-type="doi">10.1177/2041669520978419</pub-id>, PMID: <pub-id pub-id-type="pmid">33403096</pub-id></citation>
</ref>
<ref id="ref36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name></person-group> (<year>2020c</year>). <article-title>Age-related alterations in audiovisual integration: a brief overview</article-title>. <source>Psychologia</source> <volume>62</volume>, <fpage>233</fpage>&#x2013;<lpage>252</lpage>. doi: <pub-id pub-id-type="doi">10.2117/psysoc.2020-A002</pub-id></citation>
</ref>
<ref id="ref37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reuter-Lorenz</surname> <given-names>P. A.</given-names></name> <name><surname>Park</surname> <given-names>D. C.</given-names></name></person-group> (<year>2014</year>). <article-title>How does it STAC up? Revisiting the scaffolding theory of aging and cognition</article-title>. <source>Neuropsychol. Rev.</source> <volume>24</volume>, <fpage>355</fpage>&#x2013;<lpage>370</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11065-014-9270-9</pub-id>, PMID: <pub-id pub-id-type="pmid">25143069</pub-id></citation>
</ref>
<ref id="ref38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rey-Mermet</surname> <given-names>A.</given-names></name> <name><surname>Gade</surname> <given-names>M.</given-names></name> <name><surname>Oberauer</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>Should we stop thinking about inhibition? Searching for individual and age differences in inhibition ability</article-title>. <source>J. Exp. Psychol. Learn. Mem. Cogn.</source> <volume>44</volume>, <fpage>501</fpage>&#x2013;<lpage>526</lpage>. doi: <pub-id pub-id-type="doi">10.1037/xlm0000450</pub-id>, PMID: <pub-id pub-id-type="pmid">28956944</pub-id></citation>
</ref>
<ref id="ref39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scheliga</surname> <given-names>S.</given-names></name> <name><surname>Kellermann</surname> <given-names>T.</given-names></name> <name><surname>Lampert</surname> <given-names>A.</given-names></name> <name><surname>Rolke</surname> <given-names>R.</given-names></name> <name><surname>Spehr</surname> <given-names>M.</given-names></name> <name><surname>Habel</surname> <given-names>U.</given-names></name></person-group> (<year>2022</year>). <article-title>Neural correlates of multisensory integration in the human brain: An ALE meta-analysis</article-title>. <source>Rev. Neurosci.</source> <volume>34</volume>, <fpage>223</fpage>&#x2013;<lpage>245</lpage>. doi: <pub-id pub-id-type="doi">10.1515/revneuro-2022-0065</pub-id></citation>
</ref>
<ref id="ref40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Senkowski</surname> <given-names>D.</given-names></name> <name><surname>Saint-Amour</surname> <given-names>D.</given-names></name> <name><surname>Kelly</surname> <given-names>S. P.</given-names></name> <name><surname>Foxe</surname> <given-names>J. J.</given-names></name></person-group> (<year>2007</year>). <article-title>Multisensory processing of naturalistic objects in motion: a high-density electrical mapping and source estimation study</article-title>. <source>NeuroImage</source> <volume>36</volume>, <fpage>877</fpage>&#x2013;<lpage>888</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.01.053</pub-id>, PMID: <pub-id pub-id-type="pmid">17481922</pub-id></citation>
</ref>
<ref id="ref41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spotorno</surname> <given-names>S.</given-names></name> <name><surname>Masson</surname> <given-names>G. S.</given-names></name> <name><surname>Montagnini</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Fixational saccades during grating detection and discrimination</article-title>. <source>Vis. Res.</source> <volume>118</volume>, <fpage>105</fpage>&#x2013;<lpage>118</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2015.03.013</pub-id>, PMID: <pub-id pub-id-type="pmid">25849759</pub-id></citation>
</ref>
<ref id="ref42">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>B. E.</given-names></name> <name><surname>Meredith</surname> <given-names>M. A.</given-names></name></person-group> (<year>1993</year>). <source>The Merging of the Senses</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>The MIT Press</publisher-name>.</citation>
</ref>
<ref id="ref43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Talsma</surname> <given-names>D.</given-names></name> <name><surname>Doty</surname> <given-names>T. J.</given-names></name> <name><surname>Woldorff</surname> <given-names>M. G.</given-names></name></person-group> (<year>2007</year>). <article-title>Selective attention and audiovisual integration: is attending to both modalities a prerequisite for early integration?</article-title> <source>Cereb. Cortex</source> <volume>17</volume>, <fpage>679</fpage>&#x2013;<lpage>690</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhk016</pub-id>, PMID: <pub-id pub-id-type="pmid">16707740</pub-id></citation>
</ref>
<ref id="ref44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Talsma</surname> <given-names>D.</given-names></name> <name><surname>Senkowski</surname> <given-names>D.</given-names></name> <name><surname>Soto-Faraco</surname> <given-names>S.</given-names></name> <name><surname>Woldorff</surname> <given-names>M. G.</given-names></name></person-group> (<year>2010</year>). <article-title>The multifaceted interplay between attention and multisensory integration</article-title>. <source>Trends Cogn. Sci.</source> <volume>14</volume>, <fpage>400</fpage>&#x2013;<lpage>410</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2010.06.008</pub-id>, PMID: <pub-id pub-id-type="pmid">20675182</pub-id></citation>
</ref>
<ref id="ref45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Talsma</surname> <given-names>D.</given-names></name> <name><surname>Woldorff</surname> <given-names>M. G.</given-names></name></person-group> (<year>2005</year>). <article-title>Selective attention and multisensory integration: multiple phases of effects on the evoked brain activity</article-title>. <source>J. Cogn. Neurosci.</source> <volume>17</volume>, <fpage>1098</fpage>&#x2013;<lpage>1114</lpage>. doi: <pub-id pub-id-type="doi">10.1162/0898929054475172</pub-id>, PMID: <pub-id pub-id-type="pmid">16102239</pub-id></citation>
</ref>
<ref id="ref46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>X.</given-names></name> <name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name></person-group> (<year>2016</year>). <article-title>The interactions of multisensory integration with endogenous and exogenous attention</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>61</volume>, <fpage>208</fpage>&#x2013;<lpage>224</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2015.11.002</pub-id>, PMID: <pub-id pub-id-type="pmid">26546734</pub-id></citation>
</ref>
<ref id="ref47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>P.</given-names></name> <name><surname>Li</surname> <given-names>D.</given-names></name> <name><surname>Niu</surname> <given-names>Y.</given-names></name> <name><surname>Yan</surname> <given-names>T.</given-names></name> <name><surname>Li</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Increased functional brain network efficiency during audiovisual temporal asynchrony integration task in aging</article-title>. <source>Front. Aging Neurosci.</source> <volume>10</volume>:<fpage>316</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnagi.2018.00316</pub-id>, PMID: <pub-id pub-id-type="pmid">30356825</pub-id></citation>
</ref>
<ref id="ref48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>W.</given-names></name> <name><surname>Yan</surname> <given-names>T.</given-names></name> <name><surname>Song</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Beta-band functional connectivity influences audiovisual integration in older age: an EEG study</article-title>. <source>Front. Aging Neurosci.</source> <volume>9</volume>:<fpage>239</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnagi.2017.00239</pub-id>, PMID: <pub-id pub-id-type="pmid">28824411</pub-id></citation>
</ref>
<ref id="ref49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Williams</surname> <given-names>R. S.</given-names></name> <name><surname>Biel</surname> <given-names>A. L.</given-names></name> <name><surname>Wegier</surname> <given-names>P.</given-names></name> <name><surname>Lapp</surname> <given-names>L. K.</given-names></name> <name><surname>Dyson</surname> <given-names>B. J.</given-names></name> <name><surname>Spaniol</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Age differences in the attention network test: evidence from behavior and event-related potentials</article-title>. <source>Brain Cogn.</source> <volume>102</volume>, <fpage>65</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bandc.2015.12.007</pub-id>, PMID: <pub-id pub-id-type="pmid">26760449</pub-id></citation>
</ref>
<ref id="ref50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>W.</given-names></name> <name><surname>Gao</surname> <given-names>Y.</given-names></name> <name><surname>Kimura</surname> <given-names>T.</given-names></name></person-group> (<year>2012</year>). <article-title>Age-related multisensory integration elicited by peripherally presented audiovisual stimuli</article-title>. <source>Neuroreport</source> <volume>23</volume>, <fpage>616</fpage>&#x2013;<lpage>620</lpage>. doi: <pub-id pub-id-type="doi">10.1097/wnr.0b013e3283552b0f</pub-id>, PMID: <pub-id pub-id-type="pmid">22643234</pub-id></citation>
</ref>
<ref id="ref51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>W.</given-names></name> <name><surname>Yang</surname> <given-names>X.</given-names></name> <name><surname>Guo</surname> <given-names>A.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Lin</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Audiovisual integration of the dynamic hand-held tool at different stimulus intensities in aging</article-title>. <source>Front. Hum. Neurosci.</source> <volume>16</volume>:<fpage>968987</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2022.968987</pub-id>, PMID: <pub-id pub-id-type="pmid">36590067</pub-id></citation>
</ref>
</ref-list>
<fn-group>
<fn id="fn0004">
<p>
<sup>1</sup>
<ext-link xlink:href="http://sccn.ucsd.edu/eeglab/" ext-link-type="uri">http://sccn.ucsd.edu/eeglab/</ext-link>
</p>
</fn>
</fn-group>
</back>
</article>