<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Computer Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-9898</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcomp.2026.1772813</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Trust rises, attention falls: divergent effects of exposure and education in driving automation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chouchane</surname> <given-names>Hanna</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3279898"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x02013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x02013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Sakamura</surname> <given-names>Yuki</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x02013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Sato</surname> <given-names>Kenji</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x02013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Abe</surname> <given-names>Genya</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x02013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Itoh</surname> <given-names>Makoto</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2038209"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x02013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x02013; review &#x00026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Graduate School of Systems and Information Engineering, University of Tsukuba</institution>, <city>Tsukuba</city>, <country country="JP">Japan</country></aff>
<aff id="aff2"><label>2</label><institution>Autonomous Driving Research Division, Japan Automobile Research Institute</institution>, <city>Tsukuba</city>, <country country="JP">Japan</country></aff>
<aff id="aff3"><label>3</label><institution>Institute of Systems and Information Engineering, University of Tsukuba</institution>, <city>Tsukuba</city>, <country country="JP">Japan</country></aff>
<aff id="aff4"><label>4</label><institution>Center for Artificial Intelligence Research, University of Tsukuba</institution>, <city>Tsukuba</city>, <country country="JP">Japan</country></aff>
<aff id="aff5"><label>5</label><institution>Tsukuba Institute for Advanced Research, University of Tsukuba</institution>, <city>Tsukuba</city>, <country country="JP">Japan</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Hanna Chouchane, <email xlink:href="mailto:hanna@chouchane.com">hanna@chouchane.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-04-21">
<day>21</day>
<month>04</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>8</volume>
<elocation-id>1772813</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>10</day>
<month>03</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>03</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Chouchane, Sakamura, Sato, Abe and Itoh.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Chouchane, Sakamura, Sato, Abe and Itoh</copyright-holder>
<license>
<ali:license_ref start_date="2026-04-21">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Drivers supervising Level 2 automation must maintain situation awareness while the system controls steering and speed. Miscalibrated trust can contribute to overreliance and lapses in monitoring, whereas insufficient trust leads to disuse. Prolonged supervision is associated with increased mind-wandering, which can slow reactions to critical events. This study tested whether brief educational interventions affect trust, attention, and takeover readiness during Level 2 driving. Our focus on brief interventions reflects the short, time-constrained onboarding that drivers typically receive when adopting driving automation systems.</p>
</sec>
<sec>
<title>Methods</title>
<p>Fifty-five licensed drivers with no prior hands-on experience of Level 2 automation completed a 15-min automated highway drive. Participants received either minimal instruction (Basic), capability-focused education (Knowledge-based), or limitation-focused education (Rule-based). Trust was measured at four time points; additional measures captured self-reported mind-wandering, gaze behavior, and takeover reaction time.</p>
</sec>
<sec>
<title>Results</title>
<p>Trust increased significantly over time in all groups, and educational framing did not alter this trajectory. Capability-focused education enhanced monitoring of the human-machine interface on two false discovery rate corrected metrics and produced faster takeover reactions than limitation-focused education (no difference vs. Basic). Across participants, greater trust growth correlated with higher mind-wandering, while more structured gaze was associated with lower mind-wandering.</p>
</sec>
<sec>
<title>Discussion</title>
<p>Overall, trust formation appeared to be primarily associated with direct experience with system performance, whereas targeted education refined what drivers monitored and how quickly they responded. Together, these results clarify how experience primarily builds trust while education selectively sharpens attention and response readiness in automated driving. These findings highlight the distinct roles of experience and brief education in supervising automation and have implications for driver training, human-machine interface design, and gaze-based monitoring.</p>
</sec>
</abstract>
<kwd-group>
<kwd>driver education</kwd>
<kwd>gaze behavior</kwd>
<kwd>Level 2 automation</kwd>
<kwd>mind-wandering</kwd>
<kwd>situation awareness</kwd>
<kwd>supervisory attention</kwd>
<kwd>takeover performance</kwd>
<kwd>trust in automation</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Japan Society for the Promotion of Science</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100001691</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by JSPS KAKENHI Grant Number JP24H00361.</funding-statement>
</funding-group>
<counts>
<fig-count count="9"/>
<table-count count="17"/>
<equation-count count="2"/>
<ref-count count="49"/>
<page-count count="19"/>
<word-count count="12293"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Human-Media Interaction</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Level 2 driving automation, as defined by the Society of Automotive Engineers (<xref ref-type="bibr" rid="B42">SAE International, 2021</xref>), combines longitudinal control (adaptive cruise control) and lateral control (lane-keeping assistance) to relieve drivers of continuous manual steering and speed adjustments. Despite this operational support, Level 2 systems explicitly require continuous driver supervision because the automation may encounter situations beyond its operational design domain or capabilities. Drivers must therefore remain ready to resume manual control at short notice when a takeover request is issued or when traffic conditions exceed system limits. In this context, takeover performance is commonly operationalised as the latency between the takeover request and the driver&#x00027;s first control input that initiates manual control, reflecting readiness to re-engage with the driving task (<xref ref-type="bibr" rid="B31">Merat et al., 2014</xref>; <xref ref-type="bibr" rid="B16">Gold et al., 2013</xref>; <xref ref-type="bibr" rid="B15">Eriksson and Stanton, 2017</xref>).</p>
<p>This supervisory role presents a fundamental human-factors challenge. Maintaining sustained monitoring during prolonged automation is mentally demanding, particularly when the system performs reliably and the task provides limited cognitive stimulation (<xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>). Under these conditions, drivers may become overly reliant on automation, showing reduced visual scanning of the road environment and slower readiness to intervene when required (<xref ref-type="bibr" rid="B36">Parasuraman et al., 1993</xref>; <xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>; <xref ref-type="bibr" rid="B23">Inagaki and Itoh, 2013</xref>). Japan&#x00027;s road-safety initiatives, including the Ministry of Land, Infrastructure, Transport and Tourism&#x00027;s (MLIT) Advanced Safety Vehicle (ASV) Plan, have identified appropriate human supervision of driver-assistance systems as a policy priority. Similar concerns are reflected in international frameworks such as the European Union&#x00027;s Vision Zero strategy and the United Nations&#x00027; commitment to reducing global road-traffic casualties by 50 per cent by 2030.</p>
<p>Despite progress in automation design, limited understanding remains of how users learn to supervise such systems effectively. <bold>This study addresses that gap by examining how pre-drive educational framings influence trust, attention, and takeover readiness during Level 2 automation. It explores whether targeted instruction can shape drivers&#x00027; mental models and mitigate the attentional drift observed in prior research</bold>.</p>
<p>Long-form driver education is rarely delivered for Level 2 systems. In current practice, onboarding is largely informal and relies on passive materials or self-guided learning: most drivers report learning through trial-and-error whilst driving, brochures, or owner manuals, with only a minority receiving any instruction from dealers (<xref ref-type="bibr" rid="B19">Harms et al., 2020</xref>; <xref ref-type="bibr" rid="B35">Oviedo-Trespalacios et al., 2021</xref>; <xref ref-type="bibr" rid="B10">DeGuzman and Donmez, 2021</xref>). Given that such approaches offer limited time for detailed explanation, it is important to determine whether even minimal pre-drive education can shape how users form trust and supervise automation.</p>
<p>This challenge echoes the classic <italic>ironies of automation</italic> described by <xref ref-type="bibr" rid="B2">Bainbridge (1983)</xref>, who observed that the more reliable a system becomes, the more crucial and difficult the human operator&#x00027;s monitoring role becomes. As automation handles routine control, drivers are left with the demanding task of supervising rare but safety-critical situations, often without sufficient opportunity to maintain the necessary monitoring and manual-control skills. These ironies are increasingly evident in modern driving automation, where sustained reliability can erode vigilance and delay intervention.</p>
<p>Trust in automation plays a central role in how drivers allocate attention and effort during supervisory tasks (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>). In this context, trust refers to the belief that an automated system will support the user&#x00027;s goals when outcomes are uncertain or the situation involves risk. Trust influences how people choose to engage with, depend on, or avoid automation. When trust is well calibrated to system capability, drivers are more likely to maintain appropriate vigilance. Poor calibration can increase safety risks: excessive trust may lead to complacency and reduced monitoring (<xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>; <xref ref-type="bibr" rid="B36">Parasuraman et al., 1993</xref>), whereas insufficient trust can result in disuse, where drivers avoid using automation even when it could assist them (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>).</p>
<p>Trust calibration poses distinct challenges in driving automation, where incorrect assumptions about system capability can lead to immediate safety risks (<xref ref-type="bibr" rid="B24">Khastgir et al., 2018</xref>; <xref ref-type="bibr" rid="B23">Inagaki and Itoh, 2013</xref>). As <xref ref-type="bibr" rid="B23">Inagaki and Itoh (2013)</xref> note, drivers may overtrust systems beyond their functional limits, resulting in over-reliance and delayed intervention. Unlike automation in controlled industrial domains, driving automation operates within dynamic and unpredictable traffic environments that continually test system boundaries (<xref ref-type="bibr" rid="B6">Campbell et al., 2018</xref>). Such variability demands that drivers understand not only what the automation can achieve under ideal conditions but also how its performance degrades as environmental complexity increases (<xref ref-type="bibr" rid="B24">Khastgir et al., 2018</xref>).</p>
<p>A foundational model for understanding how drivers maintain effective supervision is <xref ref-type="bibr" rid="B14">Endsley (1995b)</xref>&#x00027;s three-level framework of situation awareness, which distinguishes the perception of environmental elements, their comprehension in relation to goals, and the projection of their future status. Maintaining these levels is essential for safe supervisory control and underpins subsequent theoretical developments, including the situation-awareness discrepancy model adopted in this study.</p>
<p>Theoretical models of trust help explain these mechanisms. The Confidence, Awareness, Understanding, Satisfaction, and Enactment (CAUSE) model (<xref ref-type="bibr" rid="B41">Rowan, 1991</xref>) is a structured risk-communication framework showing how sequential messaging can build trust and support behavioral change. Communicators first establish confidence, then raise awareness of risks, foster understanding, promote satisfaction with solutions, and finally encourage enactment. In the context of driver education, this suggests that the sequencing and framing of information, not only the content about system performance, shape how users form and calibrate trust. In contrast, trust-calibration models (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>) describe feedback processes through which users compare observed system behavior with prior expectations and adjust their trust accordingly (<xref ref-type="bibr" rid="B33">Merritt and Ilgen, 2008</xref>). These experiential mechanisms operate alongside educational processes that shape initial expectations before interaction begins. The Malleable Attentional Resources Theory (MART) (<xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>; <xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>) complements this view by explaining how consistent automation performance can gradually compress attentional resources. When systems operate without error, monitoring becomes confined to abbreviated scanning routines that suffice in routine conditions but may be insufficient when rare critical events occur (<xref ref-type="bibr" rid="B36">Parasuraman et al., 1993</xref>; <xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>; <xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>).</p>
<p>Empirical research supports these theoretical accounts (<xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>; <xref ref-type="bibr" rid="B17">Gold et al., 2015</xref>). Studies show that both educational information and direct experience with automation performance can influence trust development, although the relative strength of these effects remains to be determined (<xref ref-type="bibr" rid="B25">K&#x000F6;rber et al., 2018</xref>; <xref ref-type="bibr" rid="B33">Merritt and Ilgen, 2008</xref>). After extended exposure to consistent automation performance, drivers reduce their visual attention to the forward roadway, engage more often in secondary tasks, and respond more slowly to unexpected takeover requests (<xref ref-type="bibr" rid="B28">Louw and Merat, 2017</xref>; <xref ref-type="bibr" rid="B31">Merat et al., 2014</xref>; <xref ref-type="bibr" rid="B16">Gold et al., 2013</xref>). Such patterns appear even when drivers are explicitly instructed to maintain vigilance, indicating that experiential learning exerts a strong influence during ongoing interaction with automation (<xref ref-type="bibr" rid="B16">Gold et al., 2013</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>).</p>
<p>Beyond trust dynamics, a related challenge is the natural tendency for attention to drift during monotonous supervision. Mind-wandering describes attention shifting away from the external task toward self-generated thoughts and feelings. In this paper we distinguish mind-wandering from the broader phenomenon of attentional drift. Attentional drift refers to the gradual reduction of active task monitoring during low-demand automation supervision, whereas mind-wandering specifically denotes internally generated thoughts that compete with task-related attention. This state is common in low-stimulation settings and carries measurable costs for ongoing performance (<xref ref-type="bibr" rid="B44">Smallwood and Schooler, 2015</xref>). In manual driving studies, mind-wandering has been linked to changes in driver behavior and reduced responsiveness, reflecting a shift of attention from the external driving environment to internally generated thought (<xref ref-type="bibr" rid="B20">He et al., 2011</xref>; <xref ref-type="bibr" rid="B3">Baldwin et al., 2017</xref>). Evidence from takeover research further shows that drivers respond more slowly when not actively monitoring, underscoring the safety relevance of attentional drift (<xref ref-type="bibr" rid="B15">Eriksson and Stanton, 2017</xref>).</p>
<p>Driving automation creates conditions that can encourage attentional drift by reducing immediate task demand. Reduced manual control and predictable system behavior lessen immediate task demands and may draw attention away from the roadway (<xref ref-type="bibr" rid="B32">Merat et al., 2019</xref>). Such low-stimulation contexts are known to increase mind-wandering (<xref ref-type="bibr" rid="B44">Smallwood and Schooler, 2015</xref>). Empirically, automated driving alters visual-monitoring patterns relative to manual control, indicating shifts in supervisory attention (<xref ref-type="bibr" rid="B28">Louw and Merat, 2017</xref>). When attention turns inward, drivers typically need additional time to re-engage with the scene and act, a pattern that aligns with longer and more variable takeover responses when monitoring is reduced (<xref ref-type="bibr" rid="B15">Eriksson and Stanton, 2017</xref>).</p>
<p>Visual-attention patterns derived from eye gaze provide behavioral indicators that can be interpreted as proxies for attentional engagement (<xref ref-type="bibr" rid="B46">Underwood et al., 2003</xref>). Structured visual scanning, characterized by systematic transitions between the forward roadway, mirrors, and peripheral regions, reflects active monitoring of the traffic environment (<xref ref-type="bibr" rid="B46">Underwood et al., 2003</xref>). By contrast, fragmented or unsystematic scanning patterns indicate reduced situational awareness and attentional disengagement (<xref ref-type="bibr" rid="B28">Louw and Merat, 2017</xref>; <xref ref-type="bibr" rid="B43">Schnebelen et al., 2020</xref>). First-order Markov-chain analysis has recently been applied to quantify the organization of gaze transitions, allowing researchers to describe how drivers structure and maintain monitoring routines under automation (<xref ref-type="bibr" rid="B39">Rabiner and Juang, 1986</xref>; <xref ref-type="bibr" rid="B43">Schnebelen et al., 2020</xref>; <xref ref-type="bibr" rid="B7">Chouchane et al., 2026</xref>). To examine these supervisory behaviors objectively, the study analyzed gaze transitions between predefined areas of interest using eye-tracking metrics that capture the organization of visual scanning during automation supervision.</p>
<p>Given these challenges of trust miscalibration and attentional drift, driver education represents a potentially valuable intervention for supporting both calibrated trust and sustained attention during automation supervision. The concept of <italic>informed safety</italic> emphasizes that users should receive clear, comprehensive information about the capabilities and limitations of automation to calibrate trust appropriately (<xref ref-type="bibr" rid="B6">Campbell et al., 2018</xref>; <xref ref-type="bibr" rid="B24">Khastgir et al., 2018</xref>). Effective education therefore extends beyond operational instructions to include understanding of sensing principles, boundary conditions, and expected driver roles. Such knowledge supports realistic mental models and helps prevent both over-trust and disuse when supervising automation.</p>
<p>We introduce two contrasting educational framings for Level 2 supervision: Knowledge-based (capability-focused) and Rule-based (limitation-focused) education. This distinction is grounded in established models of cognitive control and supervisory behavior. Rasmussen&#x00027;s Skills-Rules-Knowledge framework (<xref ref-type="bibr" rid="B40">Rasmussen, 1983</xref>) distinguishes capability-focused behavior, which relies on reasoning with an internal model of the system in unfamiliar or variable situations, from limitation-focused behavior, which applies stored procedures to familiar signs and boundary conditions. In driving automation, capability-focused information can help drivers build richer mental models of how the system senses and controls the vehicle, consistent with cognitive load theory&#x00027;s emphasis on instruction that supports schema construction and understanding of system structure (<xref ref-type="bibr" rid="B45">Sweller et al., 2011</xref>). Such understanding contributes to the process and purpose information that Lee and See identify as a basis for calibrated trust in automation (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>). Limitation-focused information, by contrast, highlights contexts in which automated performance may degrade and the actions required from the driver, reinforcing limitation-focused preparedness for boundary conditions and helping to reduce the risk of over-trust (<xref ref-type="bibr" rid="B40">Rasmussen, 1983</xref>; <xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>). These framings therefore target complementary cognitive pathways&#x02014;conceptual understanding and conditional responding&#x02014;that may influence how drivers monitor the system and prepare for intervention during supervision.</p>
<p>Prior research on educational framing in automation contexts has produced mixed findings. Some studies suggest that providing clear information about system limitations can reduce inappropriate reliance and support monitoring (<xref ref-type="bibr" rid="B11">DeGuzman et al., 2020</xref>; <xref ref-type="bibr" rid="B25">K&#x000F6;rber et al., 2018</xref>). Other accounts emphasize that trust calibration is strongly shaped by performance feedback during use, which may limit the durability of brief pre-drive instruction (<xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>). Methodologically, many studies assess immediate, short-term effects rather than tracking how trust and monitoring evolve over extended exposure to automation (<xref ref-type="bibr" rid="B17">Gold et al., 2015</xref>). Together, these observations highlight the need to examine whether targeted educational interventions can alter trust and attentional processes during sustained automation.</p>
<p>Against this background, the present study examines whether these contrasting educational framings influence trust development, attention management operationalised through gaze behavior, and takeover readiness during Level 2 automation. Building on evidence that exposure to consistent automation performance can degrade monitoring patterns (<xref ref-type="bibr" rid="B28">Louw and Merat, 2017</xref>; <xref ref-type="bibr" rid="B31">Merat et al., 2014</xref>; <xref ref-type="bibr" rid="B9">Cooper et al., 2023</xref>), we investigate whether targeted pre-drive instruction can mitigate these effects by shaping drivers&#x00027; expectations, mental models, and supervisory strategies.</p>
<p>We adopt the <italic>situation-awareness discrepancy model</italic> introduced in earlier work (<xref ref-type="bibr" rid="B8">Chouchane et al., 2022</xref>), which conceptualizes driver supervision as the difference between the situation awareness required by the driving context (&#x003B1;) and that actually achieved by the driver (&#x003B1;&#x02032;). Educational interventions are hypothesized to sustain &#x003B1;&#x02032; (achieved monitoring) at appropriate levels, thereby reducing the discrepancy &#x003B4; &#x0003D; &#x003B1; &#x02212; &#x003B1;&#x02032;. This model extends the logic of <xref ref-type="bibr" rid="B14">Endsley (1995b)</xref>&#x00027;s three-level situation awareness framework by representing supervisory attention as a measurable balance between normative monitoring demand and descriptive driver behavior. It also aligns with the malleable attentional resources theory (MART) framework (<xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>; <xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>) and with <xref ref-type="bibr" rid="B37">Parasuraman and Riley (1997)</xref>&#x00027;s taxonomy of automation use, misuse, and disuse, linking cognitive state dynamics to observable gaze patterns.</p>
<p>Within this model, we distinguish between <italic>trust calibration</italic>, which reflects how drivers perceive and internalize the system&#x00027;s reliability and limits, and <italic>capability-focused monitoring strategies</italic>, which determine how they visually allocate attention and verify the driving scene. These components may respond to educational framing through partially independent pathways: trust primarily develops through accumulated performance feedback and may be less sensitive to brief instruction (<xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>; <xref ref-type="bibr" rid="B33">Merritt and Ilgen, 2008</xref>), whereas monitoring strategies may adapt more readily to targeted guidance (<xref ref-type="bibr" rid="B25">K&#x000F6;rber et al., 2018</xref>).</p>
<p>To test these effects, the experiment employed a between-subjects design with three educational conditions. The Basic instruction condition (reference condition) provided minimal information typical of current onboarding, whereas the capability-focused and limitation-focused conditions reflected the capability-focused and limitation-focused framings outlined above. Trust was measured at four time points throughout an 80-min session to capture both immediate post-instruction effects and subsequent evolution during automation exposure (<xref ref-type="bibr" rid="B17">Gold et al., 2015</xref>). Additional measures assessed mind-wandering (<xref ref-type="bibr" rid="B44">Smallwood and Schooler, 2015</xref>), visual scanning patterns using gaze-transition analysis (<xref ref-type="bibr" rid="B43">Schnebelen et al., 2020</xref>; <xref ref-type="bibr" rid="B46">Underwood et al., 2003</xref>), and reaction time during a critical takeover event (<xref ref-type="bibr" rid="B16">Gold et al., 2013</xref>; <xref ref-type="bibr" rid="B29">Lu et al., 2017</xref>).</p>
<p>The central research questions guiding this investigation were: (1) How does the framing of driver education influence trust dynamics over time when supervising Level 2 driving automation? (2) Does instructional framing affect drivers&#x00027; tendency to mind-wander whilst supervising automation? (3) Do different educational approaches influence visual scanning patterns during supervision? (4) Do different educational approaches influence takeover performance? (5) What relationships exist among trust development, attentional disengagement, and takeover readiness?</p>
<p>Finally, to examine how educational framing shapes supervision in Level 2 driving automation, we proposed a set of hypotheses organized according to the research questions, and derived from established theoretical models and prior empirical evidence.</p>
<p><bold>H1.1 (Trust trajectory)</bold>. Trust ratings were expected to increase significantly from baseline (T1) to post-drive assessment (T4) across repeated exposure to the automation system (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>).</p>
<p><bold>H1.2 (Educational influence on trust development)</bold>. Educational framing was expected to influence how trust developed across time (T1 to T4). Limitation-focused instruction highlighting system limitations could foster better-calibrated trust through improved understanding of boundaries (<xref ref-type="bibr" rid="B11">DeGuzman et al., 2020</xref>), whereas accumulated reliable experience might override brief instructional effects (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>).</p>
<p><bold>H2 (Educational effects on mind-wandering)</bold>. Limitation-focused education was expected to reduce mind-wandering relative to capability-focused and Basic instruction by encouraging active monitoring and sustaining achieved monitoring effort (&#x003B1;&#x02032;), thereby reducing the situation-awareness discrepancy (&#x003B4;) defined in prior work (<xref ref-type="bibr" rid="B8">Chouchane et al., 2022</xref>). This prediction aligns with theories of situation awareness and vigilance suggesting that higher monitoring engagement preserves attentional resources during prolonged automation (<xref ref-type="bibr" rid="B14">Endsley, 1995b</xref>; <xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>).</p>
<p><bold>H3 (Educational effects on gaze behavior)</bold>. Limitation-focused education was expected to produce more structured gaze patterns than the other conditions, characterized by more frequent recovery transitions to the road center and less peripheral attention (<xref ref-type="bibr" rid="B38">Price et al., 2019</xref>; <xref ref-type="bibr" rid="B8">Chouchane et al., 2022</xref>).</p>
<p><bold>H4 (Educational effects on takeover performance)</bold>. Participants receiving limitation-focused education were expected to show shorter reaction times during the takeover task, consistent with evidence that sustained attentional engagement and heightened situation awareness facilitate timely intervention (<xref ref-type="bibr" rid="B14">Endsley, 1995b</xref>; <xref ref-type="bibr" rid="B18">Greenlee et al., 2018</xref>).</p>
<p><bold>H5.1 (Trust-mind-wandering relationship)</bold>. Greater trust development (T1 to T4 change) was expected to correlate positively with mind-wandering, as increased confidence in system reliability reduces perceived monitoring demand (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>; <xref ref-type="bibr" rid="B21">Hergeth et al., 2016</xref>; <xref ref-type="bibr" rid="B47">Walker et al., 2019</xref>).</p>
<p><bold>H5.2 (Mind-wandering-gaze relationship)</bold>. Higher mind-wandering scores were expected to be associated with less structured scanning, including fewer mirror-to-road-center recoveries and more peripheral glances (<xref ref-type="bibr" rid="B28">Louw and Merat, 2017</xref>; <xref ref-type="bibr" rid="B31">Merat et al., 2014</xref>).</p>
<p>Together, these hypotheses address the research questions by linking educational framing, trust calibration, attentional state, gaze behavior, and takeover performance during Level 2 automation.</p>
</sec>
<sec sec-type="materials|methods" id="s2">
<label>2</label>
<title>Materials and methods</title>
<sec>
<label>2.1</label>
<title>Participants</title>
<p>Data were collected from 55 participants recruited through a third-party agency engaged by the Japan Automobile Research Institute (JARI), where the study took place. All participants reported no prior hands-on experience with Level 2 automation systems, and therefore represented an automation-na&#x000EF;ve sample at the time of testing.</p>
<p>The study aimed for approximately 20 participants per educational condition to provide a balanced design and reasonable sensitivity for detecting medium-to-large effects. Participants were randomly assigned to one of three groups: Basic Education (<italic>n</italic> &#x0003D; 20), capability-focused (Method 1, <italic>n</italic> &#x0003D; 15), and limitation-focused (Method 2, <italic>n</italic> &#x0003D; 20). The slight imbalance arose from normal variation in attendance and early withdrawals rather than any planned differences in allocation.</p>
<p>Of the 55 participants in the final sample, 16 self-identified as female and 39 as male (<xref ref-type="table" rid="T1">Table 1</xref>). Sex distribution was broadly comparable across groups: the Basic group comprised 6 females and 14 males, the capability-focused group 5 females and 10 males, and the limitation-focused group 5 females and 15 males. Age and self-reported annual driving mileage by education group are summarized in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Contingency table of participants by sex and education condition.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Group</th>
<th valign="top" align="center">Female</th>
<th valign="top" align="center">Male</th>
<th valign="top" align="center">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">B</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">14</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">M1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">15</td>
</tr>
<tr>
<td valign="top" align="left">M2</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">Total</td>
<td valign="top" align="center">16</td>
<td valign="top" align="center">39</td>
<td valign="top" align="center">55</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>B, Basic education; M1, capability-focused education; M2, limitation-focused education.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Descriptive statistics for age and annual driving distance (km/year) by education condition.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th/>
<th valign="top" align="center" colspan="3">Age (years)</th>
<th valign="top" align="center" colspan="3">Annual distance (km)</th>
</tr>
<tr>
<th valign="top" align="left">Statistic</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Valid cases</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">Missing</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
</tr>
<tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="center">40.2</td>
<td valign="top" align="center">36.7</td>
<td valign="top" align="center">43.6</td>
<td valign="top" align="center">16,400</td>
<td valign="top" align="center">7,407</td>
<td valign="top" align="center">14,900</td>
</tr>
<tr>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">11.6</td>
<td valign="top" align="center">10.2</td>
<td valign="top" align="center">10.9</td>
<td valign="top" align="center">13,880</td>
<td valign="top" align="center">4,782</td>
<td valign="top" align="center">14,950</td>
</tr>
<tr>
<td valign="top" align="left">Minimum</td>
<td valign="top" align="center">23</td>
<td valign="top" align="center">24</td>
<td valign="top" align="center">23</td>
<td valign="top" align="center">2,000</td>
<td valign="top" align="center">100</td>
<td valign="top" align="center">1,000</td>
</tr>
<tr>
<td valign="top" align="left">Maximum</td>
<td valign="top" align="center">57</td>
<td valign="top" align="center">56</td>
<td valign="top" align="center">59</td>
<td valign="top" align="center">50,000</td>
<td valign="top" align="center">15,000</td>
<td valign="top" align="center">70,000</td>
</tr>
<tr>
<td valign="top" align="left">25th percentile</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">29</td>
<td valign="top" align="center">34.5</td>
<td valign="top" align="center">7,250</td>
<td valign="top" align="center">2,000</td>
<td valign="top" align="center">10,000</td>
</tr>
<tr>
<td valign="top" align="left">50th percentile</td>
<td valign="top" align="center">39</td>
<td valign="top" align="center">36</td>
<td valign="top" align="center">43</td>
<td valign="top" align="center">10,000</td>
<td valign="top" align="center">10,000</td>
<td valign="top" align="center">10,000</td>
</tr>
<tr>
<td valign="top" align="left">75th percentile</td>
<td valign="top" align="center">49</td>
<td valign="top" align="center">42.5</td>
<td valign="top" align="center">52</td>
<td valign="top" align="center">24,250</td>
<td valign="top" align="center">10,000</td>
<td valign="top" align="center">15,000</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>B, Basic education; M1, capability-focused education; M2, limitation-focused education.</p>
</table-wrap-foot>
</table-wrap>
<p>All participants provided informed consent prior to the study. The protocol was reviewed and approved by the JARI Research Ethics Committee. No personally identifying data were retained; all datasets were anonymised using numerical identifiers.</p>
</sec>
<sec>
<label>2.2</label>
<title>Experimental design</title>
<p>The study employed a 3 &#x000D7; 4 mixed factorial design with Educational Condition (Basic, capability-focused, limitation-focused) as a between-subjects factor and Time (T1, T2, T3, T4) as a within-subjects factor corresponding to four measurement points throughout the session. This structure (<xref ref-type="fig" rid="F1">Figure 1</xref>) enabled examination of both immediate and evolving effects of instructional framing across the automation experience, capturing trust calibration and behavioral outcomes over time.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Experimental design. Between-subjects factor: education condition (basic, method 1, method 2). Within-subjects factor: Time (T1 to T4). Arrows indicate repeated measurements within each participant across the four time points.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0001.tif">
<alt-text content-type="machine-generated">Diagram showing three education conditions&#x02014;Basic (n equals 20), Method 1 (n equals 15), Method 2 (n equals 20)&#x02014;each assessed at four time points: baseline, post-education, post-practice, and post-drive. Boxes represent sessions; Basic uses blank boxes, Method 1 has diagonal shading, Method 2 uses crosshatch shading. Arrows indicate progression through time points.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>2.3</label>
<title>Educational interventions</title>
<p>Educational briefings were designed to examine how different communication strategies shape drivers&#x00027; understanding of automation and their expectations of system behavior. Each briefing conveyed the same core informational content: the automation supports driving but still requires active human supervision. The briefings differed in instructional emphasis (capability-focused versus limitation-focused framing). The guiding question was: <italic>How should information about system capabilities and limitations be conveyed to adjust driver expectations appropriately?</italic></p>
<p>Educational content was delivered through scripted verbal and written briefings that were standardized across participants within each condition. The three conditions held duration and delivery format constant, while varying the instructional emphasis (capability-focused versus limitation-focused framing) relative to the reference briefing.</p>
<p><bold>Basic education (reference condition):</bold> Participants received only the minimum information typically provided in current vehicle manuals or pre-drive explanations. The briefing stated: &#x0201C;<italic>The driving automation system you will use today is a partially automated driving system. When the system is activated, the vehicle can drive automatically without pedal or steering operation. However, the driver must monitor the road and surrounding environment. When an alert occurs, you must take over control of the vehicle.&#x0201D;</italic> This condition served as a reference point reflecting minimal onboarding information.</p>
<p><bold>Capability-focused education (method 1):</bold> This briefing explained how the automation system functions and perceives its surroundings. It described the system&#x00027;s control of steering and speed on highways up to 100 km/h, and how front and rear cameras detect surrounding vehicles and obstacles. Participants were told that system accuracy depends on image quality, which can deteriorate in poor weather. They were informed that the system issues an alert and disengages when reaching its operational limits, although delayed alerts may occur if sensing is impaired. This education aimed to build a general understanding of how the system works and why its performance changes under certain conditions. Visual aids illustrated camera positions and sensor coverage. The full script is provided in <xref ref-type="sec" rid="s11">Appendix 4.12.2</xref>.</p>
<p><bold>Limitation-focused education (method 2):</bold> This briefing conveyed the same core message as Method 1 but focused on operating boundaries. It explained that the system controls steering and pedals, functions in normal traffic and weather, and adjusts speed within its lane up to 100 km/h. Drivers were informed that visibility loss in rain or fog could degrade sensor performance and delay alerts. The message emphasized that when the system reaches its limits, it will issue an alert and disengage. Visual aids showed how adverse weather affects sensing. This framing encouraged drivers to monitor conditions and prepare to take control when necessary. The full script is provided in <xref ref-type="sec" rid="s11">Appendix 4.12.3</xref>.</p>
<p>All briefings were delivered by the same experimenter using standardized scripts to ensure consistency. Each lasted approximately 5 min. Participants were unaware that different educational versions existed or that the study examined trust calibration.</p>
<p>These framings were informed by Rasmussen&#x00027;s Skills-Rules-Knowledge framework (<xref ref-type="bibr" rid="B40">Rasmussen, 1983</xref>). The capability-focused condition provided conceptual information about how the system perceives the environment and controls speed and steering. The limitation-focused condition emphasized operating boundaries, environmental constraints, and the driver&#x00027;s responsibility to intervene when sensing is impaired. The Basic condition offered only minimal information typical of current onboarding. These distinctions allowed us to test how different types of pre-drive information shape supervision during Level 2 automation.</p>
</sec>
<sec>
<label>2.4</label>
<title>Driving simulator environment and critical scenario</title>
<p>The experiment was conducted using the Japan Automobile Research Institute&#x00027;s omnidirectional driving simulator, a high-fidelity system featuring a complete vehicle cabin mounted on a motion platform with 360-degree visual projection (<xref ref-type="fig" rid="F2">Figure 2</xref>). The simulator provides realistic vehicle dynamics, including motion cueing for acceleration, braking, and lateral movement, which enhances the ecological validity of driver responses during automation supervision and takeover events.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Japan Automobile Research Institute (JARI) omnidirectional driving simulator setup used in the study. The illustrations show the simulator setup <bold>(a)</bold> and projection environment <bold>(b)</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0002.tif">
<alt-text content-type="machine-generated">(a) Technical diagram of a driving simulator setup, showing an actual vehicle cabin mounted on a hexapod platform with a turntable, surrounded by a 360-degree spherical screen and three HDTV projectors. (b) Simulation view from above a car within a virtual cityscape, with curved road, buildings, ferris wheel, and blue sky appearing on a large immersive projection screen.</alt-text>
</graphic>
</fig>
<p>The simulated environment depicted a two-lane, two-direction, Japanese Expressway divided by a barrier, with moderate traffic density. Traffic consisted of computer-controlled vehicles programmed to maintain realistic speeds and following distances. The Level 2 automation system controlled the simulator vehicle&#x00027;s steering and speed throughout a 15-min driving automation segment, maintaining position in the center lane at approximately 80 km/h and adjusting speed to maintain safe following distance from lead vehicles.</p>
<p>The critical takeover scenario (<xref ref-type="fig" rid="F3">Figure 3</xref>) occurred at the conclusion of the 15-min automated segment. A lead vehicle performed an emergency braking maneuver that exceeded the automation&#x00027;s deceleration capability. The system issued a visual and auditory takeover request requiring the driver to resume manual control and apply braking to avoid collision. This scenario was designed to assess drivers&#x00027; readiness to intervene during safety-critical events following extended automation supervision.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Illustration of the critical takeover scenario. Fog impairs forward sensing and may produce a false negative due to image washout, delaying the takeover alert and resulting in a short time-to-collision (TTC) at the time of the takeover request. The shaded region represents reduced sensing/visibility under fog, and the red markers indicate the hazard zone where detection and warning may be delayed. A&#x02013;D denote the positions of the vehicle&#x00027;s sensing elements used for environment perception.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0003.tif">
<alt-text content-type="machine-generated">Diagram illustrating a foggy road scenario where a car&#x00027;s sensor fails to detect vehicles ahead due to visibility limitations, leading to delayed alerts. Labels indicate affected detection zones, alert points, and the event sequence: fog causes a delayed alert and a short time-to-collision.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>2.5</label>
<title>Measures and data collection</title>
<sec>
<label>2.5.1</label>
<title>Trust in automation</title>
<p>Trust in the driving automation system was measured using the same single-item question as <xref ref-type="bibr" rid="B1">Abe et al. (2017)</xref>. Participants responded to the prompt, &#x0201C;<italic>At this moment, how much do you trust the driving automation system?</italic>,&#x0201D; by marking a value on a continuous 0-100 scale anchored at 0 = &#x0201C;not at all&#x0201D; and 100 = &#x0201C;completely&#x0201D;. The question was administered in Japanese, with both the original Japanese and English wordings provided in <xref ref-type="sec" rid="s11">Appendix 4.10</xref>. Higher scores represent greater subjective trust in the automation.</p>
<p>Ratings were collected at four points during the experiment: baseline (T1), after the education session (T2), after the practice drive (T3), and after the driving automation session (T4). This schedule was designed to capture both the immediate effects of the education and later changes that occurred through direct interaction with the automation. A concise single-item scale was chosen to enable repeated measurement without interrupting the driving task, consistent with prior work showing that simple rating scales can effectively capture moment-to-moment changes in trust toward automation (<xref ref-type="bibr" rid="B26">Lee and Moray, 1992</xref>; <xref ref-type="bibr" rid="B34">Muir, 1996</xref>).</p>
</sec>
<sec>
<label>2.5.2</label>
<title>Mind-wandering</title>
<p>Mind-wandering was measured using a single-item retrospective scale, adapted from <xref ref-type="bibr" rid="B30">Mars et al. (2014)</xref> and translated into Japanese. Immediately after the takeover event, participants answered the question: &#x0201C;Compared with manual driving, how much time did you spend thinking about things unrelated to driving during the automated drive (before the alert)?&#x0201D; They marked their response on a continuous horizontal line ranging from 0% (&#x0201C;always thinking about driving as during manual driving&#x0201D;) to 100% (&#x0201C;never thinking about driving&#x0201D;). The complete wording in English and Japanese is provided in <xref ref-type="sec" rid="s11">Appendix 4.11</xref>.</p>
<p>A retrospective self-report method was chosen to preserve the natural flow of the automated driving experience and to avoid intrusive probe techniques (e.g., SAGAT), which can disrupt ongoing cognitive processing and alter task engagement (<xref ref-type="bibr" rid="B13">Endsley, 1995a</xref>, <xref ref-type="bibr" rid="B12">1988</xref>). This format allowed participants to reflect on their attention across the entire automated segment rather than at single moments. As noted by <xref ref-type="bibr" rid="B44">Smallwood and Schooler (2015)</xref>, mind-wandering is an internal, subjective state that is most effectively studied through self-report, especially when researchers aim to capture general fluctuations in attention without disturbing task performance.</p>
<p>Although retrospective reports depend on memory and may involve estimation error, this approach was appropriate here because it maintained an undisturbed and realistic supervision context. It therefore provided an ecologically valid measure of participants&#x00027; overall attentional disengagement during automation.</p>
</sec>
<sec>
<label>2.5.3</label>
<title>Gaze behavior</title>
<p>Eye-tracking data were recorded throughout the driving automation segment using a head-mounted NAC EMR-9 system sampling at 60 Hz. Gaze coordinates were manually annotated frame by frame into nine predefined Areas of Interest (AOIs) following ISO 15007:2020.</p>
<p>The AOI definitions, frame-by-frame annotation protocol, and transition-probability construction procedures follow our previously reported transition-analysis pipeline (<xref ref-type="bibr" rid="B7">Chouchane et al., 2026</xref>). These comprised the road center (RC; AOI 1), rear-view mirror (RVM; AOI 2), right side mirror (AOI 3), left side mirror (AOI 4), human-machine interface (HMI; AOI 5), right periphery (RP; AOI 6), left periphery (LP; AOI 7), driver side window (AOI 8) and other scene areas (AOI 9) (<xref ref-type="fig" rid="F4">Figure 4</xref>). AOIs were annotated manually frame-by-frame by a single trained annotator using a predefined annotation protocol, with decision rules for ambiguous frames to support consistency despite head-movement artifacts. Additional details of the annotation workflow and quality-control procedures are reported in our transition-analysis pipeline description (<xref ref-type="bibr" rid="B7">Chouchane et al., 2026</xref>).</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Driver-view AOI layout used for gaze annotation. The schematic indicates the forward roadway region (RC), mirrors (RVM, left/right), left/right peripheral regions (LP/RP), and the HMI/instrument cluster region, aligned with the driver&#x00027;s viewpoint in the simulator cabin.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0004.tif">
<alt-text content-type="machine-generated">Diagram showing a driver&#x02019;s cockpit view labeled with numbers indicating key areas, including a steering wheel, dashboard, side mirrors, windshield, lane markings, and roadside barriers converging toward the vanishing point on a roadway.</alt-text>
</graphic>
</fig>
<p>Gaze behavior was analyzed within a 24-s window centered on a side-vehicle cut-in event (beginning 15 s before and ending 5 s after the 4-s maneuver, occurring at 11 min 40 s into the automated drive) (<xref ref-type="fig" rid="F5">Figure 5</xref>). This epoch was selected to capture supervisory attention after extended stable automation and to elicit lateral monitoring followed by recovery to forward scanning&#x02014;conditions under which vigilance decrements are likely to emerge.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Side-vehicle cut-in scenario gaze analysis window. Gaze data were analyzed over a 24-s window (15 s before and 5 s after the 4-s cut-in), capturing both lateral monitoring and recovery to the forward roadway.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0005.tif">
<alt-text content-type="machine-generated">Diagram depicting a highway scenario where three cars are aligned in a single lane and traveling at one hundred kilometers per hour. The lead vehicle moves to the left lane, indicated by a blue dashed arrow. Distance from the ego vehicle to the next car is seventy meters, while the subsequent gap is sixty meters, or approximately two point three seconds.</alt-text>
</graphic>
</fig>
<p>Previous research on spontaneous gaze during driving automation has shown that static gaze allocation captures meaningful aspects of supervision but fails to represent the gaze structure or sequencing of glances (<xref ref-type="bibr" rid="B8">Chouchane et al., 2022</xref>, <xref ref-type="bibr" rid="B7">2026</xref>). Building on these insights, the present analysis adopted a dynamic approach using first-order Markov modeling to quantify transition probabilities between AOIs, thereby describing how drivers organized visual scanning. Similar transition-based approaches have demonstrated that gaze sequences between the road center and mirrors better discriminate attentive from out-of-the-loop states than static time-on-AOI measures (<xref ref-type="bibr" rid="B43">Schnebelen et al., 2020</xref>). Accordingly, static metrics were treated as complementary and dynamic indicators served as the primary basis for interpretation.</p>
<p>Two composite indices were constructed to summarize supervisory scanning structure. The <bold>Recovery to Road Center (RRC)</bold> captured how consistently participants redirected attention from mirror locations to the forward roadway, operationalizing the reintegration of peripheral information essential to Level 1 and Level 2 situation awareness (<xref ref-type="bibr" rid="B14">Endsley, 1995b</xref>). It was computed as:</p>
<disp-formula id="E1"><mml:math id="M1"><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:mtext>RRC</mml:mtext><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">Rear-view mirror</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext class="textrm" mathvariant="normal">Road center</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">Right mirror</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext class="textrm" mathvariant="normal">Road center</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">Left mirror</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext class="textrm" mathvariant="normal">Road center</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<p>The <bold>Structured Scanning Index (SSI)</bold> provided a z-scored, directionally weighted composite integrating recovery frequency, sustained central monitoring, and reduced peripheral dwelling:</p>
<disp-formula id="E2"><mml:math id="M2"><mml:mrow><mml:mtext>SSI</mml:mtext><mml:mo>=</mml:mo><mml:mi>z</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>RRC</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>z</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TG</mml:mtext><mml:msub><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow><mml:mrow><mml:mtext>RC</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>z</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TG</mml:mtext><mml:msub><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow><mml:mrow><mml:mtext>LP</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mtext>TG</mml:mtext><mml:msub><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow><mml:mrow><mml:mtext>RP</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where TGT<sub>RC</sub> is total glance time on the road center and TGT<sub>LP</sub>, TGT<sub>RP</sub> are total glance times on the left and right peripheries, respectively. Higher SSI values indicate more structured, forward-anchored scanning routines. Although neither RRC nor SSI directly measures &#x0201C;being in the loop,&#x0201D; both serve as operational proxies for active supervisory control.</p>
<p>In addition, <bold>HMI monitoring</bold> (the proportion of fixations directed toward instrument-cluster and center-console displays) was analyzed as an exploratory metric to examine how educational framing influenced attention to automation-status information.</p>
<p>Together, these measures capture supervisory gaze at multiple levels: static allocation, dynamic transition structure, and system-information monitoring. Under Malleable Attentional Resources Theory (<xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>), prolonged low-demand automation can contract attentional investment, which may manifest as simplified or less structured scanning. The present framework quantifies such reorganization empirically through RRC and SSI, offering transferable indicators of supervisory engagement in Level 2 driving.</p>
</sec>
<sec>
<label>2.5.4</label>
<title>Takeover performance</title>
<p>Takeover performance was evaluated using the <bold>System-recognized reaction time (SRRT)</bold> metric, defined as the interval between takeover request (TOR) onset and the first driver input exceeding the automation disengagement thresholds. Automation disengagement was defined as driver input exceeding approximately 10% accelerator stroke, 5% brake stroke, or 5 Nm steering torque, marking effective re-engagement with manual control. These thresholds were applied consistently across all participants to ensure comparability of SRRT values.</p>
</sec>
</sec>
<sec>
<label>2.6</label>
<title>Procedure</title>
<p>Each session followed a standardized 80-min protocol designed to capture behavioral, physiological, and self-report data across all phases of the automation experience while ensuring participant comfort. <xref ref-type="fig" rid="F6">Figure 6</xref> illustrates the complete timeline and timing of trust assessments (T1 to T4).</p>
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>Experimental timeline showing the full 80-min session sequence, including educational intervention, simulator familiarization, practice drive, main automated drive, and critical takeover event. Trust questionnaires (T1 to T4) were administered before and after key phases, and a retrospective mind-wandering questionnaire was completed at the end of the session. The procedure ensured standardized timing across participants while maintaining realism and comfort.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0006.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a sequential study process: Informed consent for ten minutes, driver education for ten minutes, practice drive for five minutes, eye-tracker calibration for fifteen minutes, and a Level 2 drive for seventeen minutes, with timepoints TQ1-TQ4 and MWQ marked at relevant stages.</alt-text>
</graphic>
</fig>
<p>Upon arrival at the simulator facility, participants provided written informed consent and completed demographic questionnaires. The session then progressed through the following sequence:</p>
<p><bold>T1 (Baseline trust measurement)</bold>. Participants first rated their initial level of trust in automation before receiving any detailed system information.</p>
<p><bold>Educational intervention and T2 (Post-education trust)</bold>.</p>
<p>Participants were randomly assigned to one of three instructional conditions (Basic, capability-focused, or limitation-focused; see Section 2.3). The assigned briefing was delivered by the experimenter while participants remained seated in the briefing room. Immediately after the briefing, participants provided a second trust rating (T2) to assess the immediate effect of the educational content.</p>
<p><bold>Simulator familiarization</bold>. Participants then moved to the driving simulator and completed a brief manual driving segment to acclimate to the simulator&#x00027;s steering and pedal dynamics and reduce novelty effects.</p>
<p><bold>Practice drive and T3 (Post-practice trust)</bold>. Participants then engaged the Level 2 automation during a short, non-critical practice segment on a straight motorway section. This phase provided hands-on experience with system operation and the human-machine interface. A third trust rating (T3) was collected immediately afterward.</p>
<p><bold>Eye-tracking calibration</bold>. Before the main drive, a 9-point calibration was performed for each participant using the NAC EMR-9 system, following established protocols to ensure accuracy throughout data collection.</p>
<p><bold>Main automated drive</bold>. Participants supervised the Level 2 automation for approximately 15 minutes on a simulated highway with moderate traffic. Several non-critical traffic interactions were embedded to sustain ecological validity, and continuous eye-tracking and vehicle control data were recorded. Participants were instructed to monitor the system and remain ready to resume control at any time.</p>
<p><bold>Critical takeover scenario</bold>. At the end of the automated segment, the lead vehicle performed sudden braking, triggering a visual and auditory takeover request prompting the driver to resume manual control. Takeover reaction time was recorded automatically by the simulator.</p>
<p><bold>T4 (Post-drive trust) and mind-wandering assessment</bold>. Immediately after the takeover event, participants completed the final trust questionnaire (T4) and a retrospective mind-wandering measure. A structured debriefing followed to collect qualitative feedback about the automation experience.</p>
</sec>
<sec>
<label>2.7</label>
<title>Statistical analysis</title>
<p>All analyses were performed using standard parametric and non-parametric methods according to data characteristics. Outliers were screened prior to hypothesis testing, and assumption checks were conducted for normality, sphericity, and homogeneity of variance. When Mauchly&#x00027;s test indicated violation of sphericity, Greenhouse-Geisser correction was applied, with adjusted degrees of freedom and epsilon (&#x003B5;) values reported. Levene&#x00027;s tests verified that the assumption of equal variances was met for between-group comparisons.</p>
<sec>
<label>2.7.1</label>
<title>Trust development</title>
<p>Trust ratings were analyzed using a two-way repeated-measures ANOVA with <italic>Time</italic> (T1 to T4) as the within-subjects factor and <italic>Educational Condition</italic> (Basic, capability-focused, limitation-focused) as the between-subjects factor. Greenhouse-Geisser correction was applied where required, and effect sizes are reported as partial eta-squared (<inline-formula><mml:math id="M3"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>).</p>
</sec>
<sec>
<label>2.7.2</label>
<title>Mind-wandering</title>
<p>Self-reported mind-wandering scores (MWS) were compared across education groups using one-way ANOVA. Pearson correlations were used to assess associations between mind-wandering, trust change (T4 minus T1), gaze indices (RRC, SSI), and takeover reaction time (SRRT). Effect sizes are reported as Pearson&#x00027;s <italic>r</italic>.</p>
</sec>
<sec>
<label>2.7.3</label>
<title>Gaze behavior</title>
<p>Supervisory gaze metrics were analyzed with <italic>Educational Condition</italic> as the between-subjects factor. Pre-registered scanning composites (RRC, SSI) were evaluated with Kruskal-Wallis tests due to non-normality; HMI-focused metrics (TGT<sub>5</sub>, MGD<sub>5</sub>, MGR<sub>5</sub>) were analyzed with one-way ANOVA when assumptions were met, otherwise with Kruskal-Wallis. Post-hoc comparisons used Benjamini-Hochberg false discovery rate (FDR) control within metric families (<xref ref-type="bibr" rid="B5">Benjamini and Hochberg, 1995</xref>). Both unadjusted <italic>p</italic> and adjusted <italic>q</italic><sub>BH</sub> values are reported. Pearson (or Spearman, when appropriate) correlations assessed associations between gaze metrics and mind-wandering.</p>
</sec>
<sec>
<label>2.7.4</label>
<title>Takeover performance</title>
<p>System-recognized reaction times (SRRT) were slightly right-skewed due to one valid long reaction time (22.9 s) observed in the Basic group. Because this reflected a genuine delayed takeover rather than a measurement error, the value was retained, and non-parametric methods were used. A Kruskal-Wallis test compared SRRT across <italic>Educational Conditions</italic>. When omnibus results were significant, Dunn pairwise comparisons with Holm correction were conducted. Associations between SRRT, mind-wandering, and gaze metrics were examined using Spearman correlations. Effect sizes are reported as rank-based epsilon-squared (<inline-formula><mml:math id="M4"><mml:msubsup><mml:mrow><mml:mi>&#x003B5;</mml:mi></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textrm" mathvariant="normal">rank</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>) for Kruskal-Wallis tests and Spearman&#x00027;s &#x003C1; for correlations.</p>
<p>All tests were two-tailed with an &#x003B1; level of .05. Effect sizes are interpreted as partial <inline-formula><mml:math id="M5"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> for ANOVA, <inline-formula><mml:math id="M6"><mml:msubsup><mml:mrow><mml:mi>&#x003B5;</mml:mi></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textrm" mathvariant="normal">rank</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> for non-parametric analyses, and <italic>r</italic> for correlations.</p>
</sec>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec>
<label>3.1</label>
<title>Trust development across time and educational conditions</title>
<p>This section addresses <bold>RQ1</bold> (trust dynamics) and evaluates <bold>H1.1-H1.2</bold>.</p>
<p>Findings from the repeated-measures ANOVA (<xref ref-type="table" rid="T3">Table 3</xref>) address <bold>H1.1</bold> (main effect of Time) and <bold>H1.2</bold> (Time &#x000D7; Group), both involving the within-subject factor Time. Outcomes from the between-subjects tests (<xref ref-type="table" rid="T4">Table 4</xref>) address the between-group component of <bold>H1.2</bold> (main effect of Group). Levene&#x00027;s tests verified that the assumption of equal variances was met for these between-group comparisons.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Results of repeated-measures ANOVA for within-subjects effects on trust across time.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Cases</th>
<th valign="top" align="center">Sphericity correction</th>
<th valign="top" align="center">Sum of squares</th>
<th valign="top" align="center"><italic>df</italic></th>
<th valign="top" align="center">Mean square</th>
<th valign="top" align="center"><italic>F</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
<th valign="top" align="center"><inline-formula><mml:math id="M7"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mtext>p</mml:mtext></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Time</td>
<td valign="top" align="center">Greenhouse-Geisser</td>
<td valign="top" align="center">7131.028</td>
<td valign="top" align="center">1.694</td>
<td valign="top" align="center">4,209.356</td>
<td valign="top" align="center">21.156</td>
<td valign="top" align="center">&#x0003C;0.001</td>
<td valign="top" align="center">0.092</td>
</tr>
<tr>
<td valign="top" align="left">Time &#x000D7; Group</td>
<td valign="top" align="center">Greenhouse-Geisser</td>
<td valign="top" align="center">540.780</td>
<td valign="top" align="center">3.388</td>
<td valign="top" align="center">159.608</td>
<td valign="top" align="center">0.802</td>
<td valign="top" align="center">0.509</td>
<td valign="top" align="center">0.007</td>
</tr>
<tr>
<td valign="top" align="left">Residuals</td>
<td valign="top" align="center">Greenhouse-Geisser</td>
<td valign="top" align="center">16,516.190</td>
<td valign="top" align="center">83.010</td>
<td valign="top" align="center">198.965</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>Degrees of freedom adjusted with Greenhouse-Geisser.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Between-subjects effects for education group on mean trust.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Effect</th>
<th valign="top" align="center">Sum of squares</th>
<th valign="top" align="center"><italic>df</italic></th>
<th valign="top" align="center">Mean square</th>
<th valign="top" align="center"><italic>F</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
<th valign="top" align="center"><inline-formula><mml:math id="M8"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mtext>p</mml:mtext></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Group</td>
<td valign="top" align="center">1158.012</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">579.006</td>
<td valign="top" align="center">0.621</td>
<td valign="top" align="center">0.541</td>
<td valign="top" align="center">0.015</td>
</tr>
<tr>
<td valign="top" align="left">Residuals</td>
<td valign="top" align="center">45,652.381</td>
<td valign="top" align="center">49</td>
<td valign="top" align="center">931.681</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
</tr></tbody>
</table>
</table-wrap>
<sec>
<label>3.1.1</label>
<title>Main effect of time (H1.1)</title>
<p>A repeated-measures ANOVA with Time (T1 to T4) as the within-subjects factor and education group as the between-subjects factor revealed a significant main effect of Time on trust ratings, <italic>F</italic>(1.694, 83.010) &#x0003D; 21.156, <italic>p</italic> &#x0003C; 0.001, partial &#x003B7;<sup>2</sup> &#x0003D; 0.092 (Greenhouse-Geisser corrected) (<xref ref-type="table" rid="T3">Table 3</xref>). Descriptive analyses indicated a consistent upward trend in trust scores across all educational groups from the initial time point (T1) to the final assessment (T4) (see <xref ref-type="fig" rid="F7">Figure 7</xref>). Participants in the Basic group showed an increase from <italic>M</italic> &#x0003D; 58.50 (<italic>SD</italic> &#x0003D; 19.54) at T1 to <italic>M</italic> &#x0003D; 68.00 (<italic>SD</italic> &#x0003D; 16.73) at T4. Those in the Method 1 group improved from <italic>M</italic> &#x0003D; 50.67 (<italic>SD</italic> &#x0003D; 19.81) to <italic>M</italic> &#x0003D; 64.00 (<italic>SD</italic> &#x0003D; 22.30), while Method 2 participants demonstrated a rise from <italic>M</italic> &#x0003D; 52.25 (<italic>SD</italic> &#x0003D; 18.39) to <italic>M</italic> &#x0003D; 71.75 (<italic>SD</italic> &#x0003D; 13.70). Descriptive statistics by group and time are in <xref ref-type="table" rid="T5">Table 5</xref>. <bold>These findings support H1.1</bold>, which proposed that trust would rise from the baseline (T1) to the post-drive stage (T4) as participants became more familiar with the automation system.</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>Mean trust trajectories (T1 to T4) for each education group (Basic, M1, M2).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0007.tif">
<alt-text content-type="machine-generated">Line graph comparing mean trust ratings over four time points (T1 to T4) for three groups: B (dotted line), M1 (solid line), and M2 (dashed line), showing increases across all groups with slight variations at T4.</alt-text>
</graphic>
</fig>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Descriptive statistics for trust scores at each time point (T1 to T4) by education group.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th/>
<th valign="top" align="center" colspan="3">Trust T1</th>
<th valign="top" align="center" colspan="3">Trust T2</th>
<th valign="top" align="center" colspan="3">Trust T3</th>
<th valign="top" align="center" colspan="3">Trust T4</th>
</tr>
<tr>
<th valign="top" align="left">Statistic</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
<th valign="top" align="center">B</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Valid cases</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="center">58.5</td>
<td valign="top" align="center">50.7</td>
<td valign="top" align="center">52.3</td>
<td valign="top" align="center">60.0</td>
<td valign="top" align="center">56.0</td>
<td valign="top" align="center">57.5</td>
<td valign="top" align="center">68.5</td>
<td valign="top" align="center">68.0</td>
<td valign="top" align="center">70.3</td>
<td valign="top" align="center">68.0</td>
<td valign="top" align="center">64.0</td>
<td valign="top" align="center">71.8</td>
</tr>
<tr>
<td valign="top" align="left">Standard error</td>
<td valign="top" align="center">4.37</td>
<td valign="top" align="center">5.11</td>
<td valign="top" align="center">4.11</td>
<td valign="top" align="center">3.97</td>
<td valign="top" align="center">5.59</td>
<td valign="top" align="center">3.55</td>
<td valign="top" align="center">4.43</td>
<td valign="top" align="center">4.90</td>
<td valign="top" align="center">3.45</td>
<td valign="top" align="center">3.74</td>
<td valign="top" align="center">5.76</td>
<td valign="top" align="center">3.06</td>
</tr>
<tr>
<td valign="top" align="left">Standard deviation</td>
<td valign="top" align="center">19.5</td>
<td valign="top" align="center">19.8</td>
<td valign="top" align="center">18.4</td>
<td valign="top" align="center">17.8</td>
<td valign="top" align="center">21.7</td>
<td valign="top" align="center">15.9</td>
<td valign="top" align="center">19.8</td>
<td valign="top" align="center">19.0</td>
<td valign="top" align="center">15.4</td>
<td valign="top" align="center">16.7</td>
<td valign="top" align="center">22.3</td>
<td valign="top" align="center">13.7</td>
</tr>
<tr>
<td valign="top" align="left">Minimum</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">40</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">50</td>
</tr>
<tr>
<td valign="top" align="left">Maximum</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">80</td>
<td valign="top" align="center">80</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">80</td>
<td valign="top" align="center">80</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">100</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>B, Basic; M1, capability-focused; M2, limitation-focused. T1 to T4, baseline, post-education, post-practice, post-drive.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<label>3.1.2</label>
<title>Education group effects (H1.2)</title>
<p>Overall trust did not differ by education group, <italic>F</italic><sub>(2,49)</sub> = 0.621, <italic>p</italic> &#x0003D; 0.541, <inline-formula><mml:math id="M9"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>015</mml:mn></mml:math></inline-formula> (<xref ref-type="table" rid="T4">Table 4</xref>). The Time &#x000D7; Group interaction was also non-significant, <italic>F</italic><sub>(3.388,83.010)</sub> = 0.802, <italic>p</italic> &#x0003D; 0.509, partial &#x003B7;<sup>2</sup> &#x0003D; 0.007 (<xref ref-type="table" rid="T3">Table 3</xref>), suggesting that educational framing did not lead to significant differences in how trust evolved over time. <bold>These results do not support H1.2</bold>, which predicted that educational framing would influence the development of trust across time (T1 to T4).</p>
</sec>
</sec>
<sec>
<label>3.2</label>
<title>Driver education and mind-wandering</title>
<p>This section addresses <bold>RQ2</bold> (mind-wandering) and evaluates <bold>H2</bold>, followed by <bold>RQ5</bold> (relationships) for <bold>H5.1</bold>.</p>
<p>After identifying the general pattern of trust development, we next explored whether the educational interventions affected mind-wandering during automation supervision.</p>
<sec>
<label>3.2.1</label>
<title>Education group effects on mind-wandering (H2)</title>
<p>A one-way ANOVA tested for differences in self-reported mind-wandering scores (MWS) at T4. The analysis showed no significant group differences, <italic>F</italic><sub>(2,52)</sub> = 1.571, <italic>p</italic> &#x0003D; 0.218, &#x003B7;<sup>2</sup> &#x0003D; 0.057 (<xref ref-type="table" rid="T6">Table 6</xref>). Descriptively, participants in the Method 2 condition reported higher mind-wandering (<italic>M</italic> &#x0003D; 38.75, <italic>SD</italic> &#x0003D; 23.39) than those in the Basic (<italic>M</italic> &#x0003D; 27.50, <italic>SD</italic> &#x0003D; 17.43) and Method 1 groups (<italic>M</italic> &#x0003D; 28.67, <italic>SD</italic> &#x0003D; 24.46; see <xref ref-type="table" rid="T7">Table 7</xref> and <xref ref-type="fig" rid="F8">Figure 8</xref>). This pattern contradicts the expectation that limitation-focused education would help reduce mind-wandering. <bold>These results do not support H2</bold>, which predicted that limitation-focused education would reduce mind-wandering compared with capability-focused and basic instruction.</p>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>One-way ANOVA results for mind-wandering across education groups.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Source</th>
<th valign="top" align="center">Sum of squares</th>
<th valign="top" align="center">df</th>
<th valign="top" align="center">Mean square</th>
<th valign="top" align="center"><italic>F</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
<th valign="top" align="center"><inline-formula><mml:math id="M10"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mtext>p</mml:mtext></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula></th>
<th valign="top" align="center">&#x003C9;<sup>2</sup></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Group</td>
<td valign="top" align="center">1482.462</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">741.231</td>
<td valign="top" align="center">1.571</td>
<td valign="top" align="center">0.218</td>
<td valign="top" align="center">0.057</td>
<td valign="top" align="center">0.020</td>
</tr>
<tr>
<td valign="top" align="left">Residuals</td>
<td valign="top" align="center">24,542.083</td>
<td valign="top" align="center">52</td>
<td valign="top" align="center">471.963</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
</tr></tbody>
</table>
</table-wrap>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Descriptive statistics for mind-wandering (MWS) by education group.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Group</th>
<th valign="top" align="center"><italic>N</italic></th>
<th valign="top" align="center">Mean</th>
<th valign="top" align="center">SD</th>
<th valign="top" align="center">SE</th>
<th valign="top" align="center">Coefficient of variation</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">B</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">27.50</td>
<td valign="top" align="center">17.43</td>
<td valign="top" align="center">3.90</td>
<td valign="top" align="center">0.63</td>
</tr>
<tr>
<td valign="top" align="left">M1</td>
<td valign="top" align="center">15</td>
<td valign="top" align="center">28.67</td>
<td valign="top" align="center">24.46</td>
<td valign="top" align="center">6.32</td>
<td valign="top" align="center">0.85</td>
</tr>
<tr>
<td valign="top" align="left">M2</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">38.75</td>
<td valign="top" align="center">23.39</td>
<td valign="top" align="center">5.23</td>
<td valign="top" align="center">0.60</td>
</tr></tbody>
</table>
</table-wrap>
<fig position="float" id="F8">
<label>Figure 8</label>
<caption><p>Mind-wandering scores at T4 by educational group (basic, method 1, method 2).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0008.tif">
<alt-text content-type="machine-generated">Box plot showing percentage of mind-wandering for three education conditions: B, M1, and M2. M1 has the highest variability, M2 has higher median mind-wandering than B, and B has the lowest median.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>3.2.2</label>
<title>Trust development and mind-wandering relationship (H5.1)</title>
<p>A Pearson correlation was conducted to examine the relationship between trust change (T4 minus T1) and mind-wandering scores. The analysis showed a significant positive correlation, <italic>r</italic> &#x0003D; 0.296, <italic>p</italic> &#x0003D; 0.028, 95% CI [0.034, 0.521] (<xref ref-type="table" rid="T8">Table 8</xref>). <bold>These results support H5.1</bold>. Although the correlation was weak (<italic>r</italic> &#x0003D; 0.296), a statistically significant positive association was found, indicating that participants who showed larger trust gains also reported more frequent mind-wandering.</p>
<table-wrap position="float" id="T8">
<label>Table 8</label>
<caption><p>Correlation between trust change and mind-wandering (MWS).</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Statistic</th>
<th valign="top" align="center">Trust change &#x02194; MWS</th>
<th valign="top" align="center">Value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Pearson&#x00027;s <italic>r</italic></td>
<td/>
<td valign="top" align="center">0.296</td>
</tr>
<tr>
<td valign="top" align="left"><italic>p</italic>-value</td>
<td/>
<td valign="top" align="center">0.028</td>
</tr>
<tr>
<td valign="top" align="left">95% CI (lower)</td>
<td/>
<td valign="top" align="center">0.034</td>
</tr>
<tr>
<td valign="top" align="left">95% CI (upper)</td>
<td/>
<td valign="top" align="center">0.521</td>
</tr>
<tr>
<td valign="top" align="left">Fisher&#x00027;s <italic>z</italic></td>
<td/>
<td valign="top" align="center">0.305</td>
</tr>
<tr>
<td valign="top" align="left">SE (Fisher&#x00027;s <italic>z</italic>)</td>
<td/>
<td valign="top" align="center">0.139</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec>
<label>3.3</label>
<title>Gaze behavior and performance outcomes</title>
<p>This section addresses <bold>RQ3</bold> (visual scanning) and evaluates <bold>H3</bold>, then addresses <bold>RQ5</bold> for <bold>H5.2</bold>, and finally addresses <bold>RQ4</bold> (takeover performance) for <bold>H4</bold>.</p>
<sec>
<label>3.3.1</label>
<title>Education group effects on gaze metrics (pre-registered composites) (H3)</title>
<p>Descriptive statistics for the main gaze measures are shown in <xref ref-type="table" rid="T9">Table 9</xref>. To test whether educational framing influenced supervisory gaze, omnibus Kruskal-Wallis tests were conducted on the preregistered key dependent variables (KDVs): Recovery to Road Center (RRC) and Structured Scanning Index (SSI). As shown in <xref ref-type="table" rid="T10">Table 10</xref>, none of these measures differed significantly across groups (RRC: <italic>H</italic>(2) &#x0003D; 1.37, <italic>p</italic> &#x0003D;.505; SSI: <italic>H</italic>(2) &#x0003D; 3.10, <italic>p</italic> &#x0003D;.212). <bold>These results do not support H3</bold>.</p>
<table-wrap position="float" id="T9">
<label>Table 9</label>
<caption><p>Descriptive statistics for primary gaze outcomes and key derived static indicators.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Metric</th>
<th valign="top" align="center">B (mean &#x000B1; SD)</th>
<th valign="top" align="center">M1</th>
<th valign="top" align="center">M2</th>
<th valign="top" align="center"><italic>n</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Rear-view mirror &#x02192; Road center (prob.)</td>
<td valign="top" align="center">0.548 &#x000B1; 0.348</td>
<td valign="top" align="center">0.357 &#x000B1; 0.370</td>
<td valign="top" align="center">0.505 &#x000B1; 0.376</td>
<td valign="top" align="center">42</td>
</tr>
<tr>
<td valign="top" align="left">Recovery to Road center (RRC)</td>
<td valign="top" align="center">2.543 &#x000B1; 0.937</td>
<td valign="top" align="center">2.560 &#x000B1; 0.892</td>
<td valign="top" align="center">2.293 &#x000B1; 1.194</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Structured Scanning Index (SSI, <italic>z</italic>)</td>
<td valign="top" align="center">0.100 &#x000B1; 0.411</td>
<td valign="top" align="center">&#x02212;0.134 &#x000B1; 0.522</td>
<td valign="top" align="center">0.041 &#x000B1; 0.699</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Mean glance duration: Mirrors (s)</td>
<td valign="top" align="center">0.563 &#x000B1; 0.388</td>
<td valign="top" align="center">0.455 &#x000B1; 0.247</td>
<td valign="top" align="center">0.500 &#x000B1; 0.294</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Mean glance duration: Periphery (s)</td>
<td valign="top" align="center">0.406 &#x000B1; 0.141</td>
<td valign="top" align="center">0.459 &#x000B1; 0.168</td>
<td valign="top" align="center">0.458 &#x000B1; 0.286</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Mean glance rate: Periphery (Hz)</td>
<td valign="top" align="center">0.082 &#x000B1; 0.042</td>
<td valign="top" align="center">0.135 &#x000B1; 0.075</td>
<td valign="top" align="center">0.094 &#x000B1; 0.072</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Total glance time: Periphery (s)</td>
<td valign="top" align="center">1.865 &#x000B1; 0.941</td>
<td valign="top" align="center">3.209 &#x000B1; 1.704</td>
<td valign="top" align="center">1.963 &#x000B1; 1.470</td>
<td valign="top" align="center">47</td>
</tr>
<tr>
<td valign="top" align="left">Total glance time: Road center (s)</td>
<td valign="top" align="center">11.716 &#x000B1; 4.951</td>
<td valign="top" align="center">11.178 &#x000B1; 4.801</td>
<td valign="top" align="center">11.776 &#x000B1; 5.874</td>
<td valign="top" align="center">47</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>B, Basic; M1, capability-focused; M2, limitation-focused.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T10">
<label>Table 10</label>
<caption><p>Omnibus Kruskal-Wallis tests for pre-registered scanning dependent variables.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Metric</th>
<th valign="top" align="center">Test</th>
<th valign="top" align="center"><italic>H</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Recovery to Road center (RRC)</td>
<td valign="top" align="center">Kruskal-Wallis</td>
<td valign="top" align="center">1.367</td>
<td valign="top" align="center">0.505</td>
</tr>
<tr>
<td valign="top" align="left">Structured Scanning Index (SSI)</td>
<td valign="top" align="center">Kruskal-Wallis</td>
<td valign="top" align="center">3.102</td>
<td valign="top" align="center">0.212</td>
</tr></tbody>
</table>
</table-wrap>
<p>Within the static AOI measures (<xref ref-type="table" rid="T11">Table 11</xref>), two HMI-related metrics showed significant group differences after Benjamini&#x02013;Hochberg correction: HMI total glance time (TGT<sub>5</sub>), <italic>H</italic>(2) &#x0003D; 11.02, <italic>p</italic> &#x0003D;.004, <italic>q</italic><sub>BH</sub> &#x0003D;.036, and HMI mean glance duration (MGD<sub>5</sub>), <italic>H</italic>(2) &#x0003D; 11.28, <italic>p</italic> &#x0003D;.0036, <italic>q</italic><sub>BH</sub> &#x0003D;.032. Post hoc Dunn tests indicated higher values for Method 1 compared with both Basic and Method 2 groups (pairwise <italic>q</italic>&#x00027;s = 0.006&#x02013;0.015). HMI glance rate (MGR<sub>5</sub>) followed the same pattern but did not remain significant after FDR adjustment (<italic>p</italic> &#x0003D;.006, <italic>q</italic><sub>BH</sub> &#x0003D; 0.057).</p>
<table-wrap position="float" id="T11">
<label>Table 11</label>
<caption><p>HMI-centered static gaze metrics: omnibus Kruskal-Wallis tests and Benjamini-Hochberg-adjusted Dunn post hocs.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Metric</th>
<th valign="top" align="center"><italic>H</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
<th valign="top" align="center"><italic>q</italic><sub>BH</sub></th>
<th valign="top" align="center">B vs M1 <italic>q</italic></th>
<th valign="top" align="center">M1 vs M2 <italic>q</italic></th>
<th valign="top" align="center">B vs M2 <italic>q</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">TGT<sub>5</sub> (HMI total time)</td>
<td valign="top" align="center">11.02</td>
<td valign="top" align="center">0.004</td>
<td valign="top" align="center">0.036</td>
<td valign="top" align="center">0.006</td>
<td valign="top" align="center">0.015</td>
<td valign="top" align="center">0.718</td>
</tr>
<tr>
<td valign="top" align="left">MGD<sub>5</sub> (HMI mean glance duration)</td>
<td valign="top" align="center">11.28</td>
<td valign="top" align="center">0.004</td>
<td valign="top" align="center">0.032</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">0.985</td>
</tr>
<tr>
<td valign="top" align="left">MGR<sub>5</sub> (HMI glance rate)</td>
<td valign="top" align="center">10.14</td>
<td valign="top" align="center">0.006</td>
<td valign="top" align="center">0.057</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">0.023</td>
<td valign="top" align="center">0.621</td>
</tr></tbody>
</table>
</table-wrap>
<p>Exploratory transition analyses (<xref ref-type="table" rid="T12">Table 12</xref>) revealed several nominal trends (e.g., Right-periphery &#x02192; HMI <italic>p</italic> &#x0003D; 0.051), but none survived FDR correction within the transition family. Overall, preregistered scanning metrics showed no educational effects, although exploratory HMI measures suggested greater display monitoring for the Method 1 group in the scenario.</p>
<table-wrap position="float" id="T12">
<label>Table 12</label>
<caption><p>Exploratory transition probabilities with the smallest omnibus <italic>p</italic>-values.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Transition</th>
<th valign="top" align="center"><italic>n</italic></th>
<th valign="top" align="center"><italic>H</italic></th>
<th valign="top" align="center"><italic>p</italic> (uncorr.)</th>
<th valign="top" align="center"><italic>p</italic><sub>FDR</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Right periphery &#x02192; HMI</td>
<td valign="top" align="center">45</td>
<td valign="top" align="center">5.965</td>
<td valign="top" align="center">0.051</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" align="left">Left periphery &#x02192; HMI</td>
<td valign="top" align="center">31</td>
<td valign="top" align="center">5.198</td>
<td valign="top" align="center">0.074</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" align="left">Rear-view mirror &#x02192; HMI</td>
<td valign="top" align="center">42</td>
<td valign="top" align="center">4.993</td>
<td valign="top" align="center">0.082</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" align="left">Road center &#x02192; HMI</td>
<td valign="top" align="center">45</td>
<td valign="top" align="center">4.923</td>
<td valign="top" align="center">0.085</td>
<td valign="top" align="center">0.896</td>
</tr>
<tr>
<td valign="top" align="left">Right periphery &#x02192; Left periphery</td>
<td valign="top" align="center">45</td>
<td valign="top" align="center">4.736</td>
<td valign="top" align="center">0.094</td>
<td valign="top" align="center">0.787</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>3.3.2</label>
<title>Mind-wandering and gaze behavior relationship (H5.2)</title>
<p>More structured scanning was associated with lower mind-wandering. Participants with higher Recovery to Road Center (RRC) scores reported less mind-wandering (<xref ref-type="table" rid="T13">Table 13</xref>; Pearson <italic>r</italic> &#x0003D; &#x02212;0.329, <italic>p</italic> &#x0003D;.024, 95% CI [&#x02212;0.563, &#x02212;0.046]; Spearman &#x003C1; &#x0003D; &#x02212;0.367, <italic>p</italic> &#x0003D; 0.011). The Structured Scanning Index (SSI) showed a similar pattern (Pearson <italic>r</italic> &#x0003D; &#x02212;0.364, <italic>p</italic> &#x0003D; 0.012, 95% CI [&#x02212;0.590, &#x02212;0.086]; Spearman &#x003C1; &#x0003D; &#x02212;0.367, <italic>p</italic> &#x0003D; 0.011). Although these correlations were weak to modest in magnitude, both measures showed a consistent negative association with self-reported mind-wandering. Together, the results indicate that structured scanning accounted for roughly 11&#x02013;13 per cent of the variance in mind-wandering (<italic>r</italic><sup>2</sup>), <bold>supporting H5.2</bold>.</p>
<table-wrap position="float" id="T13">
<label>Table 13</label>
<caption><p>Correlations between structured scanning composites and mind-wandering (MWS) and system-recognized reaction time (SRRT).</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><italic>x</italic></th>
<th valign="top" align="center"><italic>y</italic></th>
<th valign="top" align="center"><italic>n</italic></th>
<th valign="top" align="center">Pearson <italic>r</italic></th>
<th valign="top" align="center"><italic>p</italic></th>
<th valign="top" align="center">95% CI</th>
<th valign="top" align="center">Spearman &#x003C1;</th>
<th valign="top" align="center"><italic>p</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">RRC</td>
<td valign="top" align="center">MWS</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">&#x02212;0.329</td>
<td valign="top" align="center">0.024</td>
<td valign="top" align="center">[&#x02212;0.563, &#x02212;0.046]</td>
<td valign="top" align="center">&#x02212;0.367</td>
<td valign="top" align="center">0.011</td>
</tr>
<tr>
<td valign="top" align="left">RRC</td>
<td valign="top" align="center">SRRT (s)</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">0.090</td>
<td valign="top" align="center">0.545</td>
<td valign="top" align="center">[&#x02212;0.200, 0.370]</td>
<td valign="top" align="center">&#x02212;0.160</td>
<td valign="top" align="center">0.287</td>
</tr>
<tr>
<td valign="top" align="left">SSI</td>
<td valign="top" align="center">MWS</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">&#x02212;0.364</td>
<td valign="top" align="center">0.012</td>
<td valign="top" align="center">[&#x02212;0.590, &#x02212;0.086]</td>
<td valign="top" align="center">&#x02212;0.367</td>
<td valign="top" align="center">0.011</td>
</tr>
<tr>
<td valign="top" align="left">SSI</td>
<td valign="top" align="center">SRRT (s)</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">0.110</td>
<td valign="top" align="center">0.480</td>
<td valign="top" align="center">[&#x02212;0.190, 0.380]</td>
<td valign="top" align="center">&#x02212;0.110</td>
<td valign="top" align="center">0.473</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>3.3.3</label>
<title>Takeover performance (H4)</title>
<p>System-recognized reaction times (SRRT) differed significantly across educational groups, <italic>H</italic>(2) &#x0003D; 7.898, <italic>p</italic> &#x0003D;.019, <inline-formula><mml:math id="M11"><mml:msubsup><mml:mrow><mml:mi>&#x003B5;</mml:mi></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textrm" mathvariant="normal">rank</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mo>.</mml:mo><mml:mn>152</mml:mn></mml:math></inline-formula>, 95% CI [0.024, 0.381] (<xref ref-type="table" rid="T14">Table 14</xref>). Median SRRTs were shortest for the Method 1 group (Mdn &#x0003D; 2.145 s), followed by the Basic (Mdn &#x0003D; 2.715 s) and Method 2 groups (Mdn &#x0003D; 2.770 s; see <xref ref-type="table" rid="T15">Table 15</xref> and <xref ref-type="fig" rid="F9">Figure 9</xref>). Post hoc Dunn tests with Holm correction confirmed a significant difference between Method 1 and Method 2 (<italic>p</italic><sub>Holm</sub> &#x0003D;.015), with no other contrasts reaching significance (<xref ref-type="table" rid="T16">Table 16</xref>). <bold>These results do not support H4</bold>, which predicted shorter SRRTs for participants in the limitation-focused education condition.</p>
<table-wrap position="float" id="T14">
<label>Table 14</label>
<caption><p>Kruskal-Wallis omnibus test for SRRT across education groups.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" rowspan="2">Effect</th>
<th valign="top" align="center" rowspan="2"><italic>H</italic></th>
<th valign="top" align="center" rowspan="2">df</th>
<th valign="top" align="center" rowspan="2"><italic>p</italic></th>
<th valign="top" align="center" colspan="3">Rank &#x003B5;<sup>2</sup> (95% CI)</th>
</tr>
<tr>
<th valign="top" align="center">Estimate</th>
<th valign="top" align="center">Lower</th>
<th valign="top" align="center">Upper</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Group</td>
<td valign="top" align="center">7.898</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">0.019</td>
<td valign="top" align="center">0.152</td>
<td valign="top" align="center">0.024</td>
<td valign="top" align="center">0.381</td>
</tr></tbody>
</table>
</table-wrap>
<table-wrap position="float" id="T15">
<label>Table 15</label>
<caption><p>Descriptive statistics for SRRT by education group.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Group</th>
<th valign="top" align="center"><italic>n</italic></th>
<th valign="top" align="center">Mean</th>
<th valign="top" align="center">SD</th>
<th valign="top" align="center">Median</th>
<th valign="top" align="center">Q1</th>
<th valign="top" align="center">Q3</th>
<th valign="top" align="center">Min</th>
<th valign="top" align="center">Max</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">B</td>
<td valign="top" align="center">18</td>
<td valign="top" align="center">3.651</td>
<td valign="top" align="center">4.837</td>
<td valign="top" align="center">2.715</td>
<td valign="top" align="center">2.107</td>
<td valign="top" align="center">2.893</td>
<td valign="top" align="center">1.550</td>
<td valign="top" align="center">22.90</td>
</tr>
<tr>
<td valign="top" align="left">M1</td>
<td valign="top" align="center">14</td>
<td valign="top" align="center">2.127</td>
<td valign="top" align="center">0.557</td>
<td valign="top" align="center">2.145</td>
<td valign="top" align="center">1.723</td>
<td valign="top" align="center">2.460</td>
<td valign="top" align="center">1.230</td>
<td valign="top" align="center">3.00</td>
</tr>
<tr>
<td valign="top" align="left">M2</td>
<td valign="top" align="center">21</td>
<td valign="top" align="center">2.861</td>
<td valign="top" align="center">0.729</td>
<td valign="top" align="center">2.770</td>
<td valign="top" align="center">2.430</td>
<td valign="top" align="center">3.130</td>
<td valign="top" align="center">1.770</td>
<td valign="top" align="center">5.02</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>B, Basic; M1, capability-focused; M2, limitation-focused. One high outlier (22.9 s) was retained.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="F9">
<label>Figure 9</label>
<caption><p>System-recognized reaction times (SRRT) by education group (basic, M1, M2).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcomp-08-1772813-g0009.tif">
<alt-text content-type="machine-generated">Box plot comparing reaction times across three education conditions labeled B, M1, and M2, with each group containing one outlier above the main distribution. Reaction times cluster between zero and five for all conditions.</alt-text>
</graphic>
</fig>
<table-wrap position="float" id="T16">
<label>Table 16</label>
<caption><p>Dunn <italic>post hoc</italic> pairwise comparisons for SRRT with Holm correction.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Comparison</th>
<th valign="top" align="center"><italic>z</italic></th>
<th valign="top" align="center"><italic>r</italic><sub><italic>rb</italic></sub></th>
<th valign="top" align="center"><italic>p</italic> (raw)</th>
<th valign="top" align="center"><italic>p</italic><sub>Holm</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">B vs M1</td>
<td valign="top" align="center">1.804</td>
<td valign="top" align="center">0.373</td>
<td valign="top" align="center">0.071</td>
<td valign="top" align="center">0.143</td>
</tr>
<tr>
<td valign="top" align="left">B vs M2</td>
<td valign="top" align="center">&#x02212;1.009</td>
<td valign="top" align="center">0.188</td>
<td valign="top" align="center">0.313</td>
<td valign="top" align="center">0.313</td>
</tr>
<tr>
<td valign="top" align="left">M1 vs M2</td>
<td valign="top" align="center">&#x02212;2.802</td>
<td valign="top" align="center">0.565</td>
<td valign="top" align="center">0.005</td>
<td valign="top" align="center">0.015</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>This study investigated whether educational interventions could calibrate trust and sustain supervisory attention during Level 2 driving automation. The results suggested an asymmetry: trust formation appeared to be primarily associated with direct exposure to automation performance, whereas educational framing was associated with differences in HMI monitoring and takeover readiness, while broader scanning structure remained unaffected. Exposure was the dominant factor shaping trust trajectories (H1.1&#x02013;H1.2), while capability-focused education was associated with differences in HMI monitoring and takeover readiness (H4). Trust growth was also associated with increased mind-wandering (H5.1), and mind-wandering correlated with less structured scanning (H5.2), linking subjective and behavioral markers of supervisory disengagement. Collectively, these findings help to clarify both the limitations and the potential, targeted benefits of pre-drive education for supporting safe human-automation interaction (<xref ref-type="table" rid="T17">Table 17</xref>).</p>
<table-wrap position="float" id="T17">
<label>Table 17</label>
<caption><p>Summary of hypothesis testing outcomes.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Hypothesis</th>
<th valign="top" align="left">Prediction</th>
<th valign="top" align="left">Result</th>
<th valign="top" align="center">Support</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">H1.1</td>
<td valign="top" align="left">Trust increases across T1 to T4 during Level 2 automation.</td>
<td valign="top" align="left">Significant main effect of Time; trust rose consistently across groups.</td>
<td valign="top" align="center">&#x025A1;</td>
</tr> <tr>
<td valign="top" align="left">H1.2</td>
<td valign="top" align="left">Educational framing influences trust trajectory across time.</td>
<td valign="top" align="left">No Group or Time by Group effects; trajectories were comparable.</td>
<td valign="top" align="center">&#x000D7;</td>
</tr> <tr>
<td valign="top" align="left">H2</td>
<td valign="top" align="left">Limitation-focused education reduces mind-wandering compared with other groups.</td>
<td valign="top" align="left">No significant group differences in mind-wandering.</td>
<td valign="top" align="center">&#x000D7;</td>
</tr> <tr>
<td valign="top" align="left">H3</td>
<td valign="top" align="left">Educational framing affects structured scanning (RRC, SSI).</td>
<td valign="top" align="left">No group differences in preregistered scanning composites.</td>
<td valign="top" align="center">&#x000D7;</td>
</tr> <tr>
<td valign="top" align="left">H4</td>
<td valign="top" align="left">Limitation-focused education produces the fastest takeover reactions.</td>
<td valign="top" align="left">Group differences observed, but Method 1 (capability-focused) was faster than Method 2 (limitation-focused); prediction not met.</td>
<td valign="top" align="center">&#x000D7;</td>
</tr> <tr>
<td valign="top" align="left">H5.1</td>
<td valign="top" align="left">Greater trust increase is associated with higher mind-wandering.</td>
<td valign="top" align="left">Weak but significant positive correlation (<italic>r</italic> &#x0003D; 0.296, <italic>p</italic> &#x0003D; 0.028).</td>
<td valign="top" align="center">&#x025A1;</td>
</tr>
<tr>
<td valign="top" align="left">H5.2</td>
<td valign="top" align="left">More structured scanning is linked to lower mind-wandering.</td>
<td valign="top" align="left">Weak to modest but significant negative correlations for RRC and SSI (<italic>r</italic> &#x0003D; &#x02212;0.329, <italic>p</italic> &#x0003D; 0.024; <italic>r</italic> &#x0003D; &#x02212;0.364, <italic>p</italic> &#x0003D; 0.012).</td>
<td valign="top" align="center">&#x025A1;</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>RRC, Recovery to Road Center; SSI, Structured Scanning Index.</p>
<p>&#x025A1; = Supported; &#x000D7; = Not supported.</p>
</table-wrap-foot>
</table-wrap>
<sec>
<label>4.1</label>
<title>Trust development is primarily exposure driven</title>
<p>Trust increased across all educational conditions as participants gained exposure to Level 2 automation, supporting H1.1. This pattern is consistent with earlier evidence that direct interaction with automation gradually aligns trust with actual system performance, even when users begin with different initial information (<xref ref-type="bibr" rid="B4">Beggiato and Krems, 2013</xref>; <xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>). Neither capability-focused nor limitation-focused framing appeared to alter this trajectory (H1.2), suggesting that direct exposure to the system&#x00027;s behavior outweighed prior instruction.</p>
<p>This pattern aligns with the CAUSE model (<xref ref-type="bibr" rid="B41">Rowan, 1991</xref>), which describes effective communication as a sequential process: establishing credibility, raising awareness, promoting understanding, achieving satisfaction, and supporting enactment. The brief instructional framings in this study were unlikely to complete this process; instead, participants appeared to calibrate trust through their direct observation of system reliability. In this context, performance feedback rather than message framing likely served as the main influence on trust development.</p>
<p>The positive correlation between trust growth and mind-wandering (H5.1) indicated a potential tension between confidence in the system and supervisory engagement. Participants who showed greater increases in trust also showed lower values on monitoring-related indicators, consistent with a trust-associated reduction in supervisory engagement. This pattern is consistent with Malleable Attentional Resources Theory (MART) (<xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>; <xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>), which proposes that attentional investment contracts under low task demand. The relationship was observed across educational conditions, suggesting that reduced monitoring may emerge generally as drivers gain experience with stable automation.</p>
</sec>
<sec>
<label>4.2</label>
<title>Capability-focused education and monitoring priorities</title>
<p>Although capability-focused education did not substantially alter trust trajectories, it appeared to yield two modest benefits (H4): greater attention to the HMI and shorter system-recognized reaction times during takeover. Participants who received knowledge-based (capability-focused) instruction monitored automation status displays more frequently and responded more promptly when manual control was required. These findings suggest that a conceptual understanding of system operation may have refined monitoring selectivity and readiness for authority transfer.</p>
<p>Within Endsley&#x00027;s situation-awareness framework (<xref ref-type="bibr" rid="B14">Endsley, 1995b</xref>), these improvements may reflect an increased ability to interpret what automation feedback signals about system state. Capability-focused instruction might have rendered HMI cues more meaningful and diagnostic, enabling participants to interpret them more efficiently without compromising roadway monitoring. The faster SRRT further suggests somewhat smoother coordination during manual resumption.</p>
<p>Hence, brief conceptual instruction may provide targeted benefits by sharpening monitoring priorities and readiness, even though it does not seem to mitigate trust-related vigilance loss. These effects illustrate how education and exposure can influence distinct aspects of supervision: exposure shaping trust and perceived monitoring demand, and education fine-tuning particular monitoring behaviors.</p>
</sec>
<sec>
<label>4.3</label>
<title>Limitation-focused education and its limited impact</title>
<p>Rule-based (limitation-oriented) education showed little measurable influence on trust, attention, or takeover performance. Several factors may explain this outcome. One possible interpretation is that emphasizing system limits did not translate into actionable monitoring strategies during a stable scenario, which may have limited observable behavioral effects. The instruction listed boundary conditions such as poor weather or degraded lane markings but offered little procedural guidance on how to detect or respond to them. Without actionable cues, participants had limited opportunity to apply these rules during the stable highway scenario.</p>
<p>The motivational tone may also have contributed. Emphasizing system fallibility might have led drivers to expect that failures would occur only under specific conditions, such as poor weather or unclear lane markings. Because the simulated drive presented no such cues, participants may have focused less on continuous monitoring and more on waiting for visible signs of malfunction. This conditional form of supervision differs from the general vigilance loss associated with growing trust, yet it can be viewed as another expression of trust miscalibration, where expectations about when the system might fail are narrower than its actual range of vulnerabilities (<xref ref-type="bibr" rid="B37">Parasuraman and Riley, 1997</xref>). Moreover, the drive remained within the automation&#x00027;s operational domain, so the limitation messages were not especially relevant. Under these conditions, participants may have regarded the Limitation-focused briefing as less meaningful, which could have limited its behavioral impact.</p>
</sec>
<sec>
<label>4.4</label>
<title>Educational framing and control modes</title>
<p>The Skills-Rules-Knowledge (SRK) framework (<xref ref-type="bibr" rid="B40">Rasmussen, 1983</xref>) provides a useful lens for understanding these differences. Limitation-focused instruction depends on stored &#x0201C;if-then&#x0201D; procedures, which apply only when matching cues arise. In this study, such cues were absent, and the learned rules were therefore unlikely to be triggered. Capability-focused education, by contrast, fosters conceptual models that apply across diverse conditions. Understanding how the automation senses and acts may have improved the perceived relevance of HMI information and facilitated faster recognition of control transitions. In Rasmussen&#x00027;s framework, capability-focused processing is required when operators face unfamiliar or unexpected situations for which no stored rules apply. Such conditions are analogous to automation takeovers, where drivers must interpret system behavior and re-establish manual control.</p>
</sec>
<sec>
<label>4.5</label>
<title>Integration with theoretical frameworks</title>
<p>These findings can be interpreted within the situation-awareness discrepancy model, &#x003B4; &#x0003D; &#x003B1;&#x02212;&#x003B1;&#x02032;, introduced in earlier work (<xref ref-type="bibr" rid="B8">Chouchane et al., 2022</xref>). Within this model, trust is likely to shape perceived monitoring demand (&#x003B1;). As trust increases, drivers may judge that less active supervision is necessary. This change reflects heightened confidence in automation reliability rather than an actual reduction in the monitoring required. The observed association between higher trust and mind-wandering supports this interpretation, indicating that lower perceived demand can promote attentional drift and weaker supervisory engagement.</p>
<p>Educational interventions, by contrast, may affect achieved monitoring (&#x003B1;&#x02032;) through partially independent pathways. Capability-focused education seemed to enhance certain components of monitoring, such as HMI attention and takeover readiness. These effects represent localized gains in &#x003B1;&#x02032;, without offsetting the overall decline in perceived demand. Consequently, brief instruction may reinforce specific monitoring habits but is unlikely to counter the wider vigilance contraction associated with rising trust.</p>
<p>Malleable Attentional Resources Theory (MART) (<xref ref-type="bibr" rid="B49">Young and Stanton, 2002</xref>; <xref ref-type="bibr" rid="B48">Warm et al., 2008</xref>) provides a complementary explanation for this contraction. Under stable, low-demand automation, attentional resources tend to narrow, leading to less structured and less frequent scanning between key visual areas. The observed correlation between mind-wandering and reduced scanning organization (lower SSI and RRC) aligns with this mechanism. The CAUSE model (<xref ref-type="bibr" rid="B41">Rowan, 1991</xref>) further clarifies why instructional messages had limited effect: effective risk communication depends on credible and sequenced engagement rather than isolated briefings. Finally, performance-based trust models (<xref ref-type="bibr" rid="B27">Lee and See, 2004</xref>; <xref ref-type="bibr" rid="B22">Hoff and Bashir, 2015</xref>) help explain why direct exposure to automation performance dominated trust development.</p>
<p>Taken together, these frameworks suggest that trust most plausibly influences perceived monitoring demand, while education can refine selective monitoring behaviors. The combination of rising trust and stable system performance therefore creates conditions in which drivers monitor less actively even though objective supervisory requirements remain unchanged.</p>
</sec>
<sec>
<label>4.6</label>
<title>Practical implications</title>
<p>The results suggest that brief pre-drive education has limited influence on trust trajectories, which develop mainly through direct exposure to automation performance. Verbal or written instructions, though valuable for setting initial expectations, appear insufficient to shape how trust evolves during actual use. Nonetheless, such education can enhance comprehension and promote more focused monitoring.</p>
<p>The modest benefits of capability-focused instruction indicate that more extensive, scenario-based training could produce broader effects. Programs combining conceptual explanation with guided practice might help drivers encounter automation in varied conditions and near its operational limits, allowing better calibration of both trust and monitoring strategies. Real-time feedback from instructors could reinforce how and when to intervene, supporting balanced supervision.</p>
<p>The improvement in HMI monitoring following capability-focused instruction also points to the importance of interface transparency. Displays that provide meaningful cues, such as sensor coverage or automation confidence, can enhance monitoring, provided the information is interpretable and not overwhelming. Education and interface design may therefore work best when developed in tandem.</p>
<p>Finally, the link between scanning structure and mind-wandering highlights opportunities for driver-monitoring systems. Beyond tracking gaze-on-road duration, systems might assess how organized scanning patterns remain over time. Identifying reduced mirror-to-road transitions or increased randomness could enable early alerts before performance declines.</p>
</sec>
<sec>
<label>4.7</label>
<title>Limitations</title>
<p>Several factors limit interpretation and generalizability. First, between-group sample sizes were modest (<italic>n</italic> &#x0003D; 15&#x02013;20 per educational condition), which limits statistical power to detect small effects and interactions; null findings for group differences should therefore be interpreted cautiously. Second, the educational interventions were brief and delivered once; repeated or interactive training might yield stronger and more durable effects. Third, the simulated drive involved only one takeover event after 15 minutes of automation in stable conditions. Real driving includes greater variability in road, traffic, and weather conditions, which may influence both trust and attention. Fourth, participants were inexperienced with hands-on Level 2 automation use, and the findings therefore reflect early exposure rather than longer-term adaptation; future studies should examine whether educational benefits or trust-attention dynamics persist over extended use. Fifth, mind-wandering was measured retrospectively, which could have underestimated momentary fluctuations; incorporating concurrent or physiological measures would strengthen future assessments. Finally, the instructional materials represented one implementation of capability-focused and limitation-focused framing. Although the briefings differed in emphasis, we did not include a direct manipulation check to verify whether participants internalized the framings as intended. Given the shared informational content across the briefings, it is possible that overlap attenuated between-group differences in trust and gaze metrics, and effects should therefore be interpreted cautiously. Variations in content, delivery medium, or realism, as well as brief post-briefing checks of capability versus boundary understanding, would strengthen future validation of framing-based interventions.</p>
</sec>
<sec>
<label>4.8</label>
<title>Future directions</title>
<p>Future research should examine how educational and exposure factors interact over time and across contexts. Longitudinal studies could track trust and monitoring behavior over extended periods of real-world driving. Scenario-based training could identify how conceptual understanding and hands-on exposure combine to calibrate trust and attention effectively. Research in diverse driving environments, including urban and adverse-weather conditions, would clarify when limitation-focused education becomes most relevant. Finally, combining educational interventions with adaptive HMIs and monitoring systems could help sustain driver engagement.</p>
</sec>
<sec>
<label>4.9</label>
<title>Conclusions</title>
<p>Brief pre-drive education modulated specific monitoring behaviors but did not influence the trajectory of trust formation during Level 2 automation. Trust increased over time across all groups and was not affected by instructional framing, consistent with experience-driven adjustment to observed system performance. Capability-focused education was associated with greater attention to automation feedback and faster takeover responses compared with limitation-focused education, indicating that conceptual information may refine how drivers monitor system status without altering overall trust development.</p>
<p>Across participants, increases in trust were accompanied by patterns consistent with reduced structured monitoring, suggesting a potential trade-off between growing confidence and sustained supervisory engagement during routine automation. More structured gaze organization was associated with lower self-reported mind-wandering, supporting the interpretation of gaze metrics as behavioral proxies for attentional allocation rather than direct measures of attention.</p>
<p>These findings suggest the potential value of integrated approaches combining extended experiential training, transparent interface design, and gaze-based monitoring systems to support sustained supervisory engagement in partially automated driving.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s6">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Japan Automobile Research Institute, Research Ethics Committee. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>HC: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. YS: Methodology, Software, Writing &#x02013; review &#x00026; editing. KS: Funding acquisition, Project administration, Software, Writing &#x02013; review &#x00026; editing. GA: Conceptualization, Data curation, Funding acquisition, Investigation, Methodology, Project administration, Resources, Supervision, Writing &#x02013; review &#x00026; editing. MI: Conceptualization, Funding acquisition, Investigation, Methodology, Resources, Supervision, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<ack><title>Acknowledgments</title><p>The authors thank JARI Autonomous Driving Research Division members for the technical support and valuable discussions throughout the project development.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fcomp.2026.1772813/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fcomp.2026.1772813/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Abe</surname> <given-names>G.</given-names></name> <name><surname>Sato</surname> <given-names>K.</given-names></name> <name><surname>Itoh</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Driver trust in automated driving systems: The case of overtaking and passing</article-title>. <source>IEEE Trans. Human-Mach. Syst</source>. <volume>48</volume>, <fpage>85</fpage>&#x02013;<lpage>94</lpage>. doi: <pub-id pub-id-type="doi">10.1109/THMS.2017.2781619</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bainbridge</surname> <given-names>L.</given-names></name></person-group> (<year>1983</year>). <article-title>&#x0201C;Ironies of automation,&#x0201D;</article-title> in <source>Analysis, Design and Evaluation of Man-Machine Systems</source> (<publisher-loc>London</publisher-loc>: <publisher-name>Elsevier</publisher-name>), <fpage>129</fpage>&#x02013;<lpage>135</lpage>.</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baldwin</surname> <given-names>C. L.</given-names></name> <name><surname>Roberts</surname> <given-names>D. M.</given-names></name> <name><surname>Barragan</surname> <given-names>D.</given-names></name> <name><surname>Lee</surname> <given-names>J. D.</given-names></name> <name><surname>Lerner</surname> <given-names>N.</given-names></name> <name><surname>Higgins</surname> <given-names>J. S.</given-names></name></person-group> (<year>2017</year>). <article-title>Detecting and quantifying mind wandering during simulated driving</article-title>. <source>Front. Hum. Neurosci</source>. <volume>11</volume>:<fpage>406</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2017.00406</pub-id><pub-id pub-id-type="pmid">28848414</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Beggiato</surname> <given-names>M.</given-names></name> <name><surname>Krems</surname> <given-names>J. F.</given-names></name></person-group> (<year>2013</year>). <article-title>The evolution of mental model, trust and acceptance of adaptive cruise control in relation to initial information</article-title>. <source>Transport. Res. Part F: Traffic Psychol. Behav</source>. <volume>18</volume>, <fpage>47</fpage>&#x02013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.trf.2012.12.006</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Benjamini</surname> <given-names>Y.</given-names></name> <name><surname>Hochberg</surname> <given-names>Y.</given-names></name></person-group> (<year>1995</year>). <article-title>Controlling the false discovery rate: a practical and powerful approach to multiple testing</article-title>. <source>J. Royal Statist. Soc.: Series B</source>. <volume>57</volume>, <fpage>289</fpage>&#x02013;<lpage>300</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.2517-6161.1995.tb02031.x</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Campbell</surname> <given-names>J. L.</given-names></name> <name><surname>Brown</surname> <given-names>J. L.</given-names></name> <name><surname>Graving</surname> <given-names>J. S.</given-names></name> <name><surname>Richard</surname> <given-names>C. M.</given-names></name> <name><surname>Lichty</surname> <given-names>M. G.</given-names></name> <name><surname>Bacon</surname> <given-names>L. P.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>&#x0201C;Human factors design guidance for level 2 and level 3 automated driving concepts,&#x0201D;</article-title> in <source>Technical Report</source>.</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chouchane</surname> <given-names>H.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Sakamura</surname> <given-names>Y.</given-names></name> <name><surname>Nakamura</surname> <given-names>H.</given-names></name> <name><surname>Abe</surname> <given-names>G.</given-names></name> <name><surname>Itoh</surname> <given-names>M.</given-names></name></person-group> (<year>2026</year>). <article-title>Supervisory gaze behaviour under different automation durations in level 2 driving: a first-order transition analysis</article-title>. <source>Appl. Sci</source>. <volume>16</volume>:<fpage>1401</fpage>. doi: <pub-id pub-id-type="doi">10.3390/app16031401</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Chouchane</surname> <given-names>H.</given-names></name> <name><surname>Nakamura</surname> <given-names>H.</given-names></name> <name><surname>Sato</surname> <given-names>K.</given-names></name> <name><surname>Antona-Makoshi</surname> <given-names>J.</given-names></name> <name><surname>Abe</surname> <given-names>G.</given-names></name> <name><surname>Itoh</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Identifying the out of the loop phenomenon during driving automation using spontaneous gaze behavior,&#x0201D;</article-title> in <source>Proceedings of the Human Factors and Ergonomics Society Annual Meeting</source> (<publisher-loc>Los Angeles, CA</publisher-loc>: <publisher-name>SAGE Publications</publisher-name>), <fpage>305</fpage>&#x02013;<lpage>309</lpage>.</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cooper</surname> <given-names>J. M.</given-names></name> <name><surname>Crabtree</surname> <given-names>K. W.</given-names></name> <name><surname>McDonnell</surname> <given-names>A. S.</given-names></name> <name><surname>May</surname> <given-names>D.</given-names></name> <name><surname>Strayer</surname> <given-names>S. C.</given-names></name> <name><surname>Tsogtbaatar</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Driver behavior while using level 2 vehicle automation: a hybrid naturalistic study</article-title>. <source>Cognit. Res.: Princ. Implicat</source>. <volume>8</volume>:<fpage>71</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41235-023-00527-5</pub-id><pub-id pub-id-type="pmid">38117387</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>DeGuzman</surname> <given-names>C. A.</given-names></name> <name><surname>Donmez</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>Drivers still have limited knowledge about adaptive cruise control even when they own the system</article-title>. <source>Transp. Res. Rec</source>. <volume>2675</volume>, <fpage>328</fpage>&#x02013;<lpage>339</lpage>. doi: <pub-id pub-id-type="doi">10.1177/03611981211011482</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>DeGuzman</surname> <given-names>C. A.</given-names></name> <name><surname>Hopkins</surname> <given-names>S. A.</given-names></name> <name><surname>Donmez</surname> <given-names>B.</given-names></name></person-group> (<year>2020</year>). <article-title>Driver takeover performance and monitoring behavior with driving automation at system-limit versus system-malfunction failures</article-title>. <source>Transp. Res. Rec</source>. <volume>2674</volume>, <fpage>140</fpage>&#x02013;<lpage>151</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0361198120912228</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Endsley</surname> <given-names>M. R.</given-names></name></person-group> (<year>1988</year>). <article-title>&#x0201C;Design and evaluation for situation awareness enhancement,&#x0201D;</article-title> in <source>Proc. Human Fact. Soc. Annual Meet</source>. <volume>32</volume>, <fpage>97</fpage>&#x02013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1177/154193128803200221</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Endsley</surname> <given-names>M. R.</given-names></name></person-group> (<year>1995a</year>). <article-title>Measurement of situation awareness in dynamic systems</article-title>. <source>Hum. Factors</source> <volume>37</volume>, <fpage>65</fpage>&#x02013;<lpage>84</lpage>. doi: <pub-id pub-id-type="doi">10.1518/001872095779049499</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Endsley</surname> <given-names>M. R.</given-names></name></person-group> (<year>1995b</year>). <article-title>Toward a theory of situation awareness in dynamic systems</article-title>. <source>Hum. Factors</source> <volume>37</volume>, <fpage>32</fpage>&#x02013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.1518/001872095779049543</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Eriksson</surname> <given-names>A.</given-names></name> <name><surname>Stanton</surname> <given-names>N. A.</given-names></name></person-group> (<year>2017</year>). <article-title>Takeover time in highly automated vehicles: noncritical transitions to and from manual control</article-title>. <source>Hum. Factors</source> <volume>59</volume>, <fpage>689</fpage>&#x02013;<lpage>705</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0018720816685832</pub-id><pub-id pub-id-type="pmid">28124573</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>C.</given-names></name> <name><surname>Damb&#x000F6;ck</surname> <given-names>D.</given-names></name> <name><surname>Lorenz</surname> <given-names>L.</given-names></name> <name><surname>Bengler</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>&#x0201C;&#x0201C;Take over!&#x0201D; How long does it take to get the driver back into the loop?,&#x0201D;</article-title> in <source>Proceedings of the Human Factors and Ergonomics Society Annual Meeting</source> (<publisher-loc>Los Angeles, CA</publisher-loc>: <publisher-name>SAGE Publications</publisher-name>), <fpage>1938</fpage>&#x02013;<lpage>1942</lpage>.</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>C.</given-names></name> <name><surname>K&#x000F6;rber</surname> <given-names>M.</given-names></name> <name><surname>Hohenberger</surname> <given-names>C.</given-names></name> <name><surname>Lechner</surname> <given-names>D.</given-names></name> <name><surname>Bengler</surname> <given-names>K.</given-names></name></person-group> (<year>2015</year>). <article-title>Trust in automation-before and after the experience of take-over scenarios in a highly automated vehicle</article-title>. <source>Procedia Manufact</source>. <volume>3</volume>, <fpage>3025</fpage>&#x02013;<lpage>3032</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.promfg.2015.07.847</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Greenlee</surname> <given-names>E. T.</given-names></name> <name><surname>DeLucia</surname> <given-names>P. R.</given-names></name> <name><surname>Newton</surname> <given-names>D. C.</given-names></name></person-group> (<year>2018</year>). <article-title>Driver vigilance in automated vehicles: Hazard detection failures are a matter of time</article-title>. <source>Hum. Factors</source> <volume>60</volume>, <fpage>465</fpage>&#x02013;<lpage>476</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0018720818761711</pub-id><pub-id pub-id-type="pmid">29513611</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Harms</surname> <given-names>I. M.</given-names></name> <name><surname>Bingen</surname> <given-names>L.</given-names></name> <name><surname>Steffens</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Addressing the awareness gap: a combined survey and vehicle registration analysis to assess car owners&#x00027; usage of adas in fleets</article-title>. <source>Transport. Res. Part A: Policy Pract</source>. <volume>134</volume>, <fpage>65</fpage>&#x02013;<lpage>77</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tra.2020.01.018</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Becic</surname> <given-names>E.</given-names></name> <name><surname>Lee</surname> <given-names>Y.-C.</given-names></name> <name><surname>McCarley</surname> <given-names>J. S.</given-names></name></person-group> (<year>2011</year>). <article-title>Mind wandering behind the wheel: Performance and oculomotor correlates</article-title>. <source>Hum. Factors</source> <volume>53</volume>, <fpage>13</fpage>&#x02013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0018720810391530</pub-id><pub-id pub-id-type="pmid">21469530</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hergeth</surname> <given-names>S.</given-names></name> <name><surname>Lorenz</surname> <given-names>L.</given-names></name> <name><surname>Vilimek</surname> <given-names>R.</given-names></name> <name><surname>Krems</surname> <given-names>J. F.</given-names></name></person-group> (<year>2016</year>). <article-title>Keep your scanners peeled: Gaze behavior as a measure of automation trust during highly automated driving</article-title>. <source>Hum. Factors</source> <volume>58</volume>, <fpage>509</fpage>&#x02013;<lpage>519</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0018720815625744</pub-id><pub-id pub-id-type="pmid">26843570</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hoff</surname> <given-names>K. A.</given-names></name> <name><surname>Bashir</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Trust in automation: Integrating empirical evidence on factors that influence trust</article-title>. <source>Hum. Factors</source> <volume>57</volume>, <fpage>407</fpage>&#x02013;<lpage>434</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0018720814547570</pub-id><pub-id pub-id-type="pmid">25875432</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Inagaki</surname> <given-names>T.</given-names></name> <name><surname>Itoh</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Human&#x00027;s overtrust in and overreliance on advanced driver assistance systems: a theoretical framework</article-title>. <source>Int. J. Vehicular Technol</source>. <volume>2013</volume>:<fpage>951762</fpage>. doi: <pub-id pub-id-type="doi">10.1155/2013/951762</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khastgir</surname> <given-names>S.</given-names></name> <name><surname>Birrell</surname> <given-names>S.</given-names></name> <name><surname>Dhadyalla</surname> <given-names>G.</given-names></name> <name><surname>Jennings</surname> <given-names>P.</given-names></name></person-group> (<year>2018</year>). <article-title>Calibrating trust through knowledge: Introducing the concept of informed safety for automation in vehicles</article-title>. <source>Transport. Res. Part C: Emerg. Technol</source>. <volume>96</volume>, <fpage>290</fpage>&#x02013;<lpage>303</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.trc.2018.07.001</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x000F6;rber</surname> <given-names>M.</given-names></name> <name><surname>Baseler</surname> <given-names>E.</given-names></name> <name><surname>Bengler</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>Introduction matters: manipulating trust in automation and reliance in automated driving</article-title>. <source>Appl. Ergon</source>. <volume>66</volume>, <fpage>18</fpage>&#x02013;<lpage>31</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.apergo.2017.07.006</pub-id><pub-id pub-id-type="pmid">28958427</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Moray</surname> <given-names>N.</given-names></name></person-group> (<year>1992</year>). <article-title>Trust, control strategies and allocation of function in human-machine systems</article-title>. <source>Ergonomics</source> <volume>35</volume>, <fpage>1243</fpage>&#x02013;<lpage>1270</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00140139208967392</pub-id><pub-id pub-id-type="pmid">1516577</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>J. D.</given-names></name> <name><surname>See</surname> <given-names>K. A.</given-names></name></person-group> (<year>2004</year>). <article-title>Trust in automation: Designing for appropriate reliance</article-title>. <source>Hum. Factors</source> <volume>46</volume>, <fpage>50</fpage>&#x02013;<lpage>80</lpage>. doi: <pub-id pub-id-type="doi">10.1518/hfes.46.1.50.30392</pub-id><pub-id pub-id-type="pmid">15151155</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Louw</surname> <given-names>T.</given-names></name> <name><surname>Merat</surname> <given-names>N.</given-names></name></person-group> (<year>2017</year>). <article-title>Are you in the loop? Using gaze dispersion to understand driver visual attention during vehicle automation</article-title>. <source>Transport. Res. Part C: Emerg. Technol</source>. <volume>76</volume>, <fpage>35</fpage>&#x02013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.trc.2017.01.001</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>Z.</given-names></name> <name><surname>Coster</surname> <given-names>X.</given-names></name> <name><surname>de Winter</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>How much time do drivers need to obtain situation awareness? A laboratory-based study of automated driving</article-title>. <source>Appl. Ergon</source>. <volume>60</volume>, <fpage>293</fpage>&#x02013;<lpage>304</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.apergo.2016.12.003</pub-id><pub-id pub-id-type="pmid">28166888</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mars</surname> <given-names>F.</given-names></name> <name><surname>Deroo</surname> <given-names>M.</given-names></name> <name><surname>Charron</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;Driver adaptation to haptic shared control of the steering wheel,&#x0201D;</article-title> in <source>2014 IEEE International Conference on Systems, Man, and Cybernetics (SMC)</source> (<publisher-loc>San Diego, CA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1505</fpage>&#x02013;<lpage>1509</lpage>.</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Merat</surname> <given-names>N.</given-names></name> <name><surname>Jamson</surname> <given-names>A. H.</given-names></name> <name><surname>Lai</surname> <given-names>F. C.</given-names></name> <name><surname>Daly</surname> <given-names>M.</given-names></name> <name><surname>Carsten</surname> <given-names>O. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Transition to manual: Driver behaviour when resuming control from a highly automated vehicle</article-title>. <source>Transport. Res. Part F: Traffic Psychol. Behav</source>. <volume>27</volume>, <fpage>274</fpage>&#x02013;<lpage>282</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.trf.2014.09.005</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Merat</surname> <given-names>N.</given-names></name> <name><surname>Seppelt</surname> <given-names>B.</given-names></name> <name><surname>Louw</surname> <given-names>T.</given-names></name> <name><surname>Engstr&#x000F6;m</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J. D.</given-names></name> <name><surname>Johansson</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The &#x0201C;out-of-the-loop&#x0201D; concept in automated driving: proposed definition, measures and implications</article-title>. <source>Cognit. Technol. Work</source> <volume>21</volume>, <fpage>87</fpage>&#x02013;<lpage>98</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10111-018-0525-8</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Merritt</surname> <given-names>S. M.</given-names></name> <name><surname>Ilgen</surname> <given-names>D. R.</given-names></name></person-group> (<year>2008</year>). <article-title>Not all trust is created equal: dispositional and history-based trust in human-automation interactions</article-title>. <source>Hum. Factors</source> <volume>50</volume>, <fpage>194</fpage>&#x02013;<lpage>210</lpage>. doi: <pub-id pub-id-type="doi">10.1518/001872008X288574</pub-id><pub-id pub-id-type="pmid">18516832</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muir</surname> <given-names>B. M.</given-names></name> <name><surname>Moray</surname> <given-names>N.</given-names></name></person-group> (<year>1996</year>). <article-title>Trust in automation: part II. Theoretical issues in the study of trust and human intervention in automation systems</article-title>. <source>Ergonomics</source> <volume>39</volume>, <fpage>429</fpage>&#x02013;<lpage>460</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00140139608964474</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oviedo-Trespalacios</surname> <given-names>O.</given-names></name> <name><surname>Tichon</surname> <given-names>J.</given-names></name> <name><surname>Briant</surname> <given-names>O.</given-names></name></person-group> (<year>2021</year>). <article-title>Is a flick-through enough? A content analysis of advanced driver assistance systems (ADAS) user manuals</article-title>. <source>PLoS ONE</source> <volume>16</volume>:<fpage>e0252688</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0252688</pub-id><pub-id pub-id-type="pmid">34138889</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parasuraman</surname> <given-names>R.</given-names></name> <name><surname>Molloy</surname> <given-names>R.</given-names></name> <name><surname>Singh</surname> <given-names>I. L.</given-names></name></person-group> (<year>1993</year>). <article-title>Performance consequences of automation-induced &#x00027;complacency&#x00027;</article-title>. <source>Int. J. Aviat. Psychol</source>. <volume>3</volume>, <fpage>1</fpage>&#x02013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1207/s15327108ijap0301_1</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parasuraman</surname> <given-names>R.</given-names></name> <name><surname>Riley</surname> <given-names>V.</given-names></name></person-group> (<year>1997</year>). <article-title>Humans and automation: use, misuse, disuse, abuse</article-title>. <source>Hum. Factors</source> <volume>39</volume>, <fpage>230</fpage>&#x02013;<lpage>253</lpage>. doi: <pub-id pub-id-type="doi">10.1518/001872097778543886</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Price</surname> <given-names>M.</given-names></name> <name><surname>Lee</surname> <given-names>J. D.</given-names></name> <name><surname>Dinparastdjadid</surname> <given-names>A.</given-names></name> <name><surname>Toyoda</surname> <given-names>H.</given-names></name> <name><surname>Domeyer</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Effect of automation instructions and vehicle control algorithms on eye behavior in highly automated vehicles</article-title>. <source>Int. J. Automot. Eng</source>. <volume>10</volume>, <fpage>73</fpage>&#x02013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.20485/jsaeijae.10.1_73</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rabiner</surname> <given-names>L. R.</given-names></name> <name><surname>Juang</surname> <given-names>B.</given-names></name></person-group> (<year>1986</year>). <article-title>A tutorial on hidden Markov models</article-title>. <source>IEEE ASSP Magazine</source> <volume>3</volume>, <fpage>4</fpage>&#x02013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1109/MASSP.1986.1165342</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rasmussen</surname> <given-names>J.</given-names></name></person-group> (<year>1983</year>). <article-title>Skills, rules, and knowledge; signals, signs, and symbols, and other distinctions in human performance models</article-title>. <source>IEEE Trans. Syst. Man Cybernet</source>. <volume>13</volume>, <fpage>257</fpage>&#x02013;<lpage>266</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TSMC.1983.6313160</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rowan</surname> <given-names>K. E.</given-names></name></person-group> (<year>1991</year>). <article-title>Goals, obstacles, and strategies in risk communication: a problem-solving approach to improving communication about risks</article-title>. <source>J. Appl. Commun. Res</source>. <volume>19</volume>, <fpage>300</fpage>&#x02013;<lpage>329</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00909889109365311</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="book"><collab>SAE International</collab> (<year>2021</year>). <article-title>&#x0201C;Taxonomy and definitions for terms related to driving automation systems for on-road motor vehicles,&#x0201D;</article-title> in <source>Technical Report J3016_202104</source>. <publisher-loc>Warrendale, PA</publisher-loc>: <publisher-name>SAE International</publisher-name>.</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schnebelen</surname> <given-names>D.</given-names></name> <name><surname>Charron</surname> <given-names>C.</given-names></name> <name><surname>Mars</surname> <given-names>F.</given-names></name></person-group> (<year>2020</year>). <article-title>Estimating the out-of-the-loop phenomenon from visual strategies during highly automated driving</article-title>. <source>Accid. Anal. Prev</source>. <volume>148</volume>:<fpage>105776</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aap.2020.105776</pub-id><pub-id pub-id-type="pmid">33039817</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smallwood</surname> <given-names>J.</given-names></name> <name><surname>Schooler</surname> <given-names>J. W.</given-names></name></person-group> (<year>2015</year>). <article-title>The science of mind wandering: Empirically navigating the stream of consciousness</article-title>. <source>Annu. Rev. Psychol</source>. <volume>66</volume>, <fpage>487</fpage>&#x02013;<lpage>518</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-psych-010814-015331</pub-id><pub-id pub-id-type="pmid">25293689</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Sweller</surname> <given-names>J.</given-names></name> <name><surname>Ayres</surname> <given-names>P.</given-names></name> <name><surname>Kalyuga</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <source>Cognitive Load Theory</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>.</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Underwood</surname> <given-names>G.</given-names></name> <name><surname>Chapman</surname> <given-names>P.</given-names></name> <name><surname>Brocklehurst</surname> <given-names>N.</given-names></name> <name><surname>Underwood</surname> <given-names>J.</given-names></name> <name><surname>Crundall</surname> <given-names>D.</given-names></name></person-group> (<year>2003</year>). <article-title>Visual attention while driving: sequences of eye fixations made by experienced and novice drivers</article-title>. <source>Ergonomics</source> <volume>46</volume>, <fpage>629</fpage>&#x02013;<lpage>646</lpage>. doi: <pub-id pub-id-type="doi">10.1080/0014013031000090116</pub-id><pub-id pub-id-type="pmid">12745692</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Walker</surname> <given-names>F.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Martens</surname> <given-names>M. H.</given-names></name> <name><surname>Verwey</surname> <given-names>W. B.</given-names></name></person-group> (<year>2019</year>). <article-title>Gaze behaviour and electrodermal activity: Objective measures of drivers&#x00027; trust in automated vehicles</article-title>. <source>Transport. Res. Part F: Traffic Psychol. Behav</source>. <volume>64</volume>, <fpage>401</fpage>&#x02013;<lpage>412</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.trf.2019.05.021</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Warm</surname> <given-names>J. S.</given-names></name> <name><surname>Parasuraman</surname> <given-names>R.</given-names></name> <name><surname>Matthews</surname> <given-names>G.</given-names></name></person-group> (<year>2008</year>). <article-title>Vigilance requires hard mental work and is stressful</article-title>. <source>Hum. Factors</source> <volume>50</volume>, <fpage>433</fpage>&#x02013;<lpage>441</lpage>. doi: <pub-id pub-id-type="doi">10.1518/001872008X312152</pub-id><pub-id pub-id-type="pmid">18689050</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Young</surname> <given-names>M. S.</given-names></name> <name><surname>Stanton</surname> <given-names>N. A.</given-names></name></person-group> (<year>2002</year>). <article-title>Malleable attentional resources theory: a new explanation for the effects of mental underload on performance</article-title>. <source>Hum. Factors</source> <volume>44</volume>, <fpage>365</fpage>&#x02013;<lpage>375</lpage>. doi: <pub-id pub-id-type="doi">10.1518/0018720024497709</pub-id><pub-id pub-id-type="pmid">12502155</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/179827/overview">Andrej Ko&#x00161;ir</ext-link>, University of Ljubljana, Slovenia</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2647683/overview">Liza Dixon</ext-link>, University of Ulm, Germany</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3132457/overview">Jinzhen Dou</ext-link>, Southeast University, China</p>
</fn>
</fn-group>
</back>
</article>