<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">648529</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2021.648529</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Effects of Sensory Feedback and Collider Size on Reach-to-Grasp Coordination in Haptic-Free Virtual Reality</article-title>
<alt-title alt-title-type="left-running-head">Furmanek et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Effects of Sensory Feedback</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Furmanek</surname>
<given-names>Mariusz P.</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/385205/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mangalam</surname>
<given-names>Madhur</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/728800/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lockwood</surname>
<given-names>Kyle</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1428642/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Smith</surname>
<given-names>Andrea</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1428653/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yarossi</surname>
<given-names>Mathew</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/375287/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Tunik</surname>
<given-names>Eugene</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/339357/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Department of Physical Therapy, Movement and Rehabilitation Sciences, Northeastern University, <addr-line>Boston</addr-line>, <addr-line>MA</addr-line>, <country>United&#x20;States</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>Institute of Sport Sciences, Academy of Physical Education, <addr-line>Katowice</addr-line>, <country>Poland</country>
</aff>
<aff id="aff3">
<label>
<sup>3</sup>
</label>Department of Electrical and Computer Engineering, Northeastern University, <addr-line>Boston</addr-line>, <addr-line>MA</addr-line>, <country>United&#x20;States</country>
</aff>
<aff id="aff4">
<label>
<sup>4</sup>
</label>Department of Bioengineering, Northeastern University, <addr-line>Boston</addr-line>, <addr-line>MA</addr-line>, <country>United&#x20;States</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/815574/overview">Maxime T. Robert</ext-link>, Laval University, Canada</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1046240/overview">Mary C. Whitton</ext-link>, University of North Carolina at Chapel Hill, United&#x20;States</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/686512/overview">Nikita Aleksandrovich Kuznetsov</ext-link>, Louisiana State University, United&#x20;States</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Mariusz P. Furmanek, <email>m.furmanek@northeastern.edu</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Virtual Reality and Human Behaviour, a section of the journal Frontiers in Virtual Reality</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>08</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>2</volume>
<elocation-id>648529</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>12</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>05</day>
<month>07</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Furmanek, Mangalam, Lockwood, Smith, Yarossi and Tunik.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Furmanek, Mangalam, Lockwood, Smith, Yarossi and Tunik</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>Technological advancements and increased access have prompted the adoption of head-mounted display based virtual reality (VR) for neuroscientific research, manual skill training, and neurological rehabilitation. Applications that focus on manual interaction within the virtual environment (VE), especially haptic-free VR, critically depend on virtual hand-object collision detection. Knowledge about how multisensory integration related to hand-object collisions affects perception-action dynamics and reach-to-grasp coordination is needed to enhance the immersiveness of interactive VR. Here, we explored whether and to what extent sensory substitution for haptic feedback of hand-object collision (visual, audio, or audiovisual) and collider size (size of spherical pointers representing the fingertips) influences reach-to-grasp kinematics. In Study 1, visual, auditory, or combined feedback were compared as sensory substitutes to indicate the successful grasp of a virtual object during reach-to-grasp actions. In Study 2, participants reached to grasp virtual objects using spherical colliders of different diameters to test if virtual collider size impacts reach-to-grasp. Our data indicate that collider size but not sensory feedback modality significantly affected the kinematics of grasping. Larger colliders led to a smaller size-normalized peak aperture. We discuss this finding in the context of a possible influence of spherical collider size on the perception of the virtual object&#x2019;s size and hence effects on motor planning of reach-to-grasp. Critically, reach-to-grasp spatiotemporal coordination patterns were robust to manipulations of sensory feedback modality and spherical collider size, suggesting that the nervous system adjusted the reach (transport) component commensurately to the changes in the grasp (aperture) component. These results have important implications for research, commercial, industrial, and clinical applications of&#x20;VR.</p>
</abstract>
<kwd-group>
<kwd>visual feedback</kwd>
<kwd>auditory feedback</kwd>
<kwd>haptic feedback</kwd>
<kwd>collision detection</kwd>
<kwd>prehension</kwd>
<kwd>virtual environment</kwd>
<kwd>virtual reality</kwd>
</kwd-group>
<contract-num rid="cn001">2R01NS085122 2R01HD058301</contract-num>
<contract-num rid="cn002">1804550</contract-num>
<contract-num rid="cn003">1935337</contract-num>
<contract-sponsor id="cn001">National Institutes of Health<named-content content-type="fundref-id">10.13039/100000002</named-content>
</contract-sponsor>
<contract-sponsor id="cn002">Division of Chemical, Bioengineering, Environmental, and Transport Systems<named-content content-type="fundref-id">10.13039/100000146</named-content>
</contract-sponsor>
<contract-sponsor id="cn003">Division of Civil, Mechanical and Manufacturing Innovation<named-content content-type="fundref-id">10.13039/100000147</named-content>
</contract-sponsor>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Natural hand-object interactions are critical for a fully immersive virtual reality (VR) experience. In the real world, reach-to-grasp coordination is facilitated by congruent visual and proprioceptive feedback of limb position and orientation and haptic feedback of object properties (<xref ref-type="bibr" rid="B7">Bingham et&#x20;al., 2007</xref>; <xref ref-type="bibr" rid="B15">Coats et&#x20;al., 2008</xref>; <xref ref-type="bibr" rid="B8">Bingham and Mon-Williams, 2013</xref>; <xref ref-type="bibr" rid="B10">Bozzacchi et&#x20;al., 2014</xref>; <xref ref-type="bibr" rid="B70">Whitwell et&#x20;al., 2015</xref>; <xref ref-type="bibr" rid="B11">Bozzacchi et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B29">Hosang et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B66">Volcic and Domini, 2016</xref>; <xref ref-type="bibr" rid="B9">Bozzacchi et&#x20;al., 2018</xref>). In virtual environments (VE), visual feedback of the avatar hand may be incongruent with proprioceptive feedback from the biological hand. This discrepancy can arise from technological limitations (e.g., latency, rendering speed, and tracking accuracy) related to how the scene is calibrated (<xref ref-type="bibr" rid="B62">Stanney, 2002</xref>) or how the VR task is manipulated (<xref ref-type="bibr" rid="B25">Groen and Werkhoven, 1998</xref>; <xref ref-type="bibr" rid="B51">Prachyabrued and Borst, 2013</xref>). Moreover, the virtual representation of the limb may be distorted in appearance (<xref ref-type="bibr" rid="B3">Argelaguet et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B35">Liu et&#x20;al., 2019</xref>) in a similar manner to the use of a cursor to represent hand position in traditional computer displays. 
For example, visualization of the index finger and thumb as simple spherical colliders to allow pincer grasping of objects in VE is often employed (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B64">van Polanen et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>). The colliders&#x2019; size is often arbitrarily chosen by researchers but can have profound effects on behavior, especially for dexterous and accuracy-demanding tasks. Finally, when not combined with haptic devices, haptic information about whether and how a given object has been grasped is absent, creating additional uncertainty. The lack of haptic feedback about object properties may be supplemented with terminal visual feedback (sensory substitution) in the form of the object changing its color, or as auditory feedback in the form of a sound, to signal that the virtual object has been contacted or grasped and to minimize hand-object interpenetration (<xref ref-type="bibr" rid="B73">Zahariev and MacKenzie, 2003</xref>; <xref ref-type="bibr" rid="B74">Zahariev and MacKenzie, 2007</xref>; <xref ref-type="bibr" rid="B13">Castiello et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B59">Sedda et&#x20;al., 2011</xref>; <xref ref-type="bibr" rid="B53">Prachyabrued and Borst, 2012</xref>; <xref ref-type="bibr" rid="B52">Prachyabrued and Borst, 2014</xref>; <xref ref-type="bibr" rid="B12">Canales and J&#xf6;rg, 2020</xref>).</p>
<p>One of the most common and well-studied forms of hand-object interactions is reaching and grasping an object. Reach-to-grasp movements involve a reach component describing the transport of the hand toward the object and a grasp component describing the preshaping of the fingers to the object. Traditionally, the end of a &#x201c;reach-to-grasp&#x201d; movement is defined by contact with the object. The reach component is quantified through analysis of hand transport kinematics (e.g., trajectory and velocity of the wrist motion), and the grasp component is quantified through analysis of aperture kinematics (e.g., interdigit distance in time) (<xref ref-type="bibr" rid="B30">Jeannerod, 1981</xref>; <xref ref-type="bibr" rid="B31">Jeannerod, 1984</xref>). Planning and execution of successful reach-to-grasp movements require both spatial and temporal coordination between the reach and grasp components (<xref ref-type="bibr" rid="B54">Rand et&#x20;al., 2008</xref>; <xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>). Whether the transport and aperture components represent information flow in independent neural channels remains an open and interesting question (<xref ref-type="bibr" rid="B17">Culham et&#x20;al., 2006</xref>; <xref ref-type="bibr" rid="B65">Vesia and Crawford, 2012</xref>; <xref ref-type="bibr" rid="B58">Schettino et&#x20;al., 2017</xref>); however, several kinematic features of coordination between the two components have been well described (<xref ref-type="bibr" rid="B28">Haggard and Wing, 1991</xref>; <xref ref-type="bibr" rid="B49">Paulignan et&#x20;al., 1991a</xref>; <xref ref-type="bibr" rid="B48">Paulignan et&#x20;al., 1991b</xref>; <xref ref-type="bibr" rid="B24">Gentilucci et&#x20;al., 1992</xref>; <xref ref-type="bibr" rid="B27">Haggard and Wing, 1995</xref>; <xref ref-type="bibr" rid="B19">Dubrowski et&#x20;al., 2002</xref>). 
For instance, peak transport velocity tends to occur at 30<inline-formula id="inf1">
<mml:math id="m1">
<mml:mtext>%</mml:mtext>
</mml:math>
</inline-formula> of the total time to complete the movement (<xref ref-type="bibr" rid="B31">Jeannerod, 1984</xref>), and peak aperture (maximal hand opening) occurs at 60&#x2013;70<inline-formula id="inf2">
<mml:math id="m2">
<mml:mtext>%</mml:mtext>
</mml:math>
</inline-formula> of total movement time (<xref ref-type="bibr" rid="B14">Castiello, 2005</xref>). Furthermore, there is substantial evidence to support that the grasp and reach are strongly coordinated in the spatial domain (<xref ref-type="bibr" rid="B27">Haggard and Wing, 1995</xref>; <xref ref-type="bibr" rid="B54">Rand et&#x20;al., 2008</xref>). Namely, the distance of the hand from the object when hand opening ceases and hand closing begins (closure distance, usually the point of peak aperture) can be accurately predicted from state estimates of transport velocity, transport acceleration, and aperture.</p>
<p>There is growing interest in contrasting performance of dexterous actions, such as reach-to-grasp, when executed in the physical environment (PE) and VE. In our previous work, we showed that temporal features of reach-to-grasp coordination and the control law governing closure (<xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>) were preserved in a VE that utilized a reductionist spherical collider representation of the index and thumb and audiovisual feedback-based sensory substitution. However, we noted that movement speed and maximum grip aperture differed between the real environment and VE (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>). These studies utilized only a single set of parameters for the presentation of feedback in the VE, and therefore, the influence of different parameters for representation of the virtual fingers and substitution of haptic feedback is unknown. The goal of this investigation was to test the extent to which the selection of feedback parameters influences behavior in the VE. In two studies, we systematically varied parameters related to the sensory modality of haptic sensory substitution (Study 1) and the size of the spherical colliders representing the index-tip and thumb-tip (Study 2) to better understand the influence of these parameters on features of reach-to-grasp performance in VR. In both studies, participants reached to grasp virtual objects at a natural pace in an immersive VE presented <italic>via</italic> a head-mounted display (HMD).</p>
<p>Study 1 was designed to test whether visual, auditory, or audiovisual sensory substitution for haptic feedback of the object properties significantly affects reach-to-grasp kinematics. Participants grasped virtual objects of different sizes and placed them at different distances, where the change in color of the object (visual), tone (auditory), or both (audiovisual) was used to provide the terminal feedback that grasp was completed and achieved successfully. A previous study using spherical colliders to reach to grasp virtual objects reported that audio and audiovisual terminal feedback of the object being grasped resulted in shorter movement times than visual or absent terminal feedback, though there was no effect of terminal feedback on peak aperture (<xref ref-type="bibr" rid="B74">Zahariev and MacKenzie, 2007</xref>). While this study had a similar design to our Study 1, it was conducted using stereoscopic glasses to obtain a 3D view of images presented on a 2D display, and the results may not transfer to an HMD-based presentation of VR that presents a more immersive experience and is more commonly used today. Furthermore, no analysis of temporal or spatial reach-to-grasp kinematics was provided, limiting interpretations about the effects of terminal feedback on reach-to-grasp coordination. A more recent study using a robotic-looking virtual hand avatar to reach to grasp and transport virtual objects in an HMD immersive VR setup found that movement time was shorter for visual, compared to auditory or absent, terminal feedback (<xref ref-type="bibr" rid="B12">Canales and J&#xf6;rg, 2020</xref>). Interestingly, participants subjectively preferred audio terminal feedback to other sensory modalities despite the fact that audio feedback produced the slowest movements. The Canales and J&#xf6;rg study did not measure the kinematics of the movement and therefore interpretation about movement coordination is limited. 
Based on these studies and our previous work (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>), we expected that the modality of terminal feedback used to signal successful grasp would affect reach-to-grasp kinematics due to uncertainty of contact with an object. Specifically, we hypothesized that, with multimodal (audiovisual) feedback, participants would show (H1.1) greater scaling of aperture to object width and (H1.2) faster completion of the reach-to-grasp task, but (H1.3) the spatiotemporal coordination between the reach and the grasp components of the movement should remain preserved across terminal feedback condition.</p>
<p>To date, no study has systematically examined the impact of the size of the virtual effector on reach-to-grasp kinematics. Study 2 was designed to fill this gap in the literature. Participants used spherical colliders of different diameters to reach to grasp virtual objects of different sizes placed at different distances. Ogawa and coworkers (<xref ref-type="bibr" rid="B46">Ogawa et&#x20;al., 2018</xref>) reported that the size of a virtual avatar hand affects participants&#x2019; perception of object size in an HMD-based VE, but they did not study reach-to-grasp movements or analyze movement kinematics. Extrapolating from their results, we hypothesized that the size of the spherical collider would affect maximum grip aperture, with smaller colliders predicted to result in larger maximum grip aperture (H2). We specifically used a reduced version of the avatar hand (just two dots representing the thumb and index fingertips) to reduce the number of factors that can potentially affect reach-to-grasp kinematics, such as differences in the shape, color, and texture of a more biological looking hand avatar (<xref ref-type="bibr" rid="B36">Lok et&#x20;al., 2003</xref>; <xref ref-type="bibr" rid="B46">Ogawa et&#x20;al., 2018</xref>). Moreover, the spherical colliders allowed for more precise localization of the fingertips in VE than is typical of anthropomorphic hand avatars (<xref ref-type="bibr" rid="B68">Vosinakis and Koutsabasis, 2018</xref>) and eliminated the influence of visuoproprioceptive discrepancies caused by potential tracking or joint angle calibration errors inherent in sensor gloves. 
Similar reductionist effectors have been successfully used in multiple previous studies for similar reasons (<xref ref-type="bibr" rid="B74">Zahariev and MacKenzie, 2007</xref>; <xref ref-type="bibr" rid="B72">Zahariev and Mackenzie, 2008</xref>; <xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>). Furthermore, a recent study where only the target and the richness of hand anthropomorphism (e.g., 2-point, point-dot hand, and full hand) were visible to participants reported that kinematic performance was best when either the minimal (2-point) or enriched hand-like model (skeleton, full) was provided (<xref ref-type="bibr" rid="B61">Sivakumar et&#x20;al., 2021</xref>). Therefore, in the present study, we used simple spheres representing the fingertips to systematically test the effect of collider size on reach-to-grasp behavior.</p>
<p>Study 1 and Study 2 were designed to increase knowledge about how choices for haptic sensory substitution and collider size may affect reach-to-grasp performance in HMD-based VR. This work has the potential to directly impact the design of VR platforms used for commercial, industrial, research, and rehabilitation applications.</p>
</sec>
<sec id="s2">
<title>2 Materials and Methods</title>
<sec id="s2-1">
<title>2.1 Participants</title>
<p>Ten adults [seven men and three women; <italic>M</italic>
<inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, age &#x3d; 21.1<inline-formula id="inf4">
<mml:math id="m4">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula>5.88&#xa0;years; all right-handed (<xref ref-type="bibr" rid="B47">Oldfield et&#x20;al., 1971</xref>)] with no reported muscular, orthopedic, or neurological health concerns voluntarily participated in both studies after providing informed consent approved by the Institutional Review Board (IRB) at Northeastern University. The participant pool was a convenience sample of undergraduate and graduate students. Some participants had previously participated in reach-to-grasp studies in our hf-VE; however, none of the participants reported extensive experience in VR (e.g., gaming and simulations).</p>
</sec>
<sec id="s2-2">
<title>2.2 Reach-to-Grasp Task, Virtual Environment, and Kinematic Measurement</title>
<p>Each participant reached to grasp 3D-printed physical objects in the PE and their exact virtual renderings in the haptic-free virtual environment (hf-VE) of three different sizes, small (<italic>width</italic> <inline-formula id="inf5">
<mml:math id="m5">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> <italic>height</italic> <inline-formula id="inf6">
<mml:math id="m6">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> <italic>depth</italic> &#x3d; 3.6&#x20;<inline-formula id="inf7">
<mml:math id="m7">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 8&#x20;<inline-formula id="inf8">
<mml:math id="m8">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 2.5&#xa0;cm), medium (5.4&#x20;<inline-formula id="inf9">
<mml:math id="m9">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 8&#x20;<inline-formula id="inf10">
<mml:math id="m10">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 2.5&#xa0;cm), and large (7.2&#x20;<inline-formula id="inf11">
<mml:math id="m11">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 8&#x20;<inline-formula id="inf12">
<mml:math id="m12">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 2.5&#xa0;cm), placed at three different distances, near (24&#xa0;cm), middle (30&#xa0;cm), and far (36&#xa0;cm) from the initial position of the fingertips. Objects were rotated along their vertical axis to 75&#xb0; measured from the horizontal axis to avoid excessive wrist extension. The physical objects were 3D printed using PLA thermoplastic (mass: small: 30&#xa0;g; medium: 44&#xa0;g; large: 59&#xa0;g) and covered with glow-in-the-dark&#x20;paint.</p>
<p>A commercial HTC Vive Pro, composed of HMD and an infrared laser emitter unit, was used. The virtual scene was created and rendered in Unity (ver. 5.6, 64 bits, Unity Technologies, San Francisco, CA) with C&#x23; as the programming language, running on a computer with Windows 7 Ultimate, 64-bit operating system, an Intel(R) Xeon(R) CPU E5-1630 v3 3.7 GHz, 32&#xa0;GB RAM, and an NVIDIA Quadro M6000 graphics card. Given the power of the PC and simplicity of the VE, scenes were rendered in less than one frame time (see below). The interpupillary distance in the HMD was individually adjusted to each participant. Objects were displayed in stereovision giving the perception that they were 3D. Participants were asked to confirm that they perceived the object as 3D and that they could distinguish the object&#x2019;s edges, though we did not formally test for stereopsis. Motion tracking of the head was achieved by streaming data from an IMU and laser-based photodiodes embedded in the headset. A detailed description of the HTC Vive&#x2019;s head tracking system is published elsewhere (<xref ref-type="bibr" rid="B44">Niehorster et&#x20;al., 2017</xref>). Position and orientation data provided by the Vive were acquired through Unity at <inline-formula id="inf13">
<mml:math id="m13">
<mml:mo>&#x223c;</mml:mo>
</mml:math>
</inline-formula>90&#xa0;Hz, the frame rate of the HTC Vive. Prior work has reported that, for large head movements, the average error between the laser-measured position and the position reported by the Vive is less than 1&#xa0;cm (<xref ref-type="bibr" rid="B37">Luckett, 2018</xref>). In our experiment, each participant&#x2019;s head remained relatively stable (the task did not involve extensive head motion) and therefore head tracking inconsistencies were negligible and none of the subjects reported any shifts or jumps in the visual display. An eight-camera motion tracking system (120&#xa0;Hz, PPT Studio NTM, WorldViz Inc., Santa Barbara, CA) captured the 3D motion of IRED markers attached to the participants&#x2019; wrist and fingertips. The placement procedure of the IRED markers on the fingertip was as follows: an identical 3D-printed physical object was grasped at the top of its height, and markers were attached to the tops of fingertips in a way that minimized the distance between the object and marker. The centroid of the virtual sphere corresponded to the detected position of the IRED. Note that although data were collected at 120&#xa0;Hz in the PPT system, acquisition of samples in Unity was limited to <inline-formula id="inf14">
<mml:math id="m14">
<mml:mo>&#x223c;</mml:mo>
</mml:math>
</inline-formula>90&#xa0;Hz, the frame rate of the HTC Vive. Prior to each data collection, the 3D motion capture system was calibrated. This entailed using a standard frame to reset the origin and axes of the 3D space in PPT to match the Unity origin. According to the manufacturer and confirmed by our team when analyzing the residuals during the calibration procedure, the error of the PPT system was less than 1&#xa0;mm. End-to-end latency, indicating the time between the physical movement of the motion sensor (from PPT) and movement rendered in the virtual scene, was 22&#xa0;ms (upper bound on the true system latency). This latency was not associated with motion sickness (<xref ref-type="bibr" rid="B62">Stanney, 2002</xref>; <xref ref-type="bibr" rid="B4">Barrett, 2004</xref>) in a previous publication using a nearly identical system (<xref ref-type="bibr" rid="B44">Niehorster et&#x20;al., 2017</xref>). No participants in our study anecdotally reported symptoms of motion sickness; however, no formal assessment of subjective symptoms of motion sickness was completed. The schedule of trials, virtual renderings of the target object, and timing/triggering of the perturbation were controlled using custom software developed in C&#x23;. We recently published two reports showing that spatiotemporal coordination of reach-to-grasp movements is similar in the above described hf-VE compared to that of the real world (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>).</p>
</sec>
<sec id="s2-3">
<title>2.3 Procedure and Instructions to Participants</title>
<p>Each participant was seated on a chair with the right arm and hand placed on a table in front of them. At the start position, the thumb and index finger straddled a 1.5&#xa0;cm wide plastic peg located 12&#xa0;cm in front and 24&#xa0;cm to the right of the sternum, with the thumb depressing a switch. Lifting the thumb off the switch marked movement onset. Upon an auditory tone (&#x201c;beep&#x201d; signal), the participant reached to grasp the virtual object presented in the HMD, lifted it, held it until it disappeared (3.5&#xa0;s from movement onset, i.e.,&#x20;the moment the switch was released), and returned their hand to the starting position. Each auditory tone was time jittered within 0.5&#xa0;s standard deviation from 1&#xa0;s after trial start (i.e.,&#x20;after the start switch was activated) to avoid participants&#x2019; adaptation. A custom collision detection algorithm was used to determine when the virtual object was grasped. Each finger was represented by a sphere. When any point on the sphere made contact with any point on the object, it was considered &#x201c;attached.&#x201d; Once both fingers were &#x201c;attached&#x201d; to the object, the object was considered &#x201c;grasped,&#x201d; and translational movement from the fingers would also move the object. A 1.2&#xa0;cm error margin, imposed on the distance between the spheres, was used to maintain grasp. If the distance between the spheres increased by more than 1.2&#xa0;cm from its value at the time the object was &#x201c;grasped&#x201d; (e.g., if the fingers opened), the object was no longer considered grasped, the color changed to white, and it would drop to the table. Conversely, if the distance between the spheres decreased by more than 1.2&#xa0;cm from its value at the time the object was &#x201c;grasped,&#x201d; the object was considered &#x201c;overgrasped.&#x201d; An &#x201c;overgrasped&#x201d; object would turn white and would remain frozen. 
If neither error occurred, the object was considered to be grasped successfully, and its color changed to red (visual feedback condition) or a tone sounded (audio condition); see below for details about terminal feedback conditions. The 1.2&#x20;cm error margin was chosen after extensive piloting of the experiment. In the future, we are planning to systematically check for the effect of the error margin on reach-to-grasp behavior.</p>
<p>Before data collection, each participant was familiarized with the setup and procedure. Familiarization consisted of 30 trials of grasping virtual and physical objects (five trials <inline-formula id="inf15">
<mml:math id="m15">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> three objects, placed at the middle distance) first in PE and then in hf-VE. The participant was instructed to reach and grasp an object at a comfortable speed in the middle along its vertical dimension. Following familiarization, the participant began experimental trials. Further details are provided in the subsequent sections.</p>
<p>To wash out any effect of sensory feedback (Study 1) or collider size (Study 2) on reach-to-grasp coordination, each participant performed a block of reach-to-grasp movements in PE prior to each hf-VE block. The rendering in the virtual scene showed two spheres, representing the thumb and index fingertips, which were visible to the participant. To make the PE condition comparable with regard to what a participant saw, the room was darkened so that the participants could see only the glow-in-the-dark object and the illuminated IRED markers on their fingertips. Overhead lights were turned on and off (after every five trials) to prevent adaptation to the dark. PE trials were used strictly for washout and although data were recorded during these trials, the data were not analyzed nor presented in this manuscript.</p>
</sec>
<sec id="s2-4">
<title>2.4 Study 1: Manipulations of Sensory Feedback</title>
<p>Each participant was tested in a single session consisting of 270 trials evenly spread across six blocks of 45 trials, alternating between PE and hf-VE with the first block performed in PE. The participant was given a 2&#x20;min break between consecutive blocks. In the three blocks for hf-VE, visual (V), auditory (A), and both visual and auditory [audiovisual (AV)] feedback were provided to indicate that the virtual object had been grasped. In the vision condition, the object turned from blue to red. In the auditory condition, the sound of a click (875&#xa0;Hz, 50&#xa0;ms duration) was presented. In the audiovisual condition, the object turned from blue to red in addition to the sound of a click (<xref ref-type="fig" rid="F1">Figure&#x20;1</xref>, top) and remained red until the object disappeared or was released/overgrasped. The collider size remained constant (diameter &#x3d; 0.8&#xa0;cm) in each feedback condition. The order of feedback conditions was pseudorandomized across participants. Each condition was collected in a single block that contained 45 trials (three object sizes, three object distances, and five trials per size-distance pair). Objects in each block were presented in the same order [small-near (five trials), small-middle (five trials), and small-far (five trials); medium-near (five trials), medium-middle (five trials), and medium-far (five trials); large-near (five trials), large-middle (five trials), and large-far (five trials)]. Each block of virtual grasping was preceded by an identical block of grasping physical objects to wash out possible carryover effects from the previous hf-VE&#x20;block.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Schematic illustration of the experimental setup and procedure. After wearing an HTC Vive<sup>TM</sup> head-mounted display (HMD), the participants sat on a chair in front of the experimental rig, with their thumb pressing a start switch (indicated in yellow). IRED markers were attached to the participant&#x2019;s wrist and the tips of the thumb and index finger. An auditory cue&#x2014;a beep&#x2014;signaled the participant to reach to grasp the object (small: 3.6 &#xd7; 2.5 &#xd7; 8&#xa0;cm; medium: 5.4 &#xd7; 2.5 &#xd7; 8&#xa0;cm; large: 7.2 &#xd7; 2.5 &#xd7; 8&#xa0;cm), placed at three different distances relative to the switch (near: 24&#xa0;cm; middle: 30&#xa0;cm; far: 36&#xa0;cm). An inset presents the first-person scene that appeared in the HMD. Translucent panels containing text in the visual scene were only visible to the experimenter. In Study 1, participants grasped the object with 0.8&#xa0;cm colliders, and visual, auditory, or audiovisual feedback was provided to signal that the object had been grasped. In Study 2, audiovisual feedback was provided to signal that the object had been grasped, and participants grasped the object with 0.2, 0.4, 0.8, 1.2, and 1.4&#xa0;cm colliders. In middle and bottom panels, the medium object is presented with the accurate scaling relationship between object dimensions and collider size.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g001.tif"/>
</fig>
</sec>
<sec id="s2-5">
<title>2.5 Study 2: Manipulations of Collider Size</title>
<p>Each participant was tested in a single session consisting of 450 trials evenly spread across ten blocks of 45 trials, alternating between PE and hf-VE with the first block performed in PE. The participant was given a 2&#x20;min break between consecutive blocks. In the five hf-VE blocks, we manipulated the collider size to be 0.2, 0.4, 0.8, 1.2, or 1.4&#xa0;cm (<xref ref-type="fig" rid="F1">Figure&#x20;1</xref>, bottom). Collider size was constant for all trials within a block. The order that collider size blocks were presented was pseudorandomized across participants. Each block contained 45 trials (three object sizes, three object distances, and five trials per size-distance pair). Objects in each block were presented in the same order [small-near (five trials), small-middle (five trials), and small-far (five trials); medium-near (five trials), medium-middle (five trials), and medium-far (five trials); large-near (five trials), large-middle (five trials), and large-far (five trials)]. Each block of virtual grasping was preceded by an identical block of grasping physical objects to wash out possible carryover effects from the previous hf-VE&#x20;block.</p>
</sec>
<sec id="s2-6">
<title>2.6 Kinematic Processing</title>
<p>All kinematic data were analyzed offline using custom MATLAB routines (Mathworks Inc., Natick, MA). For each trial, time series data for the planar motion of the markers in the x- and y-coordinates were cropped from movement onset (the moment the switch was released) to movement offset (the moment the collision detection criterion was met). Transport distance (i.e.,&#x20;the straight-line distance of the wrist marker from the starting position in the transverse plane) and aperture (the straight-line distance between the thumb and index finger markers in the transverse plane) trajectories were computed for each trial. The first derivative of transport displacement and aperture was computed to obtain the velocity profiles for kinematic feature extraction. All time series were filtered at 6&#xa0;Hz using a fourth-order low-pass Butterworth filter. In line with our past data processing protocols, trials in which participants did not move or lifted their fingers off the starting switch not in the process of making a goal-directed action toward the object were excluded from the analysis. Excluded trials comprised <inline-formula id="inf16">
<mml:math id="m16">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 3% of trials in any given condition.</p>
<p>Additionally, we also computed the time series for size-normalized aperture. The rationale for this normalization was twofold. First, markers were attached to the dorsum of the digits (on the nail) to avoid interference with grasping. Second, in hf-VE, the relative sizes of the collider and the target object might influence the grasp. For instance, a larger collider might lead to a small object being perceived disproportionately smaller than a large object. Normalizing peak aperture by object size allowed us to examine any effect of such perceptual discrepancy on the&#x20;grasp.</p>
<p>For each trial, the following kinematic features, units in parentheses, were extracted using the filtered time series data:<list list-type="simple">
<list-item>
<p>&#x2022; Movement time (ms): duration from movement onset to movement offset.</p>
</list-item>
<list-item>
<p>&#x2022; Peak aperture (cm): maximum distance between the fingertip markers. Peak aperture also marked the initiation of closure or closure onset (henceforth, CO), which we refer to as aperture at&#x20;CO.</p>
</list-item>
<list-item>
<p>&#x2022; Size-normalized peak aperture: peak aperture normalized by the target object&#x20;width.</p>
</list-item>
<list-item>
<p>&#x2022; Time to peak aperture (ms): time from movement onset to peak aperture.</p>
</list-item>
<list-item>
<p>&#x2022; Closure distance (cm): distance between the wrist&#x2019;s position at CO and the object&#x2019;s center.</p>
</list-item>
<list-item>
<p>&#x2022; Peak transport velocity (cm/s): maximum velocity of the wrist marker.</p>
</list-item>
<list-item>
<p>&#x2022; Time to peak transport velocity (ms): time from movement onset to maximum velocity of the wrist marker.</p>
</list-item>
<list-item>
<p>&#x2022; Transport velocity at CO (cm/s): velocity of the wrist marker at the time of&#x20;CO.</p>
</list-item>
</list>
</p>
<p>Movement time was used to examine the global effect of condition manipulations on reach-to-grasp movements. Peak aperture, time to peak aperture, and size-normalized peak aperture were used to examine the effect on the grasp component. Likewise, peak transport velocity and time to peak transport velocity were used to examine the effect on the transport component. Finally, time to peak transport velocity and time to peak aperture as well as transport velocity at CO and closure distance were used to examine the effects of task manipulations on reach-to-grasp coordination (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>).</p>
</sec>
<sec id="s2-7">
<title>2.7 Statistical Analysis</title>
<p>All analyses were initially performed at the trial level to compute means for each subject. Subjects&#x2019; means were then submitted to analysis of variance for group-level statistics. 3&#x20;<inline-formula id="inf17">
<mml:math id="m17">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 3&#x20;<inline-formula id="inf18">
<mml:math id="m18">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 3 repeated measures analyses of variance (rm-ANOVAs) with within-subject factors of sensory feedback (visual, auditory, and audiovisual), object size (small, medium, and large), and object distance (near, middle, and far) were used to evaluate the effects on each kinematic variable in Study 1. 5&#x20;<inline-formula id="inf19">
<mml:math id="m19">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 3&#x20;<inline-formula id="inf20">
<mml:math id="m20">
<mml:mo>&#xd7;</mml:mo>
</mml:math>
</inline-formula> 3&#x20;rm-ANOVAs with within-subject factors of collider size (0.2, 0.4, 0.8, 1.2, and 1.4), object size (small, medium, and large), and object distance (near, middle, and far) were used to evaluate the effects on each kinematic variable separately in Study 2. In most cases, the data met assumptions for normality, homogeneity of variance, and sphericity. When an assumption of sphericity was not met, a Greenhouse-Geisser correction was applied. All tests were performed in Statistica (ver. 13, Dell Inc.). Each test statistic was considered significant at the two-tailed alpha level of 0.05. All effect sizes are reported as partial eta-squared (<inline-formula id="inf21">
<mml:math id="m21">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>).</p>
<p>We used linear mixed-effects (LME) models to test the relationship between time to peak transport velocity and time to peak aperture and between transport velocity at CO and closure distance, in both Studies 1 and 2. The same LMEs also tested whether and how the respective relationship was influenced by sensory feedback in Study 1 and collider size in Study 2. In LMEs for Study 1, sensory feedback served as a categorical independent variable with three levels: visual, auditory, and audiovisual. The &#x201c;visual&#x201d; feedback served as the reference level. In LMEs for Study 2, collider size served as a continuous independent variable. In each model, participant identity was treated as a random effect. Both models were fit using the lmer() function in the package <italic>lme4</italic> (<xref ref-type="bibr" rid="B5">Bates et&#x20;al., 2014</xref>) for <italic>R</italic> (<xref ref-type="bibr" rid="B63">Team R. C., 2013</xref>). Approximate effect sizes for LMEs were computed using the omega_squared() function in the package <italic>effectsize</italic> (<xref ref-type="bibr" rid="B6">Ben-Shachar et&#x20;al., 2021</xref>) for <italic>R</italic>. Coefficients were considered significant at the alpha level of&#x20;0.05.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Results</title>
<sec id="s3-1">
<title>3.1 Study 1: Effects of Sensory Feedback on Reach-to-Grasp Movements</title>
<p>
<xref ref-type="fig" rid="F2">Figure&#x20;2A</xref> shows the trajectories of the mean 2D position of the wrist, thumb, and index finger corresponding to each sensory feedback condition for a representative participant (averaged across all trials) for the medium object placed at the middle distance. <xref ref-type="fig" rid="F2">Figure&#x20;2B</xref> shows the mean transport velocity and aperture profiles obtained from the trajectories shown in <xref ref-type="fig" rid="F2">Figure&#x20;2A</xref>. Notice that, in both figures, the curves for the three feedback conditions entirely eclipse each other, indicating that sensory feedback affected neither the wrist, thumb, and index finger trajectories nor the transport velocity and aperture profiles. <xref ref-type="fig" rid="F3">Figure&#x20;3</xref> shows the phase relationship between transport velocity and size-normalized aperture (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>). An almost invariant location of peak transport velocity and peak aperture, which mark the onset of the shaping phase and the closure phase, respectively, indicates that this phase relationship did not vary across feedback conditions.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Mean trajectories for a representative participant showing reach-to-grasp kinematics for different sensory feedback and collider size. <italic>Study 1.</italic> <bold>(A)</bold> Marker trajectories for the wrist, thumb, and index finger across different conditions of sensory feedback. <bold>(B)</bold> Time-normalized aperture (solid lines) and transport velocity (dashed-dotted lines) profiles across different conditions of sensory feedback. <italic>Study 2.</italic> <bold>(C)</bold> Marker trajectories for the wrist, thumb, and index finger across different collider sizes. <bold>(D)</bold> Time-normalized aperture (solid lines) and transport velocity (dash-dotted lines) profiles across different collider&#x20;sizes.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g002.tif"/>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Study 1. Phase plots of size-normalized aperture vs. transport velocity for each condition of sensory feedback for a representative participant. Diamonds and circles indicate size-normalized peak aperture and peak transport velocity, respectively. Black arrows indicate the progression of reach-to-grasp movement.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g003.tif"/>
</fig>
<p>An rm-ANOVA revealed that movement time did not differ among the three types of sensory feedback (<italic>p</italic> <inline-formula id="inf22">
<mml:math id="m22">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T1">Table&#x20;1</xref>). As expected, movement time differed across objects placed at different distances (<italic>F</italic>2,18&#x20;<inline-formula id="inf23">
<mml:math id="m23">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 36.71, <italic>p</italic> <inline-formula id="inf24">
<mml:math id="m24">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001). Bonferroni&#x2019;s post hoc tests revealed that movement time was longer for more distant objects (middle vs. near: 48&#x20;<inline-formula id="inf25">
<mml:math id="m25">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 10&#xa0;ms, <italic>p</italic> <inline-formula id="inf26">
<mml:math id="m26">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 87&#x20;<inline-formula id="inf27">
<mml:math id="m27">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 10&#xa0;ms, <italic>p</italic> <inline-formula id="inf28">
<mml:math id="m28">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. middle: 38&#x20;<inline-formula id="inf29">
<mml:math id="m29">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 10&#xa0;ms, <italic>p</italic> <inline-formula id="inf30">
<mml:math id="m30">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.004; <xref ref-type="fig" rid="F4">Figure&#x20;4A</xref>). Neither the main effect of object size nor any of the interaction effects of sensory feedback, object distance, and object size was significant (<italic>p</italic> <inline-formula id="inf31">
<mml:math id="m31">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.05, <xref ref-type="table" rid="T1">Table&#x20;1</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Outcomes of 3 &#xd7; 3 &#xd7; 3&#x20;rm-ANOVAs examining the effects of sensory feedback (visual, auditory, and audiovisual), object size (small, medium, and large), and object distance (near, middle, and far) on each kinematic variable in Study 1.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Variables</th>
<th align="center">Sensory feedback (SF)</th>
<th align="center">Object size (OS)</th>
<th align="center">Object distance (OD)</th>
<th align="center">Interactions</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="3" align="left">MT (ms)</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 36.71</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf32">
<mml:math id="m32">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.80</td>
</tr>
<tr>
<td rowspan="3" align="left">PA (cm)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 232.39</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf33">
<mml:math id="m33">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.96</td>
</tr>
<tr>
<td rowspan="3" align="left">SN-PA (a.u.)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>1,9.2</sub> &#x3d; 34.08</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf34">
<mml:math id="m34">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.79</td>
</tr>
<tr>
<td rowspan="3" align="left">PV (cm/s)</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>1.1,10.1</sub> &#x3d; 239.96</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf35">
<mml:math id="m35">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.96</td>
</tr>
<tr>
<td rowspan="3" align="left">T-PV (ms)</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 33.00</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf36">
<mml:math id="m36">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.78</td>
</tr>
<tr>
<td rowspan="3" align="left">TV-CO (cm/s)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 9.42</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 5.11</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.010</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf37">
<mml:math id="m37">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.51</td>
<td align="center">
<inline-formula id="inf38">
<mml:math id="m38">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.36</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>MT: movement time, PA: peak aperture, SN-PA: size-normalized peak aperture, PV: peak transport velocity, T-PV: time to peak transport velocity, and TV-CO: transport velocity at closure onset. NS: not significant.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Study 1. Effects of <bold>(A)</bold> object distance on movement time, <bold>(B)</bold> object size on peak aperture, and <bold>(C)</bold> object size on size-normalized peak aperture. Error bars indicate &#xb1;1<italic>SEM</italic> (<italic>n</italic>&#x20;&#x3d; 10). Data calculated across all levels of sensory feedback for each participant.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g004.tif"/>
</fig>
<p>Neither sensory feedback nor object distance affected any kinematic variable related to the grasp component: peak aperture and size-normalized peak aperture (<italic>p</italic> <inline-formula id="inf39">
<mml:math id="m39">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T1">Table&#x20;1</xref>). With respect to these variables, peak aperture differed across objects of different sizes (<italic>F</italic>2,18&#x20;<inline-formula id="inf40">
<mml:math id="m40">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 232.39, <italic>p</italic> <inline-formula id="inf41">
<mml:math id="m41">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), as did size-normalized peak aperture (<italic>F</italic>1,9.2&#x20;<inline-formula id="inf42">
<mml:math id="m42">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 34.08, <italic>p</italic> <inline-formula id="inf43">
<mml:math id="m43">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001). Bonferroni&#x2019;s post hoc tests revealed that peak aperture was larger for a larger object (medium vs. small: 1.3&#x20;<inline-formula id="inf44">
<mml:math id="m44">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.1&#xa0;cm, <italic>p</italic> <inline-formula id="inf45">
<mml:math id="m45">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. small: 2.7&#x20;<inline-formula id="inf46">
<mml:math id="m46">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.1&#xa0;cm, <italic>p</italic> <inline-formula id="inf47">
<mml:math id="m47">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. medium: 1.3&#x20;<inline-formula id="inf49">
<mml:math id="m49">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.1&#xa0;cm, <italic>p</italic>&#x20;&#x3c; 0.001; <xref ref-type="fig" rid="F4">Figure&#x20;4B</xref>) confirming that the grasp was scaled to object size. However, the size-normalized peak aperture was larger for a smaller object (medium vs. small: &#x2212;1.5&#x20;<inline-formula id="inf50">
<mml:math id="m50">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.3, <italic>p</italic> <inline-formula id="inf51">
<mml:math id="m51">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. small: &#x2212;2.0&#x20;<inline-formula id="inf52">
<mml:math id="m52">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.3, <italic>p</italic> <inline-formula id="inf53">
<mml:math id="m53">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001, <xref ref-type="fig" rid="F4">Figure&#x20;4C</xref>), suggesting that participants had a greater aperture overshoot for smaller objects, consistent with past results (<xref ref-type="bibr" rid="B41">Meulenbroek et&#x20;al., 2001</xref>; <xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>). None of the interaction effects of sensory feedback, object distance, and object size on peak aperture or size-normalized peak aperture were significant (<italic>p</italic> <inline-formula id="inf54">
<mml:math id="m54">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.05; <xref ref-type="table" rid="T1">Table 1</xref>).</p>
<p>Sensory feedback did not affect any variable related to the transport component: peak transport velocity, time to peak transport velocity, and transport velocity at CO (<italic>p</italic> <inline-formula id="inf55">
<mml:math id="m55">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T1">Table&#x20;1</xref>). As expected, peak transport velocity (<italic>F</italic>1.1,10.1 <inline-formula id="inf56">
<mml:math id="m56">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula>&#x20;239.96, <italic>p</italic> <inline-formula id="inf57">
<mml:math id="m57">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), time to peak transport velocity (<italic>F</italic>2,18 <inline-formula id="inf58">
<mml:math id="m58">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula>&#x20;33.00, <italic>p</italic> <inline-formula id="inf59">
<mml:math id="m59">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), and transport velocity at CO (<italic>F</italic>2,18&#x20;<inline-formula id="inf60">
<mml:math id="m60">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 5.11, <italic>p</italic>&#x20;<inline-formula id="inf61">
<mml:math id="m61">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.010) differed across objects placed at different distances. Bonferroni&#x2019;s post hoc tests revealed that the values were larger for&#x20;a more distant object for peak transport velocity (middle vs. near: 12.6&#x20;<inline-formula id="inf62">
<mml:math id="m62">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf63">
<mml:math id="m63">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 23.6&#x20;<inline-formula id="inf64">
<mml:math id="m64">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic>&#x20;<inline-formula id="inf65">
<mml:math id="m65">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. middle: 11.0&#x20;<inline-formula id="inf66">
<mml:math id="m66">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf67">
<mml:math id="m67">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), time to peak transport velocity (middle vs. near: 19&#x20;<inline-formula id="inf68">
<mml:math id="m68">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 3&#xa0;ms, <italic>p</italic> <inline-formula id="inf69">
<mml:math id="m69">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 25&#x20;<inline-formula id="inf70">
<mml:math id="m70">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 3&#xa0;ms, <italic>p</italic> <inline-formula id="inf71">
<mml:math id="m71">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), and transport velocity at CO (far vs. near: 4.3&#x20;<inline-formula id="inf72">
<mml:math id="m72">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.4&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf73">
<mml:math id="m73">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.006). Neither the main effect of object size nor any of the interaction effects of sensory feedback, object distance, and object size on any of these variables was significant (<italic>p</italic> <inline-formula id="inf74">
<mml:math id="m74">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05). Furthermore, transport velocity at CO differed across objects of different sizes (<italic>F</italic>2,18&#x20;<inline-formula id="inf75">
<mml:math id="m75">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 9.42, <italic>p</italic> <inline-formula id="inf76">
<mml:math id="m76">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001). Bonferroni&#x2019;s post hoc tests revealed that transport velocity at CO was lower for a smaller object (large vs. small: 7.8&#x20;<inline-formula id="inf77">
<mml:math id="m77">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.8&#xa0;cm/s, <italic>p</italic>&#x20;<inline-formula id="inf78">
<mml:math id="m78">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.001). Otherwise, neither the main effect of object size nor any of the interaction effects of sensory feedback, object distance, and object size was significant for any of these variables (<italic>p</italic> <inline-formula id="inf79">
<mml:math id="m79">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T1">Table&#x20;1</xref>).</p>
<p>To investigate whether reach-to-grasp coordination was influenced by visual, auditory, and audiovisual feedback, LMEs were performed to test the relationship between time to peak transport velocity and time to peak aperture and between transport velocity at CO and closure distance, and how it was influenced by sensory feedback. Time to peak aperture increased with time to peak transport velocity (<italic>B</italic> <inline-formula id="inf80">
<mml:math id="m80">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 1.23&#x20;<inline-formula id="inf81">
<mml:math id="m81">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.16, <italic>t</italic> <inline-formula id="inf82">
<mml:math id="m82">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 7.95, <italic>p</italic>&#x20;<inline-formula id="inf83">
<mml:math id="m83">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; <xref ref-type="fig" rid="F7">Figure&#x20;7A</xref>). The observed increase in time to peak aperture with an increase in time to peak transport velocity did not differ between the three types of sensory feedback (<italic>p</italic> <inline-formula id="inf84">
<mml:math id="m84">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T2">Table&#x20;2</xref>). Likewise, closure distance increased with transport velocity at CO (<italic>B</italic> <inline-formula id="inf85">
<mml:math id="m85">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.15&#x20;<inline-formula id="inf86">
<mml:math id="m86">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.0057, <italic>t</italic> <inline-formula id="inf87">
<mml:math id="m87">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 26.50, <italic>p</italic> <inline-formula id="inf88">
<mml:math id="m88">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; <xref ref-type="fig" rid="F7">Figure&#x20;7B</xref>). The observed increase in closure distance with an increase in transport velocity at CO did not differ between the three types of sensory feedback (<italic>p</italic> <inline-formula id="inf89">
<mml:math id="m89">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T2">Table&#x20;2</xref>). Together, these results indicate that sensory feedback signaling that the object had been grasped did not affect the coordination between the transport and aperture components, including the initiation of closure based on the state estimate of transport velocity.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Summary of linear mixed-effects (LME) models in Study 1.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Effects</th>
<th align="center">
<italic>B</italic>
</th>
<th align="center">
<inline-formula id="inf90">
<mml:math id="m90">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula>1<italic>SE</italic>
</th>
<th align="center">
<italic>t</italic>
</th>
<th align="center">
<italic>p</italic>
</th>
<th align="center">
<inline-formula id="inf91">
<mml:math id="m91">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="bold-italic">&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Time to peak aperture</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">&#x2003;Intercept</td>
<td align="center">139.44</td>
<td align="center">50.85</td>
<td align="center">2.74</td>
<td align="center">0.0068</td>
<td align="center">&#x2013;</td>
</tr>
<tr>
<td align="left">&#x2003;TPV</td>
<td align="center">1.23</td>
<td align="center">0.16</td>
<td align="center">7.95</td>
<td align="center">0.0000</td>
<td align="center">0.25</td>
</tr>
<tr>
<td align="left">&#x2003;Auditory</td>
<td align="center">14.96</td>
<td align="center">45.47</td>
<td align="center">0.33</td>
<td align="center">0.7424</td>
<td align="center">0.004</td>
</tr>
<tr>
<td align="left">&#x2003;Audiovisual</td>
<td align="center">40.74</td>
<td align="center">44.44</td>
<td align="center">0.92</td>
<td align="center">0.3602</td>
<td align="center">0.004</td>
</tr>
<tr>
<td align="left">&#x2003;TPV &#xd7; auditory</td>
<td align="center">&#x2212;0.05</td>
<td align="center">0.15</td>
<td align="center">&#x2212;0.31</td>
<td align="center">0.7542</td>
<td align="center">0.004</td>
</tr>
<tr>
<td align="left">&#x2003;TPV &#xd7; audiovisual</td>
<td align="center">&#x2212;0.13</td>
<td align="center">0.15</td>
<td align="center">&#x2212;0.87</td>
<td align="center">0.3869</td>
<td align="center">0.004</td>
</tr>
<tr>
<td align="left">Closure distance</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;Intercept</td>
<td align="center">&#x2212;0.82</td>
<td align="center">0.38</td>
<td align="center">&#x2212;2.19</td>
<td align="center">0.0378</td>
<td align="center">&#x2013;</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;TV-CO</td>
<td align="center">0.15</td>
<td align="center">0.01</td>
<td align="center">26.50</td>
<td align="center">0.0000</td>
<td align="center">0.79</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;Auditory</td>
<td align="center">&#x2212;0.23</td>
<td align="center">0.27</td>
<td align="center">&#x2212;0.87</td>
<td align="center">0.3880</td>
<td align="center">0.001</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;Audiovisual</td>
<td align="center">0.18</td>
<td align="center">0.27</td>
<td align="center">0.68</td>
<td align="center">0.4950</td>
<td align="center">0.001</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;TV-CO &#xd7; auditory</td>
<td align="center">0.00</td>
<td align="center">0.01</td>
<td align="center">0.77</td>
<td align="center">0.4413</td>
<td align="center">0.003</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;TV-CO &#xd7; audiovisual</td>
<td align="center">&#x2212;0.01</td>
<td align="center">0.01</td>
<td align="center">&#x2212;0.96</td>
<td align="center">0.3370</td>
<td align="center">0.003</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In summary, these results confirm the known effects of object size and object distance on variables related to the aperture and transport components, respectively (<xref ref-type="bibr" rid="B49">Paulignan et&#x20;al., 1991a</xref>; <xref ref-type="bibr" rid="B48">Paulignan et&#x20;al., 1991b</xref>). However, each type of sensory feedback&#x2014;visual, auditory, or audiovisual&#x2014;is equally effective in supporting successful reach-to-grasp.</p>
</sec>
<sec id="s3-2">
<title>3.2 Study 2: Effects of Collider Size on Reach-to-Grasp Movements</title>
<p>
<xref ref-type="fig" rid="F2">Figure&#x20;2C</xref> shows the trajectories of the mean 2D position of the wrist, thumb, and index finger corresponding to each collider size condition for a representative participant (averaged across all trials) for the medium object placed at the middle distance. <xref ref-type="fig" rid="F2">Figure&#x20;2D</xref> shows mean transport velocity and aperture profiles obtained from the trajectories shown in <xref ref-type="fig" rid="F2">Figure&#x20;2C</xref>. Notice that, in both figures, curves for the five collider sizes show noticeable differences. <xref ref-type="fig" rid="F5">Figure&#x20;5</xref> shows the phase relationship between transport velocity and size-normalized aperture. Notice that the magnitude of size-normalized peak aperture reduces with collider size and disproportionately more for a smaller and a more distant object, but it occurs at about the same transport velocity.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Study 2. Phase plots of size-normalized aperture vs. transport velocity for each collider size for a representative participant. Diamonds and circles indicate size-normalized peak aperture and peak transport velocity, respectively. Black arrows indicate the progression of reach-to-grasp movement.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g005.tif"/>
</fig>
<p>An rm-ANOVA revealed a significant main effect of collider size on movement time (<italic>F</italic>4,36&#x20;<inline-formula id="inf92">
<mml:math id="m92">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 2.87, <italic>p</italic> <inline-formula id="inf93">
<mml:math id="m93">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.030, <xref ref-type="table" rid="T3">Table&#x20;3</xref>). However, Bonferroni&#x2019;s post hoc tests failed to identify any pairwise differences for different collider sizes (<italic>p</italic> <inline-formula id="inf94">
<mml:math id="m94">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05, <xref ref-type="fig" rid="F6">Figure&#x20;6A</xref>). As expected, movement time differed across objects placed at different distances (<italic>F</italic>1.1,10&#x20;<inline-formula id="inf95">
<mml:math id="m95">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 59.70, <italic>p</italic> <inline-formula id="inf96">
<mml:math id="m96">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001). Bonferroni&#x2019;s post hoc tests revealed that movement time was larger for a more distantly placed object (middle vs. near: 49&#x20;<inline-formula id="inf97">
<mml:math id="m97">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 9&#xa0;ms, <italic>p</italic> <inline-formula id="inf98">
<mml:math id="m98">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 97&#x20;<inline-formula id="inf99">
<mml:math id="m99">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 9&#xa0;ms, <italic>p</italic> <inline-formula id="inf100">
<mml:math id="m100">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. middle: 48&#x20;<inline-formula id="inf101">
<mml:math id="m101">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 9&#xa0;ms, <italic>p</italic> <inline-formula id="inf102">
<mml:math id="m102">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.004). Neither the main effect of object size nor any interaction effects of collider size, object distance, and object size were significant (<italic>p</italic> <inline-formula id="inf103">
<mml:math id="m103">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.05).</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Outcomes of 5 &#xd7; 3 &#xd7; 3&#x20;rm-ANOVAs examining the effects of collider size (0.2, 0.4, 0.8, 1.2, and 1.4), object size (small, medium, and large), and object distance (near, middle, and far) on each kinematic variable in Study 2.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Variables</th>
<th align="center">Collider size (CS)</th>
<th align="center">Object size (OS)</th>
<th align="center">Object distance (OD)</th>
<th align="center">Interactions</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="3" align="left">MT (ms)</td>
<td align="center">
<italic>F</italic>
<sub>4,36</sub> &#x3d; 2.87</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>1.1,10</sub> &#x3d; 59.70</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.030</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf174">
<mml:math id="m174">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.24</td>
<td align="center">
<inline-formula id="inf175">
<mml:math id="m175">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.87</td>
</tr>
<tr>
<td rowspan="3" align="left">PA (cm)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>
<italic>1.1,10.4</italic>
</sub> &#x3d; 183.04</td>
<td rowspan="3" align="center">NS</td>
<td align="center">OS&#xd7;OD, F<sub>4,36</sub> &#x3d; 9.19</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf176">
<mml:math id="m176">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.95</td>
<td align="center">
<inline-formula id="inf177">
<mml:math id="m177">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.5</td>
</tr>
<tr>
<td rowspan="3" align="left">SN-PA (a.u.)</td>
<td align="center">
<italic>F</italic>
<sub>4,36</sub> &#x3d; 4.42</td>
<td align="center">
<italic>F</italic>
<sub>1.3,115</sub> &#x3d; 64.60</td>
<td rowspan="3" align="center">NS</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3d; 0.005</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.005</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf178">
<mml:math id="m178">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.33</td>
<td align="center">
<inline-formula id="inf179">
<mml:math id="m179">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.88</td>
</tr>
<tr>
<td rowspan="3" align="left">PV (cm/s)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 11.76</td>
<td align="center">
<italic>F</italic>
<sub>1.1,9.7</sub> &#x3d; 227.51</td>
<td align="center">OS&#xd7;OD, <italic>F</italic>
<sub>4,36</sub> &#x3d; 5.35</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf180">
<mml:math id="m180">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.57</td>
<td align="center">
<inline-formula id="inf181">
<mml:math id="m181">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.96</td>
<td align="center">
<inline-formula id="inf182">
<mml:math id="m182">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.37</td>
</tr>
<tr>
<td rowspan="3" align="left">T-PV (ms)</td>
<td align="center"><italic>F</italic><sub>4,36</sub> &#x3d; 4.57</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 31.77</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3d; 0.004</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf183">
<mml:math id="m183">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.34</td>
<td align="center">
<inline-formula id="inf184">
<mml:math id="m184">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.78</td>
</tr>
<tr>
<td rowspan="3" align="left">TV-CO (cm/s)</td>
<td rowspan="3" align="center">NS</td>
<td align="center">
<italic>F</italic>
<sub>2,18</sub> &#x3d; 38.12</td>
<td align="center">
<italic>F</italic>
<sub>1.1,10.2</sub> &#x3d; 14.42</td>
<td rowspan="3" align="center">NS</td>
</tr>
<tr>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.001</td>
<td align="center">
<italic>p</italic>&#x20;&#x3c; 0.002</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf185">
<mml:math id="m185">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.81</td>
<td align="center">
<inline-formula id="inf186">
<mml:math id="m186">
<mml:mrow>
<mml:msup>
<mml:mi>&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.61</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>MT: movement time, PA: peak aperture, SN-PA: size-normalized peak aperture, PV: peak transport velocity, T-PV: time to peak transport velocity, and TV-CO: transport velocity at closure onset. NS: not significant.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Study 2. Effects of collider size on <bold>(A)</bold> movement time, <bold>(B)</bold> peak aperture, and <bold>(C)</bold> size-normalized peak aperture. Error bars indicate <inline-formula id="inf104">
<mml:math id="m104">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula>1<italic>SEM</italic> (<italic>n</italic>&#x20;&#x3d; 10). Data calculated across all levels of object size and object distance for each participant.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g006.tif"/>
</fig>
<p>Neither collider size nor object distance affected peak aperture (<italic>p</italic> <inline-formula id="inf105">
<mml:math id="m105">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="fig" rid="F6">Figure&#x20;6B</xref>). As expected, aperture differed across objects of different sizes (<italic>F</italic>1.1, 10.4&#x20;<inline-formula id="inf106">
<mml:math id="m106">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 183.04, <italic>p</italic> <inline-formula id="inf107">
<mml:math id="m107">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001). Bonferroni&#x2019;s post hoc tests revealed that peak aperture was larger for a larger object (medium vs. small: 1.2&#x20;<inline-formula id="inf108">
<mml:math id="m108">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula>0.1&#xa0;cm, <italic>p</italic> <inline-formula id="inf109">
<mml:math id="m109">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. small: 2.5&#x20;<inline-formula id="inf110">
<mml:math id="m110">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.1&#xa0;cm, <italic>p</italic> <inline-formula id="inf111">
<mml:math id="m111">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. medium: 1.3&#x20;<inline-formula id="inf112">
<mml:math id="m112">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.1&#xa0;cm, <italic>p</italic> <inline-formula id="inf113">
<mml:math id="m113">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001) confirming that the grasp was scaled to object size. None of the interaction effects of collider size, object distance, and object size on peak aperture was significant (<italic>p</italic>&#x20;&#x3e;&#x20;0.05).</p>
<p>Size-normalized peak aperture differed across collider sizes (<italic>F</italic>4, 36&#x20;<inline-formula id="inf114">
<mml:math id="m114">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 4.42, <italic>p</italic> <inline-formula id="inf115">
<mml:math id="m115">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.005). Bonferroni&#x2019;s post hoc tests revealed that size-normalized peak aperture was smaller for a larger collider (1.4 vs. 0.2&#xa0;cm colliders: &#x2212;0.6&#x20;<inline-formula id="inf116">
<mml:math id="m116">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic> <inline-formula id="inf117">
<mml:math id="m117">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.012; 1.4 vs. 0.4&#xa0;cm colliders: &#x2212;0.5&#x20;<inline-formula id="inf118">
<mml:math id="m118">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic> <inline-formula id="inf119">
<mml:math id="m119">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.043; 1.4 vs. 0.8&#xa0;cm colliders: &#x2212;0.5&#x20;<inline-formula id="inf120">
<mml:math id="m120">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic> <inline-formula id="inf121">
<mml:math id="m121">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.045, <xref ref-type="fig" rid="F6">Figure&#x20;6C</xref>). Size-normalized peak aperture also differed across objects of different sizes (<italic>F</italic>1.3, 11.5&#x20;<inline-formula id="inf122">
<mml:math id="m122">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 64.60, <italic>p</italic> <inline-formula id="inf123">
<mml:math id="m123">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.005). Bonferroni&#x2019;s post hoc tests revealed that, as opposed to peak aperture, size-normalized peak aperture was larger for a smaller object (medium vs. small: &#x2212;1.5&#x20;<inline-formula id="inf125">
<mml:math id="m125">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic>&#x20;<inline-formula id="inf126">
<mml:math id="m126">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. small: &#x2212;2.1&#x20;<inline-formula id="inf127">
<mml:math id="m127">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic> <inline-formula id="inf128">
<mml:math id="m128">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; large vs. medium: &#x2212;0.6&#x20;<inline-formula id="inf129">
<mml:math id="m129">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.2, <italic>p</italic>&#x20;<inline-formula id="inf130">
<mml:math id="m130">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.013), confirming that the grasp was scaled to object size. Neither the main effect of object distance nor any of the interaction effects of collider size, object distance, and object size on size-normalized peak aperture was significant (<italic>p</italic> <inline-formula id="inf131">
<mml:math id="m131">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula>&#x20;0.05; <xref ref-type="table" rid="T3">Table 3</xref>).</p>
<p>The only significant main effect of collider size was observed on time to peak transport velocity (<italic>F</italic>4, 36&#x20;<inline-formula id="inf132">
<mml:math id="m132">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 4.57, <italic>p</italic> <inline-formula id="inf133">
<mml:math id="m133">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.004). Bonferroni&#x2019;s post hoc tests revealed that time to peak transport velocity was larger for a larger collider (0.8 vs. 0.4&#xa0;cm colliders: 16&#x20;<inline-formula id="inf134">
<mml:math id="m134">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 4&#xa0;ms, <italic>p</italic> <inline-formula id="inf135">
<mml:math id="m135">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.007; 1.2 vs. 0.4&#xa0;cm colliders: 14&#x20;<inline-formula id="inf136">
<mml:math id="m136">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 4&#xa0;ms, <italic>p</italic> <inline-formula id="inf137">
<mml:math id="m137">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.027; 1.4 vs. 0.4&#xa0;cm colliders: 15&#x20;<inline-formula id="inf138">
<mml:math id="m138">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 4&#xa0;ms, <italic>p</italic> <inline-formula id="inf139">
<mml:math id="m139">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.015). With respect to these variables, peak transport velocity (<italic>F</italic>1.1,9.7&#x20;<inline-formula id="inf140">
<mml:math id="m140">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 227.51, <italic>p</italic> <inline-formula id="inf141">
<mml:math id="m141">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), time to peak transport velocity (<italic>F</italic>2, 18&#x20;<inline-formula id="inf142">
<mml:math id="m142">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 31.77, <italic>p</italic> <inline-formula id="inf143">
<mml:math id="m143">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), and transport velocity at CO (<italic>F</italic>1.1, 10.2&#x20;<inline-formula id="inf144">
<mml:math id="m144">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 14.42, <italic>p</italic> <inline-formula id="inf145">
<mml:math id="m145">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001) differed across objects placed at different distances. Bonferroni&#x2019;s post hoc tests revealed that the values were larger for a more distant object for peak transport velocity (middle vs. near: 12.5&#x20;<inline-formula id="inf146">
<mml:math id="m146">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf147">
<mml:math id="m147">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 24.1&#x20;<inline-formula id="inf148">
<mml:math id="m148">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf149">
<mml:math id="m149">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. middle: 11.6&#x20;<inline-formula id="inf150">
<mml:math id="m150">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.1&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf151">
<mml:math id="m151">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001), time to peak transport velocity (middle vs. near: 15&#x20;<inline-formula id="inf152">
<mml:math id="m152">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 3&#xa0;ms, <italic>p</italic> <inline-formula id="inf153">
<mml:math id="m153">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. near: 23&#x20;<inline-formula id="inf154">
<mml:math id="m154">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 3&#xa0;ms, <italic>p</italic> <inline-formula id="inf155">
<mml:math id="m155">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; far vs. middle: 8&#x20;<inline-formula id="inf156">
<mml:math id="m156">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 3&#xa0;ms, <italic>p</italic> <inline-formula id="inf157">
<mml:math id="m157">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.044), and transport velocity at CO (middle vs. near: 4.8&#x20;<inline-formula id="inf158">
<mml:math id="m158">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.5&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf159">
<mml:math id="m159">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.013; far vs. near: 7.9&#x20;<inline-formula id="inf160">
<mml:math id="m160">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.5&#xa0;cm/s, <italic>p</italic> <inline-formula id="inf161">
<mml:math id="m161">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001).</p>
<p>To investigate whether reach-to-grasp coordination was influenced by collider size, LMEs were performed to test the relationship between time to peak transport velocity and time to peak aperture and between transport velocity at CO and closure distance and how it was influenced by collider size. Time to peak aperture increased with time to peak transport velocity (<italic>B</italic> <inline-formula id="inf162">
<mml:math id="m162">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.83&#x20;<inline-formula id="inf163">
<mml:math id="m163">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.11, <italic>t</italic> <inline-formula id="inf164">
<mml:math id="m164">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 7.34, <italic>p</italic> <inline-formula id="inf165">
<mml:math id="m165">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; <xref ref-type="fig" rid="F7">Figure&#x20;7C</xref>). The observed increase in time to peak aperture with an increase in time to peak transport velocity was not affected by collider size (<italic>p</italic> <inline-formula id="inf166">
<mml:math id="m166">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T4">Table&#x20;4</xref>). Likewise, closure distance increased with transport velocity at CO (<italic>B</italic> <inline-formula id="inf167">
<mml:math id="m167">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 0.17&#x20;<inline-formula id="inf168">
<mml:math id="m168">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 0.0061, <italic>t</italic> <inline-formula id="inf169">
<mml:math id="m169">
<mml:mo>&#x3d;</mml:mo>
</mml:math>
</inline-formula> 27.37, <italic>p</italic> <inline-formula id="inf170">
<mml:math id="m170">
<mml:mo>&#x3c;</mml:mo>
</mml:math>
</inline-formula> 0.001; <xref ref-type="fig" rid="F7">Figure&#x20;7D</xref>). The observed increase in closure distance with an increase in transport velocity at CO was not affected by collider size (<italic>p</italic> <inline-formula id="inf171">
<mml:math id="m171">
<mml:mo>&#x3e;</mml:mo>
</mml:math>
</inline-formula> 0.05; <xref ref-type="table" rid="T4">Table&#x20;4</xref>). Together, these results indicate that collider size did not affect the coordination between the transport and aperture components, including the initiation of closure based on the state estimate of transport velocity.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Effects of sensory feedback (Study 1, <bold>A</bold> &#x26; <bold>B</bold>) and collider size (Study 2, <bold>C</bold> &#x26; <bold>D</bold>) on reach-to-grasp coordination. <bold>(A</bold>,<bold>C)</bold> Temporal coordination: time to peak transport velocity vs. time to peak aperture. <bold>(B</bold>,<bold>D)</bold> Spatial coordination: transport velocity at CO vs. closure distance. Manipulation of sensory feedback and collider size did not alter reach-to-grasp coordination, indicating that the state-of-the-art hf-VE can support stable reach-to-grasp movement coordination patterns.</p>
</caption>
<graphic xlink:href="frvir-02-648529-g007.tif"/>
</fig>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Summary of LME models in Study 2.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Effects</th>
<th align="center">
<italic>B</italic>
</th>
<th align="center">
<inline-formula id="inf172">
<mml:math id="m172">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula>1<italic>SE</italic>
</th>
<th align="center">
<italic>t</italic>
</th>
<th align="center">
<italic>p</italic>
</th>
<th align="center">
<inline-formula id="inf173">
<mml:math id="m173">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="bold-italic">&#x3b7;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Time to peak aperture</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">&#x2003;Intercept</td>
<td align="center">254.08</td>
<td align="center">39.69</td>
<td align="center">6.40</td>
<td align="center">0.0000</td>
<td align="center">&#x2013;</td>
</tr>
<tr>
<td align="left">&#x2003;TPV</td>
<td align="center">0.83</td>
<td align="center">0.11</td>
<td align="center">7.34</td>
<td align="center">0.0000</td>
<td align="center">0.10</td>
</tr>
<tr>
<td align="left">&#x2003;Collider size</td>
<td align="center">&#x2212;35.93</td>
<td align="center">26.48</td>
<td align="center">&#x2212;1.36</td>
<td align="center">0.1760</td>
<td align="center">0.001</td>
</tr>
<tr>
<td align="left">&#x2003;TPV &#xd7; collider size</td>
<td align="center">0.12</td>
<td align="center">0.09</td>
<td align="center">1.36</td>
<td align="center">0.1750</td>
<td align="center">0.001</td>
</tr>
<tr>
<td align="left">Closure distance</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;Intercept</td>
<td align="center">&#x2212;1.10</td>
<td align="center">0.43</td>
<td align="center">&#x2212;2.58</td>
<td align="center">0.0168</td>
<td align="center">&#x2013;</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;TV-CO</td>
<td align="center">0.17</td>
<td align="center">0.01</td>
<td align="center">27.37</td>
<td align="center">0.0000</td>
<td align="center">0.62</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;Collider size</td>
<td align="center">&#x2212;0.06</td>
<td align="center">0.22</td>
<td align="center">&#x2212;0.28</td>
<td align="center">0.7792</td>
<td align="center">0.002</td>
</tr>
<tr>
<td align="left">&#xa0;&#xa0;TV-CO &#xd7; collider size</td>
<td align="center">0.00</td>
<td align="center">0.01</td>
<td align="center">&#x2212;0.94</td>
<td align="center">0.3492</td>
<td align="center">0.0002</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In summary, these results further confirm the known effects of object size and object distance on variables related to the aperture and transport components, respectively (<xref ref-type="bibr" rid="B49">Paulignan et&#x20;al., 1991a</xref>; <xref ref-type="bibr" rid="B48">Paulignan et&#x20;al., 1991b</xref>). Most importantly, we show that collider size also affects properties of the grasp relative to the object, specifically, a larger collider prompts a proportionally smaller aperture. Nonetheless, it appears that collider size has no bearing on reach-to-grasp coordination.</p>
</sec>
</sec>
<sec id="s4">
<title>4 Discussion</title>
<p>We investigated the effects of sensory feedback mode (Study 1) and collider size (Study 2) on the coordination of reach-to-grasp movements in hf-VE. Contrary to our expectation (H1), we found that visual, auditory, and audiovisual feedback did not differentially impact key features of reach-to-grasp kinematics in the absence of terminal haptic feedback. In Study 2, larger colliders led to a smaller size-normalized peak aperture (H2) suggesting a possible influence of spherical collider size on the perception of virtual object size and motor planning of reach-to-grasp. Critically, reach-to-grasp spatiotemporal coordination patterns were robust both to manipulations of the sensory modality used for haptic sensory substitution and to manipulations of spherical collider&#x20;size.</p>
<sec id="s4-1">
<title>4.1 Manipulations of Sensory Substitution</title>
<p>In Study 1, we did not observe any changes in the transport and aperture kinematics or in the reach-to-grasp coordination as a function of the type of sensory substitution that was provided (visual, auditory, or audiovisual) to indicate that the object had been grasped in the absence of haptic feedback about object properties. Our data did confirm the known effects of object size and object distance on variables related to the aperture and transport components, respectively (<xref ref-type="bibr" rid="B49">Paulignan et&#x20;al., 1991a</xref>; <xref ref-type="bibr" rid="B48">Paulignan et&#x20;al., 1991b</xref>), indicating that variation in reach-to-grasp patterns with respect to object properties in our hf-VE is comparable to that found in the real world as previously indicated in <xref ref-type="bibr" rid="B22">Furmanek et&#x20;al. (2019)</xref>. While many studies have explored the role of sensory substitution of haptic feedback in VR (<xref ref-type="bibr" rid="B60">Sikstr&#xf6;m et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B16">Cooper et&#x20;al., 2018</xref>), few studies have investigated the effect of sensory substitution for haptic feedback, specifically in the context of reach-to-grasp movements. One study that used simple spherical colliders for grasping reported faster movement time when sensory substitution for haptic feedback was provided with audio and audiovisual cues compared to visual or absent cues that the object was grasped (<xref ref-type="bibr" rid="B74">Zahariev and MacKenzie, 2007</xref>). Our findings that there were no differences in movement kinematics for different types of haptic sensory substitution conditions do not support these past findings, though differences in the outcomes may be explained, in part, by the VR technology utilized. For example, in <xref ref-type="bibr" rid="B74">Zahariev and MacKenzie (2007)</xref>, participants grasped mirror reflections of computer-generated projections of objects. 
Such setups have lower fidelity of object rendering than what is typical of HMD-VR and might result in greater salience to auditory feedback. In a more recent study using HMD-VR, participants performed reach-to-grasp movements as part of a pick and place task in less time with visual compared to auditory sensory substitution but interestingly indicated a preference for auditory cues that the object was grasped (<xref ref-type="bibr" rid="B12">Canales and J&#xf6;rg, 2020</xref>). Notably, differences between audio, visual, and audiovisual feedback were small, and since reach-to-grasp kinematics were not presented, interpretations as to why the movements were slower with audio feedback were not possible to make. In an immersive hf-VE like ours, participants might not have had to rely on one sensory modality over the other and hence did not show differences in reach-to-grasp coordination based on visual, auditory, and audiovisual feedback. Furthermore, the fact that we did not observe differences in movement kinematics and spatiotemporal reach-to-grasp coordination (<xref ref-type="fig" rid="F7">Figures 7A,B</xref>) suggests that, in a high-fidelity VR environment, the choice of modality for sensory substitution for haptic feedback may have relatively little bearing on behavior. We speculate that, with high-fidelity feedback of the hand-object interaction, visual feedback of the hand-object collision, rather than explicit feedback in the form of overt sensory substitution, may govern behavior.</p>
<p>The finding that visual information may be sufficient for haptic-free grasping is in agreement with the interesting line of research using a haptic-free robotic system. For instance, Meccariello and others (<xref ref-type="bibr" rid="B40">Meccariello et&#x20;al., 2016</xref>) showed that experienced surgeons perform conventional suturing faster and more accurately than nonexperts when only visual information was used. It has been proposed that experienced surgeons may create a perception of haptic feedback during haptic-free robotic surgery based on visual information and previously learned haptic sensations (<xref ref-type="bibr" rid="B26">Hagen et&#x20;al., 2008</xref>). This suggests that haptic feedback may be needed during skill acquisition, but not necessary for practiced movement.</p>
<p>Another parsimonious explanation for why we did not observe between-condition differences of sensory feedback type on grasp kinematics is related to the study design. As opposed to <xref ref-type="bibr" rid="B74">Zahariev and MacKenzie (2007)</xref> and <xref ref-type="bibr" rid="B72">Zahariev and MacKenzie (2008)</xref>, who randomized the order of object size trials, our participants performed reach-to-grasp actions to each object in a blocked manner (i.e.,&#x20;all trials for each object size-distance pair were completed consecutively within each block). Thus, in our study, subjects&#x2019; prior experience&#x2014;specifically, the proprioceptively perceived final aperture&#x2014;might have made reliance on explicit feedback of grasp less necessary. Indeed, the calibration of the current reach-to-grasp movement based on past movements is well documented (<xref ref-type="bibr" rid="B23">Gentilucci et&#x20;al., 1995</xref>; <xref ref-type="bibr" rid="B57">S&#xe4;fstr&#xf6;m and Edin, 2004</xref>; <xref ref-type="bibr" rid="B56">S&#xe4;fstr&#xf6;m and Edin, 2005</xref>; <xref ref-type="bibr" rid="B7">Bingham et&#x20;al., 2007</xref>; <xref ref-type="bibr" rid="B42">Mon-Williams and Bingham, 2007</xref>; <xref ref-type="bibr" rid="B15">Coats et&#x20;al., 2008</xref>; <xref ref-type="bibr" rid="B55">S&#xe4;fstr&#xf6;m and Edin, 2008</xref>; <xref ref-type="bibr" rid="B21">Foster et&#x20;al., 2011</xref>). Finally, the availability of continuous online feedback of the target object and colliders might have also reduced reliance on sensory feedback (<xref ref-type="bibr" rid="B74">Zahariev and MacKenzie, 2007</xref>; <xref ref-type="bibr" rid="B72">Zahariev and MacKenzie, 2008</xref>; <xref ref-type="bibr" rid="B67">Volcic and Domini, 2014</xref>). 
The present study was not designed to test such a hypothesis, but future work can explicitly investigate whether reliance on different modalities of terminal sensory feedback may be stronger in a randomized design, when anticipation and planning are less dependable.</p>
</sec>
<sec id="s4-2">
<title>4.2 Manipulations of Collider Size</title>
<p>In Study 2, there was a significant main effect of collider size for movement time, time to peak transport velocity, and size-normalized peak aperture indicating that collider size modified key features of the reach-to-grasp movement. It is likely that the collider size altered the perception of object size&#x2014;an object might be perceived to be smaller when using a larger collider&#x2014;and that this altered perception might have affected the planning of reach-to-grasp movements. Indeed, previous studies have shown that the hand avatar may act as a metric to scale the intrinsic object properties (e.g., object size) (<xref ref-type="bibr" rid="B34">Linkenauger et&#x20;al., 2011</xref>; <xref ref-type="bibr" rid="B33">Linkenauger et&#x20;al., 2013</xref>; <xref ref-type="bibr" rid="B45">Ogawa et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B46">Ogawa et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B32">Lin et&#x20;al., 2019</xref>). Interestingly, <xref ref-type="bibr" rid="B45">Ogawa et&#x20;al. (2017)</xref> found that perception of object size was affected by the realism of the avatar, with a biological avatar showing a greater effect on object size perception than an abstract avatar such as what was used in our study. However, in that study participants did not grasp the object; the task was simply to carry the virtual cube object on an open avatar palm. It may therefore be concluded that the effect of avatar size on perception is likely mediated by the requirements of the task, and the use of avatar size as a means to scale the dimension of the intrinsic object properties is more sensitive when the avatar is used to actually grasp the object. One caveat to our finding is that a collider size by object size interaction was not observed. 
If collider size caused a linear scaling of the perception of object size, then a collider size by object size interaction would be expected as the change in the ratio of collider size to object size will be different for different object sizes. Hand size manipulations do not affect the perceived size of objects that are too big to be grasped, suggesting that hand size may only be used as a scaling mechanism when the object affords the relevant action, in this case, grasping (<xref ref-type="bibr" rid="B34">Linkenauger et&#x20;al., 2011</xref>), providing further evidence of nonlinearities in the use of the hand avatar as a &#x201c;perceptual ruler.&#x201d; Therefore, our findings indicate that either the scaling of perception of object size by collider size is nonlinear or the changes we observed arise from different explicit strategies for different colliders independent of perception. Future research will test these competing hypotheses.</p>
<p>Assuming that collider size did in fact influence the perception of object size, it follows that the size of the colliders might have had a similar effect on altering the perceptual scaling of object distance. This interpretation provides a possible explanation for the significant main effect of collider size on time to peak transport velocity. However, given that the ratio of collider size to object distance was much smaller than the ratio of collider size to object size, we think that perceptual effects on distance were probably negligible, at least relative to the perceptual effects on object size. We therefore offer an alternative explanation for the scaling of peak transport velocity and associated movement time, with different collider sizes. If collider size affected the planning of aperture overshoot, as evidenced by the main effect of size-normalized peak aperture, then we may assume that this was also incorporated into the planning of transport to maintain the spatiotemporal coordination of reach-to-grasp. Our data indicate that this may be the case, as both temporal (the relationship between time to peak transport velocity and time to peak aperture) and spatial (the relationship between transport velocity at CO and closure distance) aspects of coordination were not influenced by collider size (<xref ref-type="fig" rid="F7">Figures&#x20;7C,D</xref>).</p>
<p>Agnostic to whether the effects of the colliders on aperture profiles were perceptual or strategic, we surmise that these effects were present at the beginning of the movement to ensure that the coordination of the reach and grasp component was not disrupted. Preservation of reach-to-grasp coordination as the primary goal of reach-to-grasp movements is something we have observed in our previous work (<xref ref-type="bibr" rid="B22">Furmanek et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Mangalam et&#x20;al., 2021</xref>). The block nature of our design likely facilitated the described effect on planning; however, we do not believe that proprioceptive memory had a large influence on the effects observed in Study 2. If proprioceptive memory did influence behavior, we can assume that it would be equal across all collider sizes and therefore cannot explain behavioral differences across collider sizes. Future research should test whether the observations here hold if object size and distance are randomized.</p>
<p>Our result that larger colliders led to a smaller size-normalized peak aperture can also be framed using the equilibrium point hypothesis (EPH) (<xref ref-type="bibr" rid="B20">Feldman, 1986</xref>). In this framework, the peak aperture at a location near the object may be considered a key point in the referent trajectory driving the limb and finger movements (<xref ref-type="bibr" rid="B69">Weiss and Jeannerod, 1998</xref>). Given the evidence that the referent configuration for a reach-to-grasp action is specified depending on the object shape, localization, and orientation to define a position-dimensional variable, threshold muscle length (<xref ref-type="bibr" rid="B71">Yang and Feldman, 2010</xref>), it is possible that collider size may also influence the referent configuration. One possibility is that collider size may influence the perceived force needed to grasp the object (<xref ref-type="bibr" rid="B50">Pilon et&#x20;al., 2007</xref>) despite the virtual object having no physical properties. Future studies may be specifically designed to test this hypothesis for hf-VE.</p>
</sec>
<sec id="s4-3">
<title>4.3 Limitations</title>
<p>Our studies had several limitations. Data were collected from only ten participants limiting the generalization of our findings and potentially exposing us to type 2 error if a certain outcome measure effect size is small. The sample involved only three female participants making it difficult to understand if there may be sex-dependent differences in reach-to-grasp performance, particularly in light of recent evidence that VR may be experienced differently between male and female participants (<xref ref-type="bibr" rid="B43">Munafo et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B18">Curry et&#x20;al., 2020</xref>). We used a simple hand avatar rendering of spheres to represent only the tips of the thumb and index finger, and the results of this study may not extrapolate to more anthropomorphic avatars. Our VE was simple comprising only the table, object to be grasped, and hand avatar. Use of the hand avatar as a &#x201c;perceptual ruler&#x201d; for objects in the scene may be different for richer environments, especially for those comprising objects with strong connotations of their size (e.g., a soda can). Finally, the degree of stereopsis, presence, and immersion and symptoms of cybersickness were not recorded, and therefore, the influence of these factors on individual participant behavior is unknown.</p>
</sec>
</sec>
<sec id="s5">
<title>5 Concluding Remarks</title>
<p>The results of our studies together suggest that spatiotemporal coordination of reach-to-grasp in a high-fidelity immersive hf-VE is robust to the type of modality (e.g., visual/auditory) used as a sensory substitute for the absence of haptic feedback and to the size of the avatar that represents the fingertips. Avatar size may modify the magnitude of peak aperture in hf-VE when using spheres to represent the fingertips, but this change did not affect spatiotemporal coordination between reach and grasp components of the movement. We suggest that the modulation of aperture associated with avatar size may be rooted in the use of the avatar as a &#x201c;perceptual ruler&#x201d; for intrinsic properties of virtual objects. These results have implications for commercial and clinical use of hf-VE and should be evaluated in relation to technological limitations of the VR system (i.e.,&#x20;tracking accuracy, update rate, and display latency) (<xref ref-type="bibr" rid="B62">Stanney, 2002</xref>). Specifically, when VR is used for manual skill training or neurorehabilitation (<xref ref-type="bibr" rid="B2">Adamovich et&#x20;al., 2005</xref>; <xref ref-type="bibr" rid="B1">Adamovich et&#x20;al., 2009</xref>; <xref ref-type="bibr" rid="B39">Massetti et&#x20;al., 2018</xref>), future work should consider the implications of avatar size on the transfer of learning from the VE to the real world especially in populations with deficits in multisensory integration.</p>
</sec>
</body>
<back>
<sec id="s6">
<title>Data Availability Statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation. Please contact the corresponding author by e-mail.</p>
</sec>
<sec id="s7">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by the Institutional Review Board (IRB) at Northeastern University. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>MF, MM, MY, and ET conceived and designed research; MF and AS performed experiments; MF, MM, KL, AS, and MY analyzed data; MF, MM, KL, MY, and ET interpreted results of experiments; MF prepared figures; MF, MY, and ET drafted manuscript; MF, MM, KL, AS, MY, and ET edited and revised manuscript; MF, MM, KL, AS, MY, and ET approved the final version of the manuscript.</p>
</sec>
<sec id="s9">
<title>Funding</title>
<p>This work was supported in part by NIH-2R01NS085122 (ET), NIH-2R01HD058301 (ET), NSF-CBET-1804540 (ET), NSF-CBET-1804550 (ET), and NSF-CMMI-M3X-1935337 (ET,&#x20;MY).</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ack>
<p>The authors thank Alex Huntoon and Samuel Berin for programming the VR platform used for this&#x20;study.</p>
</ack>
<sec id="s12">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frvir.2021.648529/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frvir.2021.648529/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Table1.DOCX" id="SM1" mimetype="application/DOCX" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adamovich</surname>
<given-names>S. V.</given-names>
</name>
<name>
<surname>Fluet</surname>
<given-names>G. G.</given-names>
</name>
<name>
<surname>Tunik</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Merians</surname>
<given-names>A. S.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Sensorimotor Training in Virtual Reality: a Review</article-title>. <source>Nre</source> <volume>25</volume>, <fpage>29</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.3233/nre-2009-0497</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adamovich</surname>
<given-names>S. V.</given-names>
</name>
<name>
<surname>Merians</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Boian</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lewis</surname>
<given-names>J.&#x20;A.</given-names>
</name>
<name>
<surname>Tremaine</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Burdea</surname>
<given-names>G. S.</given-names>
</name>
<etal/>
</person-group> (<year>2005</year>). <article-title>A Virtual Reality-Based Exercise System for Hand Rehabilitation Post-Stroke</article-title>. <source>Presence: Teleoperators &#x26; Virtual Environments</source> <volume>14</volume>, <fpage>161</fpage>&#x2013;<lpage>174</lpage>. <pub-id pub-id-type="doi">10.1162/1054746053966996</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Argelaguet</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Hoyet</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Trico</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>L&#xe9;cuyer</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The Role of Interaction in Virtual Embodiment: Effects of the Virtual Hand Representation</article-title>, <conf-name>2016 IEEE Virtual Reality (VR) (IEEE), 19-23 March 2016, Greenville, SC, USA</conf-name>, <fpage>3</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1109/vr.2016.7504682</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barrett</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Side Effects of Virtual Environments: A Review of the Literature. Command and control division information sciences laboratory. Defense science and technology organization Canberra, Australia, May 2004</article-title>. </citation>
</ref>
<ref id="B5">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Bates</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>M&#xe4;chler</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bolker</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Walker</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Fitting linear mixed-effects models using Lme4</article-title>. <comment>
<italic>arXiv preprint</italic> <ext-link ext-link-type="uri" xlink:href="http://arXiv:1406.5823">arXiv:1406.5823</ext-link>
</comment>. </citation>
</ref>
<ref id="B6">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ben-Shachar</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Makowski</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>L&#xfc;decke</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Package &#x2018;effectsize&#x2019;</source>.</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bingham</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Coats</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Mon-Williams</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Natural Prehension in Trials without Haptic Feedback but Only when Calibration Is Allowed</article-title>. <source>Neuropsychologia</source> <volume>45</volume>, <fpage>288</fpage>&#x2013;<lpage>294</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2006.07.011</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bingham</surname>
<given-names>G. P.</given-names>
</name>
<name>
<surname>Mon-Williams</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>The Dynamics of Sensorimotor Calibration in Reaching-To-Grasp Movements</article-title>. <source>J.&#x20;Neurophysiol.</source> <volume>110</volume>, <fpage>2857</fpage>&#x2013;<lpage>2862</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00112.2013</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bozzacchi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Brenner</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Smeets</surname>
<given-names>J.&#x20;B.</given-names>
</name>
<name>
<surname>Volcic</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>How Removing Visual Information Affects Grasping Movements</article-title>. <source>Exp. Brain Res.</source> <volume>236</volume>, <fpage>985</fpage>&#x2013;<lpage>995</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-018-5186-6</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bozzacchi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Volcic</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Effect of Visual and Haptic Feedback on Grasping Movements</article-title>. <source>J.&#x20;Neurophysiol.</source> <volume>112</volume>, <fpage>3189</fpage>&#x2013;<lpage>3196</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00439.2014</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bozzacchi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Volcic</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Grasping in Absence of Feedback: Systematic Biases Endure Extensive Training</article-title>. <source>Exp. Brain Res.</source> <volume>234</volume>, <fpage>255</fpage>&#x2013;<lpage>265</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-015-4456-9</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Canales</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>J&#xf6;rg</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Performance Is Not Everything: Audio Feedback Preferred over Visual Feedback for Grasping Task in Virtual Reality</article-title> MIG &#x0027;20: Motion, Interaction and Games, 16&#x2013;18, October 2020, Virtual Event SC USA. <pub-id pub-id-type="doi">10.1145/3424636.3426897</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Castiello</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Giordano</surname>
<given-names>B. L.</given-names>
</name>
<name>
<surname>Begliomini</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ansuini</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Grassi</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>When Ears Drive Hands: the Influence of Contact Sound on Reaching to Grasp</article-title>. <source>PloS one</source> <volume>5</volume>, <fpage>e12240</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0012240</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Castiello</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>The Neuroscience of Grasping</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>6</volume>, <fpage>726</fpage>&#x2013;<lpage>736</lpage>. <pub-id pub-id-type="doi">10.1038/nrn1744</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coats</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Bingham</surname>
<given-names>G. P.</given-names>
</name>
<name>
<surname>Mon-Williams</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Calibrating Grasp Size and Reach Distance: Interactions Reveal Integral Organization of Reaching-To-Grasp Movements</article-title>. <source>Exp. Brain Res.</source> <volume>189</volume>, <fpage>211</fpage>&#x2013;<lpage>220</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-008-1418-5</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cooper</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Milella</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Pinto</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cant</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>White</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Meyer</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The Effects of Substitute Multisensory Feedback on Task Performance and the Sense of Presence in a Virtual Reality Environment</article-title>. <source>PloS one</source> <volume>13</volume>, <fpage>e0191846</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0191846</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Culham</surname>
<given-names>J.&#x20;C.</given-names>
</name>
<name>
<surname>Cavina-Pratesi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Singhal</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>The Role of Parietal Cortex in Visuomotor Control: what Have We Learned from Neuroimaging?</article-title>. <source>Neuropsychologia</source> <volume>44</volume>, <fpage>2668</fpage>&#x2013;<lpage>2684</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2005.11.003</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Curry</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Peterson</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Stoffregen</surname>
<given-names>T. A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Cybersickness in Virtual Reality Head-Mounted Displays: Examining the Influence of Sex Differences and Vehicle Control</article-title>. <source>Int. J.&#x20;Human-Computer Interaction</source> <volume>36</volume>, <fpage>1161</fpage>&#x2013;<lpage>1167</lpage>. <pub-id pub-id-type="doi">10.1080/10447318.2020.1726108</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dubrowski</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bock</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Carnahan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>J&#xfc;ngling</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>The Coordination of Hand Transport and Grasp Formation during Single- and Double-Perturbed Human Prehension Movements</article-title>. <source>Exp. Brain Res.</source> <volume>145</volume>, <fpage>365</fpage>&#x2013;<lpage>371</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-002-1120-y</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feldman</surname>
<given-names>A. G.</given-names>
</name>
</person-group> (<year>1986</year>). <article-title>Once More on the Equilibrium-Point Hypothesis (&#x3bb; Model) for Motor Control</article-title>. <source>J.&#x20;Mot. Behav.</source> <volume>18</volume>, <fpage>17</fpage>&#x2013;<lpage>54</lpage>. <pub-id pub-id-type="doi">10.1080/00222895.1986.10735369</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Foster</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Fantoni</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Caudek</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Integration of Disparity and Velocity Information for Haptic and Perceptual Judgments of Object Depth</article-title>. <source>Acta Psychologica</source> <volume>136</volume>, <fpage>300</fpage>&#x2013;<lpage>310</lpage>. <pub-id pub-id-type="doi">10.1016/j.actpsy.2010.12.003</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Furmanek</surname>
<given-names>M. P.</given-names>
</name>
<name>
<surname>Schettino</surname>
<given-names>L. F.</given-names>
</name>
<name>
<surname>Yarossi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kirkman</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Adamovich</surname>
<given-names>S. V.</given-names>
</name>
<name>
<surname>Tunik</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Coordination of Reach-To-Grasp in Physical and Haptic-free Virtual Environments</article-title>. <source>J.&#x20;Neuroengineering Rehabil.</source> <volume>16</volume>, <fpage>78</fpage>. <pub-id pub-id-type="doi">10.1186/s12984-019-0525-9</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gentilucci</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Daprati</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Toni</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Chieffi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Saetti</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Unconscious Updating of Grasp Motor Program</article-title>. <source>Exp. Brain Res.</source> <volume>105</volume>, <fpage>291</fpage>&#x2013;<lpage>303</lpage>. <pub-id pub-id-type="doi">10.1007/BF00240965</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gentilucci</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chieffi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Scarpa</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Castiello</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>1992</year>). <article-title>Temporal Coupling between Transport and Grasp Components during Prehension Movements: Effects of Visual Perturbation</article-title>. <source>Behav. Brain Res.</source> <volume>47</volume>, <fpage>71</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/s0166-4328(05)80253-0</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Groen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Werkhoven</surname>
<given-names>P. J.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Visuomotor Adaptation to Virtual Hand Position in Interactive Virtual Environments</article-title>. <source>Presence</source> <volume>7</volume>, <fpage>429</fpage>&#x2013;<lpage>446</lpage>. <pub-id pub-id-type="doi">10.1162/105474698565839</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hagen</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Meehan</surname>
<given-names>J.&#x20;J.</given-names>
</name>
<name>
<surname>Inan</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Morel</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Visual Clues Act as a Substitute for Haptic Feedback in Robotic Surgery</article-title>. <source>Surg. Endosc.</source> <volume>22</volume>, <fpage>1505</fpage>&#x2013;<lpage>1508</lpage>. <pub-id pub-id-type="doi">10.1007/s00464-007-9683-0</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haggard</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wing</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Coordinated Responses Following Mechanical Perturbation of the Arm during Prehension</article-title>. <source>Exp. Brain Res.</source> <volume>102</volume>, <fpage>483</fpage>&#x2013;<lpage>494</lpage>. <pub-id pub-id-type="doi">10.1007/BF00230652</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haggard</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wing</surname>
<given-names>A. M.</given-names>
</name>
</person-group> (<year>1991</year>). <article-title>Remote Responses to Perturbation in Human Prehension</article-title>. <source>Neurosci. Lett.</source> <volume>122</volume>, <fpage>103</fpage>&#x2013;<lpage>108</lpage>. <pub-id pub-id-type="doi">10.1016/0304-3940(91)90204-7</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hosang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Davarpanah Jazi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Heath</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Grasping a 2D Object: Terminal Haptic Feedback Supports an Absolute Visuo-Haptic Calibration</article-title>. <source>Exp. Brain Res.</source> <volume>234</volume>, <fpage>945</fpage>&#x2013;<lpage>954</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-015-4521-4</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jeannerod</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>Intersegmental Coordination during Reaching at Natural Visual Objects</article-title>. In: <source>Long J, Baddeley A (eds) Attention and Performance IX. Erlbaum, Hillsdale, NJ</source> <volume>234</volume>, <fpage>153</fpage>&#x2013;<lpage>169</lpage>. </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jeannerod</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1984</year>). <article-title>The Timing of Natural Prehension Movements</article-title>. <source>J.&#x20;Mot. Behav.</source> <volume>16</volume>, <fpage>235</fpage>&#x2013;<lpage>254</lpage>. <pub-id pub-id-type="doi">10.1080/00222895.1984.10735319</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Normoyle</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Adkins</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Robb</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion</article-title>. In <conf-name>2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR) (IEEE). 23-27 March 2019, Osaka, Japan</conf-name>, <fpage>510</fpage>&#x2013;<lpage>518</lpage>. <pub-id pub-id-type="doi">10.1109/vr.2019.8797787</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Linkenauger</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Leyrer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>B&#xfc;lthoff</surname>
<given-names>H. H.</given-names>
</name>
<name>
<surname>Mohler</surname>
<given-names>B. J.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Welcome to Wonderland: The Influence of the Size and Shape of a Virtual Hand on the Perceived Size and Shape of Virtual Objects</article-title>. <source>PloS one</source> <volume>8</volume>, <fpage>e68594</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0068594</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Linkenauger</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Witt</surname>
<given-names>J.&#x20;K.</given-names>
</name>
<name>
<surname>Proffitt</surname>
<given-names>D. R.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Taking a Hands-On Approach: Apparent Grasping Ability Scales the Perception of Object Size</article-title>. <source>J.&#x20;Exp. Psychol. Hum. Perception Perform.</source> <volume>37</volume>, <fpage>1432</fpage>&#x2013;<lpage>1441</lpage>. <pub-id pub-id-type="doi">10.1037/a0024248</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>High-fidelity Grasping in Virtual Reality Using a Glove-Based System</article-title>. In <conf-name>2019 International Conference on Robotics and Automation (ICRA) (IEEE). 20-24 May 2019, Montreal, QC, Canada</conf-name>, <fpage>5180</fpage>&#x2013;<lpage>5186</lpage>. <pub-id pub-id-type="doi">10.1109/icra.2019.8794230</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lok</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Naik</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Whitton</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Brooks</surname>
<given-names>F. P.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Effects of Handling Real Objects and Self-Avatar Fidelity on Cognitive Task Performance and Sense of Presence in Virtual Environments</article-title>. <source>Presence: Teleoperators &#x26; Virtual Environments</source> <volume>12</volume>, <fpage>615</fpage>&#x2013;<lpage>628</lpage>. <pub-id pub-id-type="doi">10.1162/105474603322955914</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Luckett</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <source>A Quantitative Evaluation of the HTC Vive for Virtual Reality Research</source>. <comment>Ph.D. thesis</comment>. <publisher-loc>Oxford, MS</publisher-loc>: <publisher-name>The University of Mississippi</publisher-name>.</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mangalam</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yarossi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Furmanek</surname>
<given-names>M. P.</given-names>
</name>
<name>
<surname>Tunik</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Control of Aperture Closure during Reach-To-Grasp Movements in Immersive Haptic-free Virtual Reality</article-title>. <source>Exp. Brain Res.</source> <volume>239</volume>, <fpage>1651</fpage>&#x2013;<lpage>1665</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-021-06079-8</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Massetti</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Da Silva</surname>
<given-names>T. D.</given-names>
</name>
<name>
<surname>Crocetta</surname>
<given-names>T. B.</given-names>
</name>
<name>
<surname>Guarnieri</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>De Freitas</surname>
<given-names>B. L.</given-names>
</name>
<name>
<surname>Bianchi Lopes</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>The Clinical Utility of Virtual Reality in Neurorehabilitation: a Systematic Review</article-title>. <source>J.&#x20;Cent. Nerv. Syst. Dis.</source> <volume>10</volume>, <fpage>1179573518813541</fpage>. <pub-id pub-id-type="doi">10.1177/1179573518813541</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Meccariello</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Faedi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>AlGhamdi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Montevecchi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Firinu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zanotti</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>An Experimental Study about Haptic Feedback in Robotic Surgery: May Visual Feedback Substitute Tactile Feedback?</article-title> <source>J.&#x20;Robotic Surg.</source> <volume>10</volume>, <fpage>57</fpage>&#x2013;<lpage>61</lpage>. <pub-id pub-id-type="doi">10.1007/s11701-015-0541-0</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Meulenbroek</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Rosenbaum</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Jansen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Vaughan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Vogt</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Multijoint Grasping Movements</article-title>. <source>Exp. Brain Res.</source> <volume>138</volume>, <fpage>219</fpage>&#x2013;<lpage>234</lpage>. <pub-id pub-id-type="doi">10.1007/s002210100690</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mon-Williams</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bingham</surname>
<given-names>G. P.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Calibrating Reach Distance to Visual Targets</article-title>. <source>J.&#x20;Exp. Psychol. Hum. Perception Perform.</source> <volume>33</volume>, <fpage>645</fpage>&#x2013;<lpage>656</lpage>. <pub-id pub-id-type="doi">10.1037/0096-1523.33.3.645</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Munafo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Diedrick</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Stoffregen</surname>
<given-names>T. A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The Virtual Reality Head-Mounted Display Oculus Rift Induces Motion Sickness and Is Sexist in its Effects</article-title>. <source>Exp. Brain Res.</source> <volume>235</volume>, <fpage>889</fpage>&#x2013;<lpage>901</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-016-4846-7</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Niehorster</surname>
<given-names>D. C.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Lappe</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The Accuracy and Precision of Position and Orientation Tracking in the HTC Vive Virtual Reality System for Scientific Research</article-title>. <source>i-Perception</source> <volume>8</volume>, <fpage>2041669517708205</fpage>. <pub-id pub-id-type="doi">10.1177/2041669517708205</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ogawa</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Narumi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hirose</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Factors and Influences of Body Ownership over Virtual Hands</article-title>, <conf-name>International Conference on Human Interface and the Management of Information. July 9-14, 2017, Vancouver, BC, Canada</conf-name>. <publisher-name>Springer</publisher-name>, <fpage>589</fpage>&#x2013;<lpage>597</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-58521-5_46</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ogawa</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Narumi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hirose</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Object Size Perception in Immersive Virtual Reality: Avatar Realism Affects the Way We Perceive</article-title>. In <conf-name>2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR) (IEEE). 18-22 March 2018, Tuebingen/Reutlingen, Germany</conf-name>, <fpage>647</fpage>&#x2013;<lpage>648</lpage>. <pub-id pub-id-type="doi">10.1109/vr.2018.8446318</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Oldfield</surname>
<given-names>R. C.</given-names>
</name>
</person-group> (<year>1971</year>). <article-title>The Assessment and Analysis of Handedness: the Edinburgh Inventory</article-title>. <source>Neuropsychologia</source> <volume>9</volume>, <fpage>97</fpage>&#x2013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/0028-3932(71)90067-4</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paulignan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jeannerod</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>MacKenzie</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Marteniuk</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>1991b</year>). <article-title>Selective Perturbation of Visual Input during Prehension Movements. 2. The Effects of Changing Object Size</article-title>. <source>Exp. Brain Res.</source> <volume>87</volume>, <fpage>407</fpage>&#x2013;<lpage>420</lpage>. <pub-id pub-id-type="doi">10.1007/BF00231858</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paulignan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>MacKenzie</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Marteniuk</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Jeannerod</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1991a</year>). <article-title>Selective Perturbation of Visual Input during Prehension Movements. 1. The Effects of Changing Object Position</article-title>. <source>Exp. Brain Res.</source> <volume>83</volume>, <fpage>502</fpage>&#x2013;<lpage>512</lpage>. <pub-id pub-id-type="doi">10.1007/BF00229827</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pilon</surname>
<given-names>J.-F.</given-names>
</name>
<name>
<surname>De Serres</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Feldman</surname>
<given-names>A. G.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Threshold Position Control of Arm Movement with Anticipatory Increase in Grip Force</article-title>. <source>Exp. Brain Res.</source> <volume>181</volume>, <fpage>49</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-007-0901-8</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prachyabrued</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C. W.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Effects and Optimization of Visual-Proprioceptive Discrepancy Reduction for Virtual Grasping</article-title>. In <conf-name>2013 IEEE Symposium on 3D User Interfaces (3DUI) (IEEE). 16-17 March 2013, Orlando, FL, USA</conf-name>, <fpage>11</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1109/3dui.2013.6550190</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prachyabrued</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C. W.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Visual Feedback for Virtual Grasping</article-title>. In <conf-name>2014 IEEE symposium on 3D user interfaces (3DUI) (IEEE). 29-30 March 2014, Minneapolis, MN, USA</conf-name>, <fpage>19</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1109/3dui.2014.6798835</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prachyabrued</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C. W.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Visual Interpenetration Tradeoffs in Whole-Hand Virtual Grasping</article-title>. In <conf-name>2012 IEEE Symposium on 3D User Interfaces (3DUI) (IEEE). 4-5 March 2012, Costa Mesa, CA, USA</conf-name>, <fpage>39</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1109/3dui.2012.6184182</pub-id> </citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rand</surname>
<given-names>M. K.</given-names>
</name>
<name>
<surname>Shimansky</surname>
<given-names>Y. P.</given-names>
</name>
<name>
<surname>Hossain</surname>
<given-names>A. B. M. I.</given-names>
</name>
<name>
<surname>Stelmach</surname>
<given-names>G. E.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Quantitative Model of Transport-Aperture Coordination during Reach-To-Grasp Movements</article-title>. <source>Exp. Brain Res.</source> <volume>188</volume>, <fpage>263</fpage>&#x2013;<lpage>274</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-008-1361-5</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>S&#xe4;fstr&#xf6;m</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Edin</surname>
<given-names>B. B.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Prediction of Object Contact during Grasping</article-title>. <source>Exp. Brain Res.</source> <volume>190</volume>, <fpage>265</fpage>&#x2013;<lpage>277</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-008-1469-7</pub-id> </citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>S&#xe4;fstr&#xf6;m</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Edin</surname>
<given-names>B. B.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Short-term Plasticity of the Visuomotor Map during Grasping Movements in Humans</article-title>. <source>Learn. Mem.</source> <volume>12</volume>, <fpage>67</fpage>&#x2013;<lpage>74</lpage>. <pub-id pub-id-type="doi">10.1101/lm.83005</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>S&#xe4;fstr&#xf6;m</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Edin</surname>
<given-names>B. B.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Task Requirements Influence Sensory Integration during Grasping in Humans</article-title>. <source>Learn. Mem.</source> <volume>11</volume>, <fpage>356</fpage>&#x2013;<lpage>363</lpage>. <pub-id pub-id-type="doi">10.1101/lm.71804</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schettino</surname>
<given-names>L. F.</given-names>
</name>
<name>
<surname>Adamovich</surname>
<given-names>S. V.</given-names>
</name>
<name>
<surname>Tunik</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Coordination of Pincer Grasp and Transport after Mechanical Perturbation of the index finger</article-title>. <source>J.&#x20;Neurophysiol.</source> <volume>117</volume>, <fpage>2292</fpage>&#x2013;<lpage>2297</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00642.2016</pub-id> </citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sedda</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Monaco</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bottini</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Goodale</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Integration of Visual and Auditory Information for Hand Actions: Preliminary Evidence for the Contribution of Natural Sounds to Grasping</article-title>. <source>Exp. Brain Res.</source> <volume>209</volume>, <fpage>365</fpage>&#x2013;<lpage>374</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-011-2559-5</pub-id> </citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sikstr&#xf6;m</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>H&#xf8;eg</surname>
<given-names>E. R.</given-names>
</name>
<name>
<surname>Mangano</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Nilsson</surname>
<given-names>N. C.</given-names>
</name>
<name>
<surname>De G&#xf6;tzen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Serafin</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Shop&#x2019;til You Hear it Drop: Influence of Interactive Auditory Feedback in a Virtual Reality Supermarket</article-title>. In <conf-name>Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology. November 2-4, 2016, Munich Germany</conf-name>. <fpage>355</fpage>&#x2013;<lpage>356</lpage>. </citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sivakumar</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Quinlan</surname>
<given-names>D. J.</given-names>
</name>
<name>
<surname>Stubbs</surname>
<given-names>K. M.</given-names>
</name>
<name>
<surname>Culham</surname>
<given-names>J.&#x20;C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Grasping Performance Depends upon the Richness of Hand Feedback</article-title>. <source>Exp. Brain Res.</source> <volume>239</volume>, <fpage>835</fpage>&#x2013;<lpage>846</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-020-06025-0</pub-id> </citation>
</ref>
<ref id="B62">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Stanney</surname>
<given-names>K. M.</given-names>
</name>
</person-group> (<year>2002</year>). <source>Handbook of Virtual Environments: Design, Implementation, and Applications</source>. <edition>1st Edition</edition>. <publisher-loc>Boca Raton, FL</publisher-loc>: <publisher-name>Lawrence Erlbaum Associates</publisher-name>.</citation>
</ref>
<ref id="B63">
<citation citation-type="book">
<collab>R Core Team</collab> (<year>2013</year>). <source>R: A Language and Environment for Statistical Computing</source>.</citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>van Polanen</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Tibold</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Nuruki</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Davare</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Visual Delay Affects Force Scaling and Weight Perception during Object Lifting in Virtual Reality</article-title>. <source>J.&#x20;Neurophysiol.</source> <volume>121</volume>, <fpage>1398</fpage>&#x2013;<lpage>1409</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00396.2018</pub-id> </citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vesia</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Crawford</surname>
<given-names>J.&#x20;D.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Specialization of Reach Function in Human Posterior Parietal Cortex</article-title>. <source>Exp. Brain Res.</source> <volume>221</volume>, <fpage>1</fpage>&#x2013;<lpage>18</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-012-3158-9</pub-id> </citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Volcic</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>On-line Visual Control of Grasping Movements</article-title>. <source>Exp. Brain Res.</source> <volume>234</volume>, <fpage>2165</fpage>&#x2013;<lpage>2177</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-016-4620-x</pub-id> </citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Volcic</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Domini</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>The Visibility of Contact Points Influences Grasping Movements</article-title>. <source>Exp. Brain Res.</source> <volume>232</volume>, <fpage>2997</fpage>&#x2013;<lpage>3005</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-014-3978-x</pub-id> </citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vosinakis</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Koutsabasis</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Evaluation of Visual Feedback Techniques for Virtual Grasping with Bare Hands Using Leap Motion and Oculus Rift</article-title>. <source>Virtual Reality</source> <volume>22</volume>, <fpage>47</fpage>&#x2013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.1007/s10055-017-0313-4</pub-id> </citation>
</ref>
<ref id="B69">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Weiss</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Jeannerod</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Getting a Grasp on Coordination</article-title>. <source>Physiology</source> <volume>13</volume>, <fpage>70</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1152/physiologyonline.1998.13.2.70</pub-id> </citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Whitwell</surname>
<given-names>R. L.</given-names>
</name>
<name>
<surname>Ganel</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Byrne</surname>
<given-names>C. M.</given-names>
</name>
<name>
<surname>Goodale</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Real-time Vision, Tactile Cues, and Visual Form Agnosia: Removing Haptic Feedback from a &#x201C;natural&#x201D; Grasping Task Induces Pantomime-like Grasps</article-title>. <source>Front. Hum. Neurosci.</source> <volume>9</volume>, <fpage>216</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2015.00216</pub-id> </citation>
</ref>
<ref id="B71">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Feldman</surname>
<given-names>A. G.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Reach-to-grasp Movement as a Minimization Process</article-title>. <source>Exp. Brain Res.</source> <volume>201</volume>, <fpage>75</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-009-2012-1</pub-id> </citation>
</ref>
<ref id="B72">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zahariev</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>MacKenzie</surname>
<given-names>C. L.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Auditory Contact Cues Improve Performance when Grasping Augmented and Virtual Objects with a Tool</article-title>. <source>Exp. Brain Res.</source> <volume>186</volume>, <fpage>619</fpage>&#x2013;<lpage>627</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-008-1269-0</pub-id> </citation>
</ref>
<ref id="B73">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zahariev</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>MacKenzie</surname>
<given-names>C. L.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Auditory, Graphical and Haptic Contact Cues for a Reach, Grasp, and Place Task in an Augmented Environment</article-title>. In <conf-name>Proceedings of the 5th international conference on Multimodal interfaces. November 5-7, 2003, Vancouver, BC, Canada</conf-name>. <fpage>273</fpage>&#x2013;<lpage>276</lpage>
<pub-id pub-id-type="doi">10.1145/958432.958481</pub-id> </citation>
</ref>
<ref id="B74">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zahariev</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>MacKenzie</surname>
<given-names>C. L.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Grasping at &#x27;thin Air&#x27;: Multimodal Contact Cues for Reaching and Grasping</article-title>. <source>Exp. Brain Res.</source> <volume>180</volume>, <fpage>69</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-006-0845-4</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>