<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1436752</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2024.1436752</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>The impact of first-person avatar customization on embodiment in immersive virtual reality</article-title>
<alt-title alt-title-type="left-running-head">Gonzalez-Franco et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2024.1436752">10.3389/frvir.2024.1436752</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>Mar</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/32801/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes" equal-contrib="yes">
<name>
<surname>Steed</surname>
<given-names>Anthony</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/134824/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Berger</surname>
<given-names>Christopher C.</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/368449/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Tajadura-Jim&#x00E9;nez</surname>
<given-names>Ana</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/105400/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Microsoft Research</institution>, <addr-line>Redmond</addr-line>, <addr-line>WA</addr-line>, <country>United States</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Computer Science</institution>, <institution>University College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>California Institute of Technology</institution>, <addr-line>Pasadena</addr-line>, <addr-line>CA</addr-line>, <country>United States</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Computer Science and Engineering, University Carlos III of Madrid</institution>, <addr-line>Madrid</addr-line>, <country>Spain</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>UCL Interaction Centre, University College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/112421/overview">Maria Pyasik</ext-link>, University of Udine, Italy</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/47649/overview">Lorenzo Pia</ext-link>, University of Turin, Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/369014/overview">Pierre Bourdin-Kreitz</ext-link>, Open University of Catalonia, Spain</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Anthony Steed, <email>a.steed@ucl.ac.uk</email>
</corresp>
<fn fn-type="equal" id="fn001">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors have contributed equally to this work</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>21</day>
<month>08</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>5</volume>
<elocation-id>1436752</elocation-id>
<history>
<date date-type="received">
<day>22</day>
<month>05</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>11</day>
<month>07</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Gonzalez-Franco, Steed, Berger and Tajadura-Jim&#x00E9;nez.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Gonzalez-Franco, Steed, Berger and Tajadura-Jim&#x00E9;nez</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>In virtual reality (VR), users can embody a wide variety of avatars, from digital replicas of themselves through diverse human body styles and appearances to non-humanoid representations. Although choosing a body to inhabit is part of what makes VR such an engaging experience, various studies have shown how embodiment may change the way we perceive ourselves and others both inside and outside VR. In our study, we explored whether first-person versus third-person avatar customization would lead to changes in embodiment. Furthermore, participants were embodied in larger-sized avatars based on the hypothesis that embodiment would lead to a change in implicit bias toward larger-sized people. Our results show that third-person avatar customization led to a decrease in the perceived embodiment of the larger-sized avatar and that, on the contrary, higher embodiment was associated with a reduction in implicit biases toward larger-sized people in the first-person avatar customization mode. These findings suggest that third-person avatar customization leads to reduced feelings of embodiment, while first-person avatar customization may support more radical body changes.</p>
</abstract>
<kwd-group>
<kwd>body ownership</kwd>
<kwd>avatar customization</kwd>
<kwd>bias reduction</kwd>
<kwd>immersive virtual reality</kwd>
<kwd>embodiment</kwd>
</kwd-group>
<contract-sponsor id="cn001">Horizon 2020 Framework Programme<named-content content-type="fundref-id">10.13039/100010661</named-content>
</contract-sponsor>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality and Human Behaviour</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>In reality, we experience the world from the inside out, from a first-person perspective. Moreover, in the real world, we do not get to choose much about the body we inhabit. Instead, we are acquiesced to make the most of what we have, and that generally happens through long processes requiring physical effort and, sometimes, even surgery. More frequently, our daily interventions occur in the first person in front of a mirror. However, in virtual reality (VR), we can choose the body we want to embody from a much larger range. We can experience the world from any body size (<xref ref-type="bibr" rid="B3">Bailey et al., 2009;</xref> <xref ref-type="bibr" rid="B57">Piryankova et al., 2014</xref>), shape (<xref ref-type="bibr" rid="B40">Kilteni et al., 2012b;</xref> <xref ref-type="bibr" rid="B78">Yee et al., 2009;</xref> <xref ref-type="bibr" rid="B79">Yee et al., 2011</xref>), or color we wish (<xref ref-type="bibr" rid="B4">Banakou et al., 2016;</xref> <xref ref-type="bibr" rid="B47">Maloney, 2018;</xref> <xref ref-type="bibr" rid="B54">Peck et al., 2013</xref>). In many ways, this is part of what makes VR such a unique experience (<xref ref-type="bibr" rid="B21">Gonzalez-Franco and Lanier, 2017</xref>).</p>
<p>Research has found that the embodiment of a virtual avatar can have profound effects on how we perceive ourselves and the virtual environments in which we are situated (<xref ref-type="bibr" rid="B33">Jayaraj et al., 2017;</xref> <xref ref-type="bibr" rid="B42">Lenggenhager et al., 2007;</xref> <xref ref-type="bibr" rid="B30">Hoort et al., 2011</xref>). In VR, we can inhabit the bodies of people of different skin colors and different sexes to experience the world not only by &#x201c;walking a mile in someone else&#x2019;s shoes&#x201d; but by doing so in a facsimile of their body (<xref ref-type="bibr" rid="B26">Gonzalez-Liencres et al., 2020;</xref> <xref ref-type="bibr" rid="B54">Peck et al., 2013;</xref> <xref ref-type="bibr" rid="B65">Seinfeld et al., 2018</xref>).</p>
<p>Research into the behavioral effects of embodying different avatars has found that individuals who embodied avatars that were more attractive were more likely to stand closer to other avatars and that individuals embodied in taller bodies were more likely to act aggressively toward other virtual avatars, suggesting that the body we experience as our own can lead to changes in how we behave toward others (<xref ref-type="bibr" rid="B78">Yee et al., 2009</xref>). This line of work not only demonstrated altered behavior on the basis of the body we inhabit but also implicitly held beliefs and biases, such as racial bias (<xref ref-type="bibr" rid="B48">Maloney et al., 2019</xref>; <xref ref-type="bibr" rid="B54">Peck et al., 2013</xref>).</p>
<p>In this paper, we focus on the mechanisms by which users might modify the virtual body that they will embody. Prior experiments have shown a lot of body plasticity toward changing body parts in participants (<xref ref-type="bibr" rid="B40">Kilteni et al., 2012b</xref>; <xref ref-type="bibr" rid="B52">Normand et al., 2011</xref>; <xref ref-type="bibr" rid="B5">Berger et al., 2022</xref>). We introduce a novel <italic>first-person</italic> mechanism where they can grab and pull the avatar they see from a first-person perspective to resize it (see <xref ref-type="sec" rid="s3-4">Section 3.4</xref> for implementation details). This is contrasted with a <italic>third-person</italic> mechanism where they use sliders to manipulate an avatar they see in front of them. The third-person mechanism mimics the situation encountered in many VR experiences when the user selects and manipulates avatar components or properties but does not alter the self-embodied avatars, giving a disembodied approach to avatar personalization. Given the prior work on embodiment, we hypothesize that altering one&#x2019;s avatar from a first-person point of view will generate higher levels of embodiment. Because embodiment has been shown to change biases, we set the participants the task of changing their self-avatar to a larger-sized avatar (in our case, an avatar that represents a human shape that would be assessed as having a body mass index (BMI) in the obese range<xref ref-type="fn" rid="fn2">
<sup>1</sup>
</xref>), with the hypothesis that by supporting embodiment in a larger-sized avatar, the implicit bias of participants toward larger-sized people will be reduced as measured by an Implicit Association Test (IAT).</p>
<p>Our experiments show that first-person avatar customization led to an increase in the perceived embodiment of the larger-sized avatar. Furthermore, we find that higher embodiment was associated with a reduction in the implicit biases toward larger people in the first-person avatar customization mode. These results suggest that first-person avatar customization might not only be a useful technique when embodiment is important but also might be relevant to reducing disembodied effects on the populations of social avatar situations.</p>
<p>Furthermore, our protocol does not require an explicit body ownership induction (cf. <xref ref-type="bibr" rid="B52">Normand et al. (2011)</xref>) but elicits this body ownership through task-based activity, consistent with previous work that showed that body ownership could be achieved through having participants engage in games (<xref ref-type="bibr" rid="B80">Yuan and Steed, 2010</xref>).</p>
<p>
<xref ref-type="sec" rid="s2">Section 2</xref> presents more background on avatar customization, embodiment, and implicit bias. <xref ref-type="sec" rid="s3">Section 3</xref> describes the method of an experiment that explores how avatar customization style impacts the relationship between body ownership and bias. <xref ref-type="sec" rid="s4">Section 4</xref> presents the main results of the study. <xref ref-type="sec" rid="s5">Section 5</xref> discusses and interprets these results in the context of the related work. <xref ref-type="sec" rid="s6">Section 6</xref> presents some conclusions and suggestions for new research directions.</p>
</sec>
<sec id="s2">
<title>2 Background</title>
<sec id="s2-1">
<title>2.1 Avatar customization</title>
<p>Avatar customization is most commonly explored within the context of social virtual reality or collaborative virtual environments (<xref ref-type="bibr" rid="B11">Churchill and Snowdon, 1998</xref>; <xref ref-type="bibr" rid="B64">Schroeder, 2010</xref>). The past few years have seen a surge of new social VR applications. Platforms vary greatly in the types of avatars they support (<xref ref-type="bibr" rid="B56">Phadnis et al., 2023</xref>). For example, <xref ref-type="bibr" rid="B61">Rec Room (2021)</xref> allowed users to select various parts of their self-representation from a relatively limited palette that fits a consistent style for the whole application; <xref ref-type="bibr" rid="B68">Spatial Systems Inc. (2021)</xref> allowed users to customize their avatars using a model fitted to an image of their head; <xref ref-type="bibr" rid="B74">VRChat Inc. (2021)</xref> allowed a broad range of anthropomorphic and non-anthropomorphic avatars. Recent surveys have started to examine how these features vary across platforms (<xref ref-type="bibr" rid="B36">Jonas et al., 2019</xref>; <xref ref-type="bibr" rid="B70">Tanenbaum et al., 2020</xref>; <xref ref-type="bibr" rid="B17">Freeman and Maloney, 2021</xref>; <xref ref-type="bibr" rid="B43">Liu and Steed, 2021</xref>). Many avatar customization systems (e.g., Rec Room) include a mirror so that the participant can see the immediate effect of changing their representation. Indeed, mirrors have long been shown to be a key to enhancing the body ownership of participants (<xref ref-type="bibr" rid="B24">Gonz&#xe1;lez-Franco et al., 2010</xref>). However, fine-scale modulation of body size is rarely included in customization tools. Pujades et al. described the virtual caliper technique to scale a body and limbs to fit the tracking of controllers (<xref ref-type="bibr" rid="B59">Pujades et al., 2019</xref>). Thaler et al. 
demonstrated that the perspective with which a user experiences their self-avatar has a significant impact on the accuracy of body size estimates (<xref ref-type="bibr" rid="B73">Thaler et al., 2019</xref>). BodyLab is an immersive system used for sculpting a wide variety of avatars (<xref ref-type="bibr" rid="B81">Zeidler and McGinity, 2023</xref>). Some work has been done to explore the generation of realistic virtual humans and shapes (<xref ref-type="bibr" rid="B2">Anguelov et al., 2005</xref>; <xref ref-type="bibr" rid="B1">Achenbach et al., 2017</xref>), using AI and/or surface deformation methods (<xref ref-type="bibr" rid="B8">Botsch and Sorkine, 2007</xref>).</p>
</sec>
<sec id="s2-2">
<title>2.2 Embodiment and bias</title>
<p>While a large body of work on avatars is concerned with presentation to others, the self-avatar (henceforth just &#x201c;avatar&#x201d;) representation has important impacts on the user. Inside VR, users have their bodies substituted by the avatar body seen from a first-person perspective, which moves as they move. That virtual body is at a visual, motor, and proprioceptive level, substituting their own body, and thus, participants experience an embodiment illusion (<xref ref-type="bibr" rid="B21">Gonzalez-Franco and Lanier, 2017;</xref> <xref ref-type="bibr" rid="B39">Kilteni et al., 2012a;</xref> <xref ref-type="bibr" rid="B53">Padrao et al., 2016</xref>).</p>
<p>The effects associated with such an embodiment of avatars (<xref ref-type="bibr" rid="B14">Dunn and Guadagno, 2012</xref>) include changes in implicit attitudes toward others. For example, light-skinned participants who experience embodiment over a dark-skinned avatar show a significant reduction in their racial bias (<xref ref-type="bibr" rid="B54">Peck et al., 2013</xref>), which can be sustained over time (<xref ref-type="bibr" rid="B4">Banakou et al., 2016</xref>). Moreover, aging a virtual body might reduce prejudice toward older people (<xref ref-type="bibr" rid="B77">Yee and Bailenson, 2007</xref>). Furthermore, it has been studied that when entering a VR, participants undergo a strong illusion of presence (<xref ref-type="bibr" rid="B62">Sanchez-Vives and Slater, 2005</xref>); they experience being in a new location where the events occurring are plausible, leading them to realistic responses (<xref ref-type="bibr" rid="B25">Gonzalez-Franco et al., 2018</xref>; <xref ref-type="bibr" rid="B66">Slater, 2009</xref>). Mottelson et al. recently contributed a meta-analysis of research on the effectiveness of body illusions in virtual reality (<xref ref-type="bibr" rid="B51">Mottelson et al., 2023</xref>).</p>
<p>Indeed, embodiment opens new doors for psychology experiments, allowing for a new level of perspective-taking (<xref ref-type="bibr" rid="B48">Maloney et al., 2019</xref>). Embodiment can also help increase compassion (<xref ref-type="bibr" rid="B15">Falconer et al., 2014</xref>) or reduce social bias (<xref ref-type="bibr" rid="B46">Maister et al., 2013;</xref> <xref ref-type="bibr" rid="B54">Peck et al., 2013</xref>). Embodiment may be very important to the current use of avatars in applications such as weight management consultation (<xref ref-type="bibr" rid="B31">Horne et al., 2020</xref>) and other body-related disorders (<xref ref-type="bibr" rid="B57">Piryankova et al., 2014</xref>). More recently, studies have also focused on altering photo-realistic self-avatars and their effects on body weight perception (<xref ref-type="bibr" rid="B76">Wolf et al., 2020;</xref> <xref ref-type="bibr" rid="B72">Thaler et al., 2018</xref>). These studies have found that participants with a lower BMI tend to underestimate the weight of their photo-realistic avatars, while participants with a higher BMI overestimate the body weight of the avatar.</p>
<p>While many of the works described emphasize changes in appearance to self, the specific avatar they are given is usually not customizable from a first-person perspective by the participants. Although the experiment might provide a small number of options (e.g., selecting gender, body size, or skin color) or even a large set of options for BMI morphing (<xref ref-type="bibr" rid="B32">Hudson et al., 2020</xref>), the avatar size is generally a metric or condition of the experiment. One exception is provided by <xref ref-type="bibr" rid="B13">D&#xf6;llinger et al. (2022)</xref>, who enabled the user to embody their self-avatar using the controller in different ways, including gestures. For the purpose of our study, we allow the user to manipulate a morphable body using a technique that allows the user to directly manipulate their own avatar. We constrain the manipulation to a pair of dimensions: upper body size and lower body size.</p>
</sec>
<sec id="s2-3">
<title>2.3 Implicit bias</title>
<p>Implicit bias in attitudes toward others (henceforth simply &#x201c;implicit bias&#x201d;) exists in multiple forms: gender, race, weight, sexual orientation, and age (<xref ref-type="bibr" rid="B28">Greenwald and Krieger, 2006</xref>). It is very difficult to introduce changes to such implicit bias precisely because of its deep roots in our society and the power with which the media and our cultures reinforce pre-existing stereotypes and prejudices (<xref ref-type="bibr" rid="B35">Jolls and Sunstein, 2006;</xref> <xref ref-type="bibr" rid="B37">Kang et al., 2011</xref>). However, its effects are so profound in collective society&#x2014;and also at individual levels&#x2014;that this problem has gained increasing relevance for scientists and the general public (<xref ref-type="bibr" rid="B71">Teachman and Brownell, 2001</xref>): we need to reduce existing social biases (implicit and explicit) to create more just and equal societies. In recent decades, awareness of the noxious effects of implicit bias has increased, and important anti-discrimination laws have been implemented in many parts of the globe. In some cases, successful policies in education, healthcare, and employment have helped change the course of implicit bias and established ways to reduce it.</p>
<p>It has been shown that most biases take root in broad cultural environmental factors (<xref ref-type="bibr" rid="B27">Greenwald and Banaji, 1995;</xref> <xref ref-type="bibr" rid="B28">Greenwald and Krieger, 2006</xref>) and can be traced back to original in-group favoritism and, in many cases, are reinforced by the economic benefits that are distilled from the original discrimination (<xref ref-type="bibr" rid="B9">Cain, 1986</xref>). The complexity of the problem increases even further for people encountering multiple forms of bias, such as the types of violence experienced by women of color (<xref ref-type="bibr" rid="B12">Davis, 2000</xref>). Therefore, the nature of the discrimination is different depending on the particular bias.</p>
<p>In that regard, some biases are more overt and explicit than others, and sometimes, they are more socially accepted. That is the case of age-related bias or weight-based bias (<xref ref-type="bibr" rid="B71">Teachman and Brownell, 2001</xref>). Many people assume that, as opposed to other nature-given aspects, such as gender or race, weight is a choice. Hence, overweight people not only suffer from the effects of implicit bias&#x2014;such as discrimination&#x2014;but also from blame and bullying (<xref ref-type="bibr" rid="B71">Teachman and Brownell, 2001</xref>). In many cases, this stigma increases the probability of mental problems such as depression (<xref ref-type="bibr" rid="B10">Carels et al., 2010;</xref> <xref ref-type="bibr" rid="B58">Puhl et al., 2007</xref>) in these people and reduces their ability to overcome the situation.</p>
</sec>
</sec>
<sec sec-type="methods" id="s3">
<title>3 Methods</title>
<sec id="s3-1">
<title>3.1 Participants</title>
<p>Twenty male participants participated in the experiment (mean age &#x3d; 38.59 years and SD &#x3d; 11.56). All participants were healthy, none of them had a BMI greater than 30, and they reported no history of psychiatric illness or neurological disorder and had normal vision (or had corrected-to-normal vision). The participants were recruited internally. They provided their written informed consent and received monetary compensation in exchange for their participation. The experimental protocol was approved by the Microsoft Research Review Board and followed the ethical guidelines of the Declaration of Helsinki. Informed consent from participants was also obtained to publish images or videos captured during their participation in subsequent research publications (including online open-access publications).</p>
</sec>
<sec id="s3-2">
<title>3.2 Experimental design</title>
<p>Half (n &#x3d; 10, mean age &#x3d; 38.75 years, and SD &#x3d; 11.75) of the 20 participants were randomly assigned to the third-person avatar customization condition, and the other half (n &#x3d; 10, mean age &#x3d; 40.6 years, and SD &#x3d; 12.05) customized their avatars inside the VR in front of a mirror in the first-person avatar customization condition. The experiment followed a between-subject experimental design. Under each condition, the participants had two VR experiences: a self-avatar virtual experience and a larger avatar virtual experience. At the beginning of each experience, the participants created their avatar from either a first-person or third-person perspective in the first- or third-person conditions, respectively.</p>
</sec>
<sec id="s3-3">
<title>3.3 Apparatus</title>
<p>The participants used an HTC VIVE Pro VR System driven by a desktop PC. They wore the head-mounted display and carried the two hand controllers. In addition, the participant wore a waist belt with two additional VIVE Puck trackers attached. These were positioned symmetrically at the front, above the participant&#x2019;s pockets. These trackers provided a position that was used to control the waist orientation and also a reference line that was used in the waist measuring task (see Section 3.6.3 for details). The scene was modeled and run in Unity3D software. A generic male avatar was created using DAZ 3D software, and it was rigged with a standard skeleton. The avatar model selected was a male avatar without hair, wearing shorts and a t-shirt (see <xref ref-type="fig" rid="F1">Figure 1</xref>). This was appropriate for the scenario set in a gymnasium.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Skinny and large avatar blend shapes. Participants customized the avatar between two blend shapes. <bold>(A)</bold> Blend shape of the thinnest avatar. <bold>(B)</bold> Blend shape of the largest avatar.</p>
</caption>
<graphic xlink:href="frvir-05-1436752-g001.tif"/>
</fig>
<p>The avatar was animated using the Final IK animation system, which uses the tracked positions from the VIVE system. The feet were not tracked, so a simple built-in stepping animation from Final IK was used to have the feet follow underneath the head. However, participants were not asked to walk around the scene. The avatar had two blend shapes incorporated: one representing a very large person (<xref ref-type="fig" rid="F1">Figure 1B</xref>) and the other a very skinny person (<xref ref-type="fig" rid="F1">Figure 1A</xref>). The Unity run-time system implements blend shapes using linear-blend skinning. This means that the final mesh rendered was a linear blend of the blend shapes, where each shape is also modified by the weighting of individual vertices to the bones of the skeletal rig. In subsequent frames of animation after selection, we needed to find the blend weighting value that moves the selected vertex as close as possible to the current position of the hand-held controller. Because the blending is linear, the closest vertex will lie on a line between the vertex positions when the target blend shape value is set to 0 and when the target blend shape value is set to 1. We found the closest point on this line, and the corresponding linear interpolation value (clamped to [0,1]) was then set as the target weight for the corresponding blend shape. Because of the way the Unity animation system works, it was necessary to extract two full avatar meshes in each frame (even though, theoretically, only one was needed to find two vertex positions); however, no run-time issues were incurred by using this technique, and the application ran at the native frame rate of the display (90&#xa0;Hz).</p>
</sec>
<sec id="s3-4">
<title>3.4 Avatar configuration</title>
<p>The avatar was configured with two blend shapes. The first blend shape modified the size of the upper body between thin and larger (see <xref ref-type="fig" rid="F1">Figure 1</xref>). The second blend shape modified the lower body in a similar manner. The blend shapes thus form a space of two independent parameters, both in the range [0,1]. Participants controlled the same two underlying parameters in both first-person and third-person conditions but by different mechanisms:<list list-type="simple">
<list-item>
<p>
<inline-formula id="inf1">
<mml:math id="m1">
<mml:mo>&#x2022;</mml:mo>
</mml:math>
</inline-formula> In the <italic>first-person condition</italic>, when modifying an avatar, the participant faced a mirror and could pull and push their own avatar. Participants could grab their self-avatar by either hand by pulling on the trigger of the hand-held controller (see <xref ref-type="fig" rid="F2">Figure 2</xref>). To implement this, on the trigger pull, the nearest vertex point of the current avatar mesh was found. If the participant grabbed below their navel, we subsequently modified the weighting of the lower blend shape; otherwise, we modified the upper blend shape.</p>
</list-item>
<list-item>
<p>
<inline-formula id="inf2">
<mml:math id="m2">
<mml:mo>&#x2022;</mml:mo>
</mml:math>
</inline-formula> In the <italic>third-person condition</italic>, when modifying an avatar, the participant interacted with two 3D slider bars just in front of them (see <xref ref-type="fig" rid="F2">Figure 2</xref>). They modified a mannequin facing them. This mannequin used the same avatar blend shapes as the participant&#x2019;s self-avatar. Participants had to grab the sliders with either controller. The percentage values of the sliders were directly mapped to the two parameters of the blend shapes of the mannequin.</p>
</list-item>
</list>
</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Experimental protocol. Each participant underwent two VR experiences to create first a self-avatar and then a larger avatar. Participants were assigned either a first-person or a third-person condition. All participants completed the condition pipeline for each VR experience comprising pre-post measurements of the IAT and waist estimation and a post-embodiment questionnaire.</p>
</caption>
<graphic xlink:href="frvir-05-1436752-g002.tif"/>
</fig>
</sec>
<sec id="s3-5">
<title>3.5 Protocol</title>
<p>
<xref ref-type="fig" rid="F2">Figure 2</xref> shows the experiment protocol. The participants arrived, read a participant information sheet, confirmed and provided their written consent, and completed a short demographic questionnaire. They then completed the Implicit Association Test for the first time (see Section 3.6 and <xref ref-type="fig" rid="F5">Figure 5B</xref>).</p>
<p>Next, the participants donned the waist belt and the head-mounted display (HMD) and then they were passed the handheld controllers. They underwent two VR experiences (<xref ref-type="fig" rid="F2">Figure 2</xref>). In the first experience, they were asked to create an avatar to match themselves (self-avatar experience), and in the second experience, they were asked to create a very large avatar (large-avatar experience).</p>
<p>Each VR experience comprised four stages (<xref ref-type="fig" rid="F2">Figure 2</xref>), and transitions between stages were masked by the visuals fading to and from black over 1&#xa0;s:<list list-type="simple">
<list-item>
<p>1. Waist perception scene 1: In this stage, we asked participants to indicate where they perceived their real waist to be. We also configured the height of the self-avatar used for the rest of the experiment.</p>
</list-item>
<list-item>
<p>2. Avatar configuration scene: In the second stage, participants transitioned to a scene where they would configure their self-avatar in either a first-person or third-person manner. In the self-avatar experience, they were asked to make the avatar the same shape as themselves (<xref ref-type="fig" rid="F2">Figure 2</xref>). In the larger-avatar experience, they were asked to match the self-avatar to a larger model (<xref ref-type="fig" rid="F2">Figure 2</xref>).</p>
</list-item>
<list-item>
<p>3. Embodiment scene: Right after the avatar configuration, the participants embodied their recently created avatar from stage 2 and played a simple Whack-A-Mole-type game where they had to repeatedly reach cylinders that appeared in front of them. The aim of the game was to generate high embodiment over the avatars as triggered by synchronous sensorimotor stimulation (<xref ref-type="bibr" rid="B24">Gonz&#xe1;lez-Franco et al., 2010</xref>; <xref ref-type="bibr" rid="B41">Kokkinara and Slater, 2014</xref>). The game was played for 90&#xa0;s, after which the participants transitioned to the fourth stage.</p>
</list-item>
<list-item>
<p>4. Waist perception scene 2: In this stage, participants repeated the waist measuring task (<xref ref-type="fig" rid="F3">Figure 3A</xref>).</p>
</list-item>
</list>
</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Perception and IAT measures. <bold>(A)</bold> Participant completing the blind waist perception estimation. <bold>(B)</bold> Instructions and examples of silhouettes used in the IAT.</p>
</caption>
<graphic xlink:href="frvir-05-1436752-g003.tif"/>
</fig>
<p>The participants then removed the HMD and other devices and completed an embodiment questionnaire and another IAT (<xref ref-type="fig" rid="F3">Figure 3B</xref>).</p>
<p>Immediately after the self-avatar experience, the participants geared up again and completed the second VR experience, making their avatar larger. The difference between both experiences was that in the second stage of the procedure, they were asked to manipulate the avatar to match another model placed off to the left that had a larger body shape (perceived BMI <inline-formula id="inf3">
<mml:math id="m3">
<mml:mo>&#x3e;</mml:mo>
<mml:mn>30</mml:mn>
</mml:math>
</inline-formula>) (see <xref ref-type="fig" rid="F1">Figure 1B</xref>; <xref ref-type="fig" rid="F2">Figure 2</xref>). After completing this second VR experience, they completed a second embodiment questionnaire and a third IAT.</p>
</sec>
<sec id="s3-6">
<title>3.6 Measures</title>
<sec id="s3-6-1">
<title>3.6.1 Avatar acceptance</title>
<p>We use a standard embodiment questionnaire to evaluate avatar acceptance (<xref ref-type="bibr" rid="B23">Gonzalez-Franco and Peck, 2018</xref>). Embodiment questionnaires are a common way to assess the level to which participants have accepted their self-avatars as their own bodies. Low embodiment indicates rejection of the avatar body. High embodiment scores indicate high acceptance of the avatar body.</p>
<p>We used 15 items from 25 questions from the Peck and Gonzalez-Franco questionnaire (<xref ref-type="bibr" rid="B23">Gonzalez-Franco and Peck, 2018</xref>). Participants filled out questions probing different aspects of embodiment (i.e., body ownership, sense of agency, and sense of spatial co-location), following the self-avatar and larger-avatar virtual experiences in both the first- and third-person conditions. We did not include the sub-scales of touch and external stimuli. The questions included were as follows:</p>
<p>Q1: I felt as if the virtual body I saw when I looked down was my body.</p>
<p>Q2: It felt as if the virtual body I saw was someone else.</p>
<p>Q3: It seemed as if I might have more than one body.</p>
<p>Q4: I felt as if the virtual body I saw when looking at myself in the mirror was my own body.</p>
<p>Q5: I felt as if the virtual body I saw when looking at myself in the mirror was another person.</p>
<p>Q6: I felt like I could control the virtual body as if it were my own body.</p>
<p>Q7: The movements of the virtual body were caused by my movements.</p>
<p>Q8: I felt as if the movements of the virtual body were influencing my own movements.</p>
<p>Q9: I felt as if the virtual body was moving by itself.</p>
<p>Q10: I felt as if my body was located where I saw the virtual body.</p>
<p>Q11: I felt out of my body.</p>
<p>Q12: I felt as if my (real) body were drifting toward the virtual body or as if the virtual body were drifting toward my real body.</p>
<p>Q13: It felt as if my (real) body were turning into an &#x201c;avatar&#x201d; body.</p>
<p>Q14: I felt like I was wearing different clothes from when I came to the laboratory.</p>
<p>Q15: I felt as if the size of the world changed during the experience.</p>
<p>The participants rated their agreement with the above statements on a 7-point Likert scale ranging from &#x2212;3 to 3, where &#x2212;3 was anchored to strong disagreement and 3 to strong agreement. Questions 1&#x2013;5 probed feelings of ownership (questions 2, 3, and 5 served as control questions) over the virtual avatar. Questions 6&#x2013;9 probed feelings of agency over the virtual avatar (questions 8&#x2013;9 served as control questions). Questions 10&#x2013;12 probed the participants&#x2019; sense of co-location with the virtual avatar (questions 11 and 12 were designed as control questions), and questions 13&#x2013;15 probed the extent to which participants felt they took on the physical characteristics of the virtual avatar and were designed as control questions. Control questions served to rule out response bias or demand characteristics.</p>
</sec>
<sec id="s3-6-2">
<title>3.6.2 Change in body size ratings</title>
<p>We asked two additional questions following each VR experience. In these questions, participants reported the extent to which they felt larger in the virtual environment:</p>
<p>Q1: At some point, it felt as if my real body was starting to take on the posture or shape of the virtual body that I saw.</p>
<p>Q2: I felt as if the size of my body changed during the experience.</p>
<p>The participants were asked to rate their agreement with the statements on a 7-point Likert scale ranging from &#x2212;3 to &#x2b;3, as they did on the embodiment questionnaire above. Responses to these questions were then averaged together to obtain a body-size rating estimate for each VR experience for each condition.</p>
</sec>
<sec id="s3-6-3">
<title>3.6.3 Real waist perception measurement</title>
<p>In addition to the body-size ratings, participants were also asked to report the perceived location of their real waist before and after the experience in VR (see <xref ref-type="fig" rid="F3">Figure 3A</xref>). As soon as participants put on their HMD, they were asked to set their waist size using their right-hand controller. They could neither see a self-avatar nor their body during this process. The visual representation of the right-hand controller had a small ball on a short rod, which they needed to place where they thought their waist would be. The distance between the ball and the line on the actual waist, as measured using the two VIVE Puck trackers on the waist, was recorded.</p>
</sec>
<sec id="s3-6-4">
<title>3.6.4 Implicit association test</title>
<p>The implicit negativity bias toward larger people was measured by requiring the participants to quickly categorize silhouette images of people (larger or thin) and words (positive or negative) into groups following a balanced paired test (see <xref ref-type="fig" rid="F3">Figure 3B</xref>). Each time a participant ran the IAT, they underwent the following blocks:</p>
<p>Block 1) Image learning trial of association. A silhouette of a fat or thin body is associated with either the words &#x201c;fat&#x201d; or &#x201c;thin.&#x201d;</p>
<p>Block 2) Word learning trial of association. A positive or negative word &#x201c;good, joy, love, peace, wonderful, pleasure, glorious, laughter, happy, bad, agony, terrible, horrible, nasty, evil, awful, failure, and hurt&#x201d; is associated with positive or negative.</p>
<p>Block 3) First paired test. Positive words/images are to be associated with being thin, and negative words/images are to be associated with being fat.</p>
<p>Block 4) Reverse image and word learning trial.</p>
<p>Block 5) Second paired test. Positive words/images are to be associated with being fat, and negative words/images are to be associated with being thin.</p>
<p>The implicit bias can then be calculated from the differences in accuracy and speed between these categorizations (e.g., thin persons and positive words and overweight persons and negative words, compared to the opposite pairings). The scores were obtained using FreeIAT software<xref ref-type="fn" rid="fn3">
<sup>2</sup>
</xref>, and the GNB score was calculated using the method described by <xref ref-type="bibr" rid="B29">Greenwald et al. (2003)</xref>, which can be summarized as follows: compute the mean and SD of reaction time (RT) for items in blocks 3 and 5; the GNB score is then the average corrected RT from block 5 minus the average corrected RT from block 3, divided by the pooled SD.</p>
<p>Elimination of outliers: &#x201c;TooSlow&#x201d; trials with RTs &#x3e; 10,000&#xa0;ms were eliminated, and a participant would be discarded from the analysis if more than 10% of their trials were &#x201c;TooFast&#x201d; trials with RTs &#x3c; 300&#xa0;ms.</p>
<p>More details on the computation are given by <xref ref-type="bibr" rid="B29">Greenwald et al. (2003)</xref> (Table 4).</p>
<p>Higher IAT scores indicate a greater association with overweight body images and positive words and thin body images and negative words, whereas negative scores indicate a greater association with overweight body images and negative words and thin body images and positive words (see <xref ref-type="fig" rid="F3">Figure 3B</xref>).</p>
</sec>
</sec>
<sec id="s3-7">
<title>3.7 Analyses</title>
<p>Two-factor ANOVAs (with avatar customization condition, i.e., first- vs. third-person avatar customization as a between-subject factor; and VR experience, i.e., self-avatar vs. larger avatar, as a within-subject factor) were used to examine the questionnaires, IAT, and waist estimation data. The normality of the residuals was assessed using a Shapiro&#x2013;Wilk test for normality and visual inspection of the qq-plots. Paired comparisons were made using a <italic>t</italic>-test or non-parametric Wilcoxon signed-rank tests when normality was not met. The normality of the paired differences for all planned comparisons was assessed using a Shapiro&#x2013;Wilk test for normality. Further assessment of our results was carried out using Bayes factor (BF) <italic>t</italic>-tests, i.e.,<disp-formula id="equ1">
<mml:math id="m4">
<mml:mi>B</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>/</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
</disp-formula>These tests were conducted comparing the relative evidence of the alternative hypothesis, i.e.,<disp-formula id="equ2">
<mml:math id="m5">
<mml:msub>
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">self&#x2212;avatar</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">larger&#x2212;avatar</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2260;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
</mml:math>
</disp-formula>over the null hypothesis, i.e.,<disp-formula id="equ3">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">self&#x2212;avatar</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">larger&#x2212;avatar</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:math>
</disp-formula>using BayesFactor of the R package (<xref ref-type="bibr" rid="B50">Morey et al., 2018</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<title>4 Results</title>
<sec id="s4-1">
<title>4.1 Embodiment scores</title>
<p>To examine whether there were differences in the overall experience of embodiment across conditions, we averaged the responses to embodiment questions and the reverse-coded control questions. Calculating an embodiment score in this way provides an unbiased estimate of the feeling of the embodiment of the virtual avatar that controls for potential response bias or demand characteristics (see <xref ref-type="sec" rid="s13">Appendix Figure 6</xref>). A two-factor ANOVA revealed that there was a significant effect of VR experience (F(1, 20) &#x3d; 6.20 and <italic>p</italic> &#x3d; 0.023) (see Appendix A).</p>
<p>Planned comparisons between VR experiences in the third-person avatar customization condition revealed a significant decrease in embodiment during the larger-avatar VR experience compared to the self-avatar VR experience (t(10) &#x3d; 2.69, <italic>p</italic> &#x3d; 0.023, d &#x3d; 0.95, and 95% CI &#x3d; [0.10, 1.07]). A Wilcoxon signed-rank test showed no significant difference in embodiment between self-avatar and larger-avatar experiences in the first-person avatar customization condition (V(10) &#x3d; 48, <italic>p</italic> &#x3d; 0.196, d &#x3d; 0.25, and 95% CI &#x3d; [-0.26, 0.44]) (see <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Embodiment scores and summarized questionnaire ratings. Box and whisker plots of the embodiment score and ratings to key factors underlying embodiment. Median values are displayed as horizontal bars within colored boxes, interquartile ranges are represented by the upper and lower bounds of the boxes, and <inline-formula id="inf4">
<mml:math id="m7">
<mml:mo>&#xb1;</mml:mo>
</mml:math>
</inline-formula> 1.5-times the upper and lower quartiles are shown as the upper and lower whiskers, with outliers beyond this shown as single points. Asterisks between bars indicate significant differences between self- and larger-body avatar experiences (&#x2a;ps &#x3c; 0.05).</p>
</caption>
<graphic xlink:href="frvir-05-1436752-g004.tif"/>
</fig>
<p>These results suggest that participants who configured their avatar in the first person were more likely to accept and less likely to disembody the larger avatar than those in the third person. To further corroborate this interpretation, we ran an additional <italic>post hoc</italic> BF <italic>t</italic>-test between self- and larger-avatar VR experiences in the third-person and first-person avatar conditions, which revealed anecdotal evidence (BF<sub>10</sub> &#x3d; 0.35) (<xref ref-type="bibr" rid="B34">Jeffreys, 1998</xref>) in favor of the null hypothesis over the alternative hypothesis in the first-person avatar customization condition and moderate evidence in favor of the alternative hypothesis in the third-person avatar customization condition (BF<sub>10</sub> &#x3d; 3.15).</p>
<p>These findings show that creating a larger avatar from a third-person perspective significantly reduced the subsequent experience of the overall embodiment in a larger avatar compared to when a self-avatar was created from a first-person perspective and that creating a larger avatar from a first-person perspective did not change the overall experience of embodiment compared to creating a self-avatar from a first-person perspective.</p>
<p>Embodiment is traditionally described as the combination of three factors: body ownership, agency, and self-location (<xref ref-type="bibr" rid="B39">Kilteni et al., 2012a</xref>). Thus, in order to explore which aspects of embodiment triggered the decrease in acceptance of the body for the third-person avatar design, we analyzed the questionnaire items associated with each of these subcategories:</p>
<p>
<bold>Ownership rating</bold>: Planned comparisons checked the mean ratings to ownership questions between self- and larger-avatar VR experiences in the first- and third-person avatar customization conditions. A paired <italic>t</italic>-test revealed a significant decrease in the experience of body ownership of the larger avatar compared to the self-avatar VR experience in the third-person avatar customization condition (t(10) &#x3d; 3.65, <italic>p</italic> &#x3d; 0.004, d &#x3d; 1.50, and 95% CI &#x3d; [0.83, 3.43]); however, no significant difference was observed between the ownership ratings between self- and larger-avatar VR experiences in the first-person avatar customization condition (t(10) &#x3d; 1.66, <italic>p</italic> &#x3d; 0.127, d &#x3d; 0.63, and 95% CI &#x3d; [&#x2013;0.27, 1.91]) (see <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<p>Additionally, Bayes factor <italic>t</italic>-tests comparing the relative evidence of the alternative over the null hypothesis of a change in ownership ratings between self- and larger-body avatar conditions revealed anecdotal evidence in favor of the null hypothesis (BF<sub>10</sub> &#x3d; 0.86) in the first-person avatar condition and strong evidence in favor of the alternative hypothesis in the third-person avatar customization condition (BF<sub>10</sub> &#x3d; 11.78). These findings suggest that creating a larger avatar in the third person led to a significant decrease in the experience of body ownership compared to when a self-avatar was created in the third-person perspective and that creating a larger avatar in the first-person perspective had no effect on the experience of ownership of either larger or self-avatars.</p>
<p>
<bold>Agency ratings</bold>: The mean ratings for questions probing the sense of agency in the self- and larger-avatar conditions were compared using planned comparisons for participants in the first- and third-person avatar customization conditions. This analysis revealed that there were no significant differences in the experience of the sense of agency in the first-person (V(10) &#x3d; 6, <italic>p</italic> &#x3d; 0.78, d &#x3d; 0.21, and 95% CI &#x3d; [&#x2013;0.95, 0.21]) or third-person (t(10) &#x3d; 1.61, <italic>p</italic> &#x3d; 0.137, d &#x3d; 0.38, and 95% CI &#x3d; [&#x2013;0.17, 1.08]) conditions.</p>
<p>
<bold>Self-location ratings</bold>: Responses to the self-location questions were compared between VR experiences in the first- and third-avatar customization conditions using Wilcoxon signed-rank tests. No significant differences between self- and larger-avatar VR experiences were found in planned comparisons on participants who perceived co-location with the virtual avatar for participants in the first-person (V(10) &#x3d; 16, <italic>p</italic> &#x3d; 0.79, d &#x3d; 0.12, and 95% CI &#x3d; [&#x2013;1.49, 2.00]) and third-person (V(10) &#x3d; 8, <italic>p</italic> &#x3d; 1, d &#x3d; 0.0, and 95% CI &#x3d; [&#x2013;0.53, 0.53]) avatar customization conditions.</p>
</sec>
<sec id="s4-2">
<title>4.2 Change in body size ratings</title>
<p>To examine whether there were significant changes in the experience of the size of the body between the self- and larger-avatar conditions, the mean body size ratings were compared using planned comparisons in the third- and first-person avatar customization conditions. A paired <italic>t</italic>-test revealed that there was a significant increase in the experience of body size in the larger-avatar condition compared to the self-avatar condition for participants in the first-person avatar customization condition (t(10) &#x3d; 2.22, <italic>p</italic> &#x3d; 0.05, d &#x3d; 0.93, and 95% CI &#x3d; [&#x2013;3.00, 0.00]). However, no significant difference in the perceived body size was found between self- and larger-avatar conditions for participants in the third-person avatar customization condition (t(10) &#x3d; 1.61, <italic>p</italic> &#x3d; 0.13, d &#x3d; 0.56, and 95% CI &#x3d; [&#x2013;1.73, 0.27]) (see <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<p>Additional Bayes factor <italic>t</italic>-tests revealed anecdotal evidence (BF<sub>10</sub> &#x3d; 1.70) in favor of the alternative hypothesis over the null hypothesis in the first-person avatar customization condition and anecdotal evidence (BF<sub>10</sub> &#x3d; 0.81) in favor of the null hypothesis in the third-person avatar customization condition. These findings suggest that while participants in the first-person avatar customization condition experienced a significant increase in their body size when embodying the larger avatar compared to their self-avatar, participants in the third-person avatar customization condition did not experience an increase in their body size when embodying the larger avatar.</p>
</sec>
<sec id="s4-3">
<title>4.3 Waist measurement perception</title>
<p>We examined whether the experimental manipulation significantly altered the perception of the participants&#x2019; real waist by first calculating the post- minus pre-VR experience (i.e., self-avatar and larger avatar) waist location estimations for both the first- and third-person avatar customization conditions. A two-way analysis of variance with the avatar customization condition as a between-subject factor and VR experience as a within-subject factor revealed that there was no significant main effect of avatar customization condition (F(1, 40) &#x3d; 1.54 and <italic>p</italic> &#x3d; 0.22), no main effect of VR experience (F(1, 40) &#x3d; 1.12 and <italic>p</italic> &#x3d; 0.296), and no significant interaction (F(1, 40) &#x3d; 0.163 and <italic>p</italic> &#x3d; 0.688). These results are interesting because they suggest that although avatar embodiment affected the perception of body size within the virtual environment in the first-person avatar customization condition, it did not significantly alter the participants&#x2019; perception of their real waist location when they were asked to explicitly report it during the waist-size estimation task.</p>
<p>Although there was no significant difference between the waist estimations in the self- and larger-avatar experiences for the first- or third-person avatar customization conditions, we were interested in examining whether individual differences in participants&#x2019; self-reported waist size measurements were positively correlated with the experience of body size. This analysis (Pearson&#x2019;s r) revealed that there was a positive correlation between the self-reported waist size estimates and the perceived increase in body size in the larger-body experience in the first-person avatar customization condition (r(9) &#x3d; 0.49, <italic>p</italic> &#x3d; 0.06, one-tailed). That is, the larger the participants indicated their real waist size to be following the larger-body VR experience, the more they reported experiencing an increase in their body size during the experiment. In contrast, no significant correlation was found between the waist size estimates and body size ratings following the larger-body VR experience in the third-person avatar customization condition (r(9) &#x3d; &#x2212;0.64, <italic>p</italic> &#x3d; 0.98, one-tailed). This result suggests that although there was no significant difference in waist size estimates between the self- and larger-body avatar VR experiences, there was a significant positive relationship between individual differences in the waist estimations and the experience of feeling larger in the first-person condition. This relationship was, however, absent in the third-person condition.</p>
</sec>
<sec id="s4-4">
<title>4.4 Implicit association test</title>
<p>A two-factor ANOVA was conducted on the IAT data with conditions (first-person and third-person avatar customization) as a between-subject factor and order (pre-VR experience, post-self-avatar experience, and post-larger-avatar experience) as a within-subject factor. The ANOVA revealed that there was a significant effect of order (F(1, 20) &#x3d; 5.35 and <italic>p</italic> &#x3d; 0.007). However, no significant effect of the condition was observed (F(1, 20) &#x3d; 1.27 and <italic>p</italic> &#x3d; 0.264). Additionally, no significant interaction between the condition and order was observed (F(1, 20) &#x3d; 0.128 and <italic>p</italic> &#x3d; 0.879).</p>
<p>Planned comparisons revealed there was no significant difference between IAT data following the self-avatar VR experience compared to the pre-VR experience IAT data (t(10) &#x3d; 1.67 and <italic>p</italic> &#x3d; 0.125) or between the larger-avatar VR experience IAT data compared to the pre-VR experience IAT data (t(10) &#x3d; 1.52 and <italic>p</italic> &#x3d; 0.158). Therefore, the order effect could be due to a learning effect, which has also been reported previously as a weakness of IATs in general and the reason why sometimes only the last trials of each IAT block are used for computing the biases (<xref ref-type="bibr" rid="B29">Greenwald et al., 2003</xref>).</p>
<p>Although no significant differences were observed in the IAT data obtained following the self- and larger-avatar experiences in either the first- or third-person avatar customization conditions, we also wanted to examine whether the individual differences in the IAT were significantly related to the perception of increased body size in the first-person avatar customization condition following the larger-avatar body experience. Previously, individual differences have been found to change the outputs of VR experiences and results when it comes to embodiment and size perception (<xref ref-type="bibr" rid="B18">Gonzalez-Franco et al., 2019</xref>).</p>
<p>A linear regression analysis revealed a significant positive relationship between body-size ratings and the IAT scores, whereby the perceived body-size ratings significantly predicted the IAT scores (<inline-formula id="inf5">
<mml:math id="m8">
<mml:mi>&#x3b2;</mml:mi>
</mml:math>
</inline-formula> &#x3d; 2.41, t(9) &#x3d; 3.75, and <italic>p</italic> &#x3d; 0.005) and that body size ratings also explained a significant proportion of variance in IAT scores (R<sup>2</sup> &#x3d; 0.60, F(1, 9) &#x3d; 14.04, and <italic>p</italic> &#x3d; 0.005) (see <xref ref-type="fig" rid="F5">Figure 5</xref>). No significant relationship between the perceived body size ratings and the IAT scores was observed following the larger-avatar VR experience for participants in the third-person avatar customization condition (<inline-formula id="inf6">
<mml:math id="m9">
<mml:mi>&#x3b2;</mml:mi>
</mml:math>
</inline-formula> &#x3d; &#x2212;0.88, t(9) &#x3d; &#x2212;0.91, and <italic>p</italic> &#x3d; 0.384), and there was no significant proportion of variance in IAT scores explained by the perceived body size ratings (R<sup>2</sup> &#x3d; 0.09, F(1, 9) &#x3d; 0.84, and <italic>p</italic> &#x3d; 0.384). These findings suggest that the greater the experience of having a larger body in the larger-avatar VR experience for participants, the more strongly larger people were associated with positive words in the IAT task. However, this reduced bias only existed in the first-person avatar customization condition, and that association was absent in the third-person avatar customization condition.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Body perception correlates with the IAT. Positive relationship between perceived body size and the IAT in the first-person avatar customization condition. There was a significant positive relationship between the perception of an increased body size and the IAT score for participants following the larger-avatar virtual experience in the first-person avatar customization condition. Each point represents each participant in the first-person avatar customization condition. The solid line depicts the regression slope and intercept and included the 95% confidence interval bands (shaded area). The dotted line represents the division between positive implicit associations (all values above the dotted line) toward larger people and negative implicit associations (all values below the dotted line).</p>
</caption>
<graphic xlink:href="frvir-05-1436752-g005.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s5">
<title>5 Discussion</title>
<p>Our research shows how the avatar customization processes, whether in the first person or third person, affect the embodied experience in VR. This has potential implications for bias reduction in empathetic applications. Previous experiments have established that bias reductions based on the appearance of self-avatars depend on the embodiment level that participants experience toward the avatar (<xref ref-type="bibr" rid="B46">Maister et al., 2013;</xref> <xref ref-type="bibr" rid="B54">Peck et al., 2013</xref>); however, these experiments did not ask participants to modify the appearance of their own avatars. The avatars were simply given to them. On the other hand, several experiments have dealt with body shape alteration and larger avatars, but they generally did not focus on implicit bias but on other aspects such as body schema, body anxiety, and other body disorders (<xref ref-type="bibr" rid="B32">Hudson et al., 2020</xref>). Although our morphing approach based on blend shapes is not complex and was not designed to discriminate sportive people with high BMIs from people who would be classified by their BMI as being overweight or obese, we believe that our findings would also transfer to that context.</p>
<p>There are significant behavioral implications for users&#x2019; avatar appearance and self-customization (<xref ref-type="bibr" rid="B3">Bailey et al., 2009;</xref> <xref ref-type="bibr" rid="B19">Gonzalez-Franco et al., 2016</xref>). For example, in a study using video games, researchers found that children were more aroused toward junk-food advertisements when they had designed their own avatars (<xref ref-type="bibr" rid="B3">Bailey et al., 2009</xref>). This work, despite not being in VR, found that users&#x2019; sense of presence was the key factor in their arousal response (<xref ref-type="bibr" rid="B3">Bailey et al., 2009</xref>). In our study, we explored the importance of self-customization of avatars and its implications for bias reduction within VR. We found that if the avatars were customized from a third-person perspective, larger-avatar embodiment significantly decreased compared to self-body avatar embodiment. In other words, participants had a reduced acceptance of the larger body. On the contrary, we found that if the customization of the larger-avatar had occurred from the first-person perspective, the reduction in embodiment would not exist. In other words, there was no difference in embodiment between the larger- and self-avatar VR experiences when the avatars were created inside out, suggesting that participants felt equally embodied in both experiences, but that was only true when they had customized the avatar in first person.</p>
<p>An examination of the perceived body-size ratings revealed that only participants in the first-person avatar customization condition experienced a significant increase in their perceived body size when embodying the larger virtual avatar and that participants in the third-person avatar customization condition did not. These findings suggest that the reduced sense of ownership over the larger virtual avatar may have prevented participants from having the experience of being larger in the third-person avatar customization condition. We further found that the increase in perceived body size was positively correlated with the IAT scores following the larger-avatar body condition for avatars customized in the first person, whereas no significant difference in body-size ratings or correlation with the IAT was observed for participants in the third-person avatar customization condition.</p>
<p>The lack of a significant difference in the IAT scores directly following the self-avatar and larger-avatar VR experiences might be due to the fact that altering the shape of the body to achieve significant bias reductions may require a longer period of embodiment than the 90&#xa0;s of our exposure. Considering that all participants came from similar demographics (men of normal BMI), the null IAT results also show that both groups were similar in their original bias toward body size. Nevertheless, even with the short exposure time, we observed that individual variability in the participant experience of having an increased body size during the larger-avatar VR experience was significantly predicted by participant IAT scores in the first-person avatar customization condition. In other words, the greater the perceived increase in body size was, the stronger the association was between overweight images and positive words.</p>
<p>We can relate our findings to the ongoing discussion about embodiment and agency in immersive systems (<xref ref-type="bibr" rid="B39">Kilteni et al., 2012a</xref>). A sense of agency has been distinguished from embodiment as encompassing &#x201c;global motor control, including the subjective experience of action, control, intention, motor selection, and the conscious experience of will&#x201d; (<xref ref-type="bibr" rid="B7">Blanke and Metzinger, 2009</xref>). Agency thus seems to be a natural consequence of being active in the environment and using one&#x2019;s body. Indeed, early demonstrations of a body ownership illusion in virtual reality used self-motion rather than tactile inductions (<xref ref-type="bibr" rid="B63">Sanchez-Vives et al., 2010;</xref> <xref ref-type="bibr" rid="B80">Yuan and Steed, 2010</xref>), and some have even found that bodily illusions might be more powerful with motion than with tactile stimulation (<xref ref-type="bibr" rid="B41">Kokkinara and Slater, 2014;</xref> <xref ref-type="bibr" rid="B67">Spanlang et al., 2014</xref>). Agency can also create a stronger self-avatar follower effect (<xref ref-type="bibr" rid="B20">Gonzalez-Franco et al., 2020a</xref>) and can be used to redirect the actions of participants (<xref ref-type="bibr" rid="B49">Maselli et al., 2023</xref>). This can be contrasted with methods that use self-observation and reflection on one&#x2019;s body (<xref ref-type="bibr" rid="B69">Tajadura-Jim&#xe9;nez et al., 2017</xref>). Our work somewhat switches between the two modes of engagement with the self-avatar: in the first-person mode, participants actively engage with their bodies when changing the shape, but this is only a small component of the experience. During the cylinder-grabbing task, they are not focused on their own bodies. 
Indeed, one of the participants commented that they were very aware of their body during the avatar phase, but once they started the game, they were just &#x201c;there&#x201d; participating in the game. We can draw two hypotheses from this: either being embodied in a task that does not involve reflection on the avatar still has an unconscious bias effect, or the initial embodiment phase has an impact over a duration of at least a few minutes. Determining which factors could have a significant impact on embodiment systems is important. In the former case, the location and behavior of the self-avatar might have an impact on embodiment, regardless of the amount of time the user spends customizing it. This suggests that, as a field, we might need to reflect on the default avatars that users choose or even whether users should be required to do some customization. In the latter, then, the key question is the length of time that the induction has and whether it can be reinforced by occasional reflection (e.g., seeing oneself in a mirror). This sort of implicit embodiment with altering effects on later behavior was also patent in the Pinocchio VR Illusion, where researchers found that although the focus was on the nose, changes in arm size were internalized by users who later exhibited extended reach perception, despite most being unaware of the arm manipulation. This shows that even if little attention is paid to the avatar&#x2019;s body, we still internalize the avatar we embody (<xref ref-type="bibr" rid="B5">Berger et al., 2022</xref>).</p>
<p>We can make a connection between our work and the work on the attitudes of users toward avatars over longer exposure. Although this has only recently been studied in an immersive context (<xref ref-type="bibr" rid="B17">Freeman and Maloney, 2021</xref>), there is a rich literature on how avatar customization fosters engagement and motivation in games (<xref ref-type="bibr" rid="B6">Birk et al., 2016</xref>) and how this is related to identity (<xref ref-type="bibr" rid="B75">Waggoner, 2009</xref>). We can contrast that work with studies by Fitton et al., who showed that minimal customization has a large impact on a training task (<xref ref-type="bibr" rid="B16">Fitton et al., 2023</xref>). Additionally, the results from neurophysiology studies of look-alike avatars have also shown that over time, self-avatars are perceived more like themselves in the visual cortex, even if they are not photo-realistic (<xref ref-type="bibr" rid="B19">Gonzalez-Franco et al., 2016</xref>), further showing that adaptation to the virtual body increases over time. As opposed to disembodied interactions in social VR, it is important that participants feel connected to their avatars and their actions to perhaps maintain their moral compass. Similar recommendations have been made by philosophers <xref ref-type="bibr" rid="B45">Madary and Metzinger (2016)</xref>, who discuss &#x201c;Illusions of Embodiment and Their Lasting Effect.&#x201d;</p>
<p>Our work provides users with a different way to engage with their self-representation, allowing for a wide range of achievable avatars. This raises an interesting question of how much precision and variety are required to support the different impacts (embodiment, task engagement, performance, etc.) that a system designer might want. Modifying avatars takes the user time, and while it is a rewarding activity in itself for some users, we speculate that some users will be content with very crude controls as long as they can reach something approaching their desired avatars, be it a representation of themselves at the current time or a fictitious representation. In some situations, when self-identity should be preserved, users may opt for real scans and reproductions of their own bodies, while in other cases, parameterized avatars might be a solution (<xref ref-type="bibr" rid="B22">Gonzalez-Franco et al., 2020b</xref>). However, independent of the approach that generates the first iteration of the user avatars, the results from our experiments show that if we want users to feel like this is their body, they should rather customize and adjust these avatars from a first-person perspective.</p>
<p>A limitation to our study is that the participants were all male, and we used an avatar that had a male appearance. We note that there are significant differences in response to body image between men and women (e.g., <xref ref-type="bibr" rid="B44">MacNeill et al., 2017</xref>; <xref ref-type="bibr" rid="B60">Quittkat et al., 2019</xref>). We acknowledge that the study would need to be re-run with female participants in order to generalize. Indeed, virtual reality interaction might itself have a gendered effect (<xref ref-type="bibr" rid="B55">Peck et al., 2020</xref>).</p>
<p>Finally, we reflect on the first-person embodiment mechanism itself. Although participants had no trouble changing their size as instructed, because the scaling was based on contact, they would often scale the avatar to the ends of the scales quite quickly. We often saw that users had to grab their upper or lower body and then gauge the small movements required. We suggest that other mechanisms might be superior, such as pushing and pulling on relative rather than absolute scales and having an impact on a small region spread over time or using an indirect mechanism that shrinks or inflates the parts. Our mechanism should work for larger changes. We would also be very interested in other mechanisms, such as putting on or removing clothes. In the end, our appearances are not only shaped by our bodies but also by how we dress. Furthermore, our performance in virtual worlds is highly affected by appearance (<xref ref-type="bibr" rid="B38">Kilteni et al., 2013</xref>).</p>
</sec>
<sec sec-type="conclusion" id="s6">
<title>6 Conclusion</title>
<p>In this paper, we explored the impact of self-avatar customization on embodiment and bias. Our findings suggest that selecting and designing a larger avatar from a first-person perspective is positively associated with the experience of being larger when embodied in the larger virtual avatar.</p>
<p>We believe that having users create and embody larger-body avatars from a first-person perspective may not only lead to greater embodiment but also greater feelings of empathy toward larger people in the long term. Our findings also show that the standard method of selecting and designing avatars from a third-person perspective may work fine when the avatars are designed to represent oneself but backfire and reduce the experience of embodiment when the avatar is unlike oneself. Indeed, these findings are also important for our understanding of the sense of embodiment in virtual avatars, how much the external appearance can affect the embodiment, and, more importantly, how the choice of design of avatars has potential ethical implications for VR applications.</p>
<p>There are important practical and ethical implications derived from our research regarding the current use, design, and development of virtual avatars, with particular implications for the cases in which a therapeutic, empathetic, or bias reduction effect is sought. Specifically, our results show that users were less embodied in the larger-body avatar, and this might suggest that users were &#x201c;othering&#x201d; the virtual avatar (i.e., psychologically distancing themselves) when creating it from the third-person perspective. Users are, therefore, likely to be less empathetic to larger-size avatars and people during and after the VR experience in a larger-sized avatar in that condition. In contrast, our findings showed a significant positive relationship between positive implicit associations with larger-size bodies and feelings of having a larger body if the customization happened inside out, i.e., in the first-person condition, when participants experienced the larger-size body truly as themselves. This interpretation is in line with previous work; however, future research will need to be carried out to further understand the short- and long-term behavioral consequences of self-avatar customization and implicit biases.</p>
<p>Finally, we highlight that the first-person avatar customization suggests that how the self-avatar is manipulated or chosen should be explored in more depth. We expect that this can be extended to other aspects such as more degrees of freedom of the body shape and potentially other characteristics, such as skin color, clothing, and jewelry. An active engagement in the process of self-avatar change might aid the embodiment of the changed avatar.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s13">Supplementary Material;</xref> further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s8">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Microsoft Research Institutional Review Board. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s9">
<title>Author contributions</title>
<p>MG-F: conceptualization, data curation, formal analysis, investigation, methodology, supervision, writing&#x2013;original draft, and writing&#x2013;review and editing. AS: conceptualization, data curation, investigation, methodology, project administration, software, supervision, validation, writing&#x2013;original draft, and writing&#x2013;review and editing. CB: formal analysis, visualization, writing&#x2013;original draft, and writing&#x2013;review and editing. AT-J: methodology, supervision, writing&#x2013;original draft, and writing&#x2013;review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s10">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. AT-J was funded by the European Research Council (ERC) under the European Union&#x2019;s Horizon 2020 research and innovation program (Grant Agreement No. 101002711; project BODYinTRANSIT).</p>
</sec>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of interest</title>
<p>Authors MGF and AS were employed by company Microsoft Research.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors, and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s13">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frvir.2024.1436752/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frvir.2024.1436752/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Video1.MP4" id="SM2" mimetype="video/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn2">
<label>1</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://www.cdc.gov/healthyweight/assessing/bmi">https://www.cdc.gov/healthyweight/assessing/bmi</ext-link>
</p>
</fn>
<fn id="fn3">
<label>2</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://meade.wordpress.ncsu.edu/freeiat-home/">https://meade.wordpress.ncsu.edu/freeiat-home/</ext-link>; version 1.3.3.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Achenbach</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Waltemate</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Latoschik</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Botsch</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Fast generation of realistic virtual humans</article-title>,&#x201d; in <conf-name>Proceedings of the 23rd acm symposium on virtual reality software and technology</conf-name>. <conf-loc>Gothenburg, Sweden</conf-loc>, <conf-date>November 8 - 10, 2017</conf-date> (<publisher-name>ACM</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>10</lpage>.</citation>
</ref>
<ref id="B2">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Anguelov</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Srinivasan</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Koller</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Thrun</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Rodgers</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2005</year>). &#x201c;<article-title>Scape: shape completion and animation of people</article-title>,&#x201d; in <conf-name>ACM SIGGRAPH 2005 papers</conf-name>, <fpage>408</fpage>&#x2013;<lpage>416</lpage>.</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bailey</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wise</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bolls</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>How avatar customizability affects children&#x2019;s arousal and subjective presence during junk food&#x2013;sponsored online video games</article-title>. <source>CyberPsychology Behav.</source> <volume>12</volume>, <fpage>277</fpage>&#x2013;<lpage>283</lpage>. <pub-id pub-id-type="doi">10.1089/cpb.2008.0292</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Hanumanthu</surname>
<given-names>P. D.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Virtual embodiment of white people in a black virtual body leads to a sustained reduction in their implicit racial bias</article-title>. <source>Front. Hum. Neurosci.</source> <volume>10</volume>, <fpage>601</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00601</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berger</surname>
<given-names>C. C.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Lenggenhager</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Lanier</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Follow your nose: extended arm reach after pinocchio illusion in virtual reality</article-title>. <source>Front. Virtual Real.</source> <volume>3</volume>, <fpage>712375</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2022.712375</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Birk</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Atkins</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Bowey</surname>
<given-names>J. T.</given-names>
</name>
<name>
<surname>Mandryk</surname>
<given-names>R. L.</given-names>
</name>
</person-group> (<year>2016</year>) &#x201c;<article-title>Fostering intrinsic motivation through avatar identification in digital games</article-title>,&#x201d; in <conf-name>CHI &#x27;16: Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems</conf-name>. <conf-loc>California, San Jose, USA</conf-loc>, <conf-date>May 7 - 12, 2016</conf-date> <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>, <fpage>2982</fpage>&#x2013;<lpage>2995</lpage>.</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blanke</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Metzinger</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Full-body illusions and minimal phenomenal selfhood</article-title>. <source>Trends Cognitive Sci.</source> <volume>13</volume>, <fpage>7</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2008.10.003</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Botsch</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sorkine</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>On linear variational surface deformation methods</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>14</volume>, <fpage>213</fpage>&#x2013;<lpage>230</lpage>. <pub-id pub-id-type="doi">10.1109/tvcg.2007.1054</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Cain</surname>
<given-names>G. G.</given-names>
</name>
</person-group> (<year>1986</year>). <article-title>Chapter 13 the economic analysis of labor market discrimination: a survey</article-title>. in <source>Handbook of Labor Economics</source> (<publisher-loc>Netherlands</publisher-loc>: <publisher-name>Elsevier</publisher-name>), <fpage>693</fpage>&#x2013;<lpage>785</lpage>. <pub-id pub-id-type="doi">10.1016/S1573-4463(86)01016-7</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carels</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Wott</surname>
<given-names>C. B.</given-names>
</name>
<name>
<surname>Young</surname>
<given-names>K. M.</given-names>
</name>
<name>
<surname>Gumble</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Koball</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Oehlhof</surname>
<given-names>M. W.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Implicit, explicit, and internalized weight bias and psychosocial maladjustment among treatment-seeking adults</article-title>. <source>Eat. Behav.</source> <volume>11</volume>, <fpage>180</fpage>&#x2013;<lpage>185</lpage>. <pub-id pub-id-type="doi">10.1016/j.eatbeh.2010.03.002</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Churchill</surname>
<given-names>E. F.</given-names>
</name>
<name>
<surname>Snowdon</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Collaborative virtual environments: an introductory review of issues and systems</article-title>. <source>Virtual Real.</source> <volume>3</volume>, <fpage>3</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1007/BF01409793</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Davis</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2000</year>) <source>The color of violence against women</source>. <publisher-name>Colorlines</publisher-name>, <fpage>4</fpage>.</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>D&#xf6;llinger</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Mal</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wenninger</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Botsch</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Latoschik</surname>
<given-names>M. E.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Resize Me! Exploring the user experience of embodied realistic modulatable avatars for body image intervention in virtual reality</article-title>. <source>Front. Virtual Real.</source> <volume>3</volume>. <pub-id pub-id-type="doi">10.3389/frvir.2022.935449</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dunn</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Guadagno</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>My avatar and me &#x2013; gender and personality predictors of avatar-self discrepancy</article-title>. <source>Comput. Hum. Behav.</source> <volume>28</volume>, <fpage>97</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2011.08.015</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Falconer</surname>
<given-names>C. J.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rovira</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>King</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Gilbert</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Antley</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>Embodying compassion: a virtual reality paradigm for overcoming excessive self-criticism</article-title>. <source>PLOS ONE</source> <volume>9</volume>, <fpage>e111933</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0111933</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Fitton</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Clarke</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Dalton</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Proulx</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Lutteroth</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>) &#x201c;<article-title>Dancing with the avatars: minimal avatar customisation enhances learning in a psychomotor task</article-title>,&#x201d; in <conf-name>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>.</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Freeman</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Maloney</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Body, avatar, and me: the presentation and perception of self in social virtual reality</article-title>. <source>Proc. ACM Human-Computer Interact.</source> <volume>4</volume> (<issue>239</issue>), <fpage>1</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1145/3432938</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Abtahi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Individual differences in embodied distance estimation in virtual reality</article-title>,&#x201d; in <conf-name>2019 IEEE conference on virtual reality and 3D user interfaces (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>941</fpage>&#x2013;<lpage>943</lpage>.</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bellido</surname>
<given-names>A. I.</given-names>
</name>
<name>
<surname>Blom</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rodriguez-Fornells</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The neurological traces of look-alike avatars</article-title>. <source>Front. Hum. Neurosci.</source> <volume>10</volume>, <fpage>392</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00392</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cohn</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ofek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Burin</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Maselli</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020a</year>). &#x201c;<article-title>The self-avatar follower effect in virtual reality</article-title>,&#x201d; in <conf-name>2020 IEEE conference on virtual reality and 3D user interfaces (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>18</fpage>&#x2013;<lpage>25</lpage>.</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lanier</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Model of illusions and virtual reality</article-title>. <source>Front. Psychol.</source> <volume>8</volume>, <fpage>1125</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01125</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ofek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Antley</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2020b</year>). <article-title>The Rocketbox library and the utility of freely available rigged avatars</article-title>. <source>Front. Virtual Real.</source> <volume>1</volume>, <fpage>20</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2020.561558</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Peck</surname>
<given-names>T. C.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Avatar embodiment. Towards a standardized questionnaire</article-title>. <source>Front. Robotics AI</source> <volume>5</volume>, <fpage>74</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00074</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Gonz&#xe1;lez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>P&#xe9;rez-Marcos</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>The contribution of real-time mirror reflections of motor actions on virtual body ownership in an immersive virtual environment</article-title>,&#x201d; in <conf-name>2010 IEEE virtual reality conference (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>111</fpage>&#x2013;<lpage>114</lpage>.</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Birney</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Swapp</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Haslam</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Reicher</surname>
<given-names>S. D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Participant concerns for the learner in a virtual reality replication of the Milgram obedience study</article-title>. <source>PLOS ONE</source> <volume>13</volume>, <fpage>e0209704</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0209704</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Liencres</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zapata</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Iruretagoyena</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Seinfeld</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Perez-Mendez</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Arroyo-Palacios</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Being the victim of intimate partner violence in virtual reality: first- versus third-person perspective</article-title>. <source>Front. Psychol.</source> <volume>11</volume>, <fpage>820</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2020.00820</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Greenwald</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Banaji</surname>
<given-names>M. R.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Implicit social cognition: attitudes, self-esteem, and stereotypes</article-title>. <source>Psychol. Rev.</source> <volume>102</volume>, <fpage>4</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1037/0033-295X.102.1.4</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Greenwald</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Krieger</surname>
<given-names>L. H.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Implicit bias: scientific foundations</article-title>. <source>Calif. Law Rev.</source> <volume>94</volume>, <fpage>945</fpage>&#x2013;<lpage>967</lpage>. <pub-id pub-id-type="doi">10.2307/20439056</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Greenwald</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Nosek</surname>
<given-names>B. A.</given-names>
</name>
<name>
<surname>Banaji</surname>
<given-names>M. R.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Understanding and using the Implicit Association Test: I. An improved scoring algorithm</article-title>. <source>J. Personality Soc. Psychol.</source> <volume>85</volume>, <fpage>197</fpage>&#x2013;<lpage>216</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.85.2.197</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hoort</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Guterstam</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Being Barbie: the size of one&#x2019;s own body determines the perceived size of the world</article-title>. <source>PLOS ONE</source> <volume>6</volume>, <fpage>e20195</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0020195</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Horne</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hill</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Murrells</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ugail</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Chinnadorai</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Hardy</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Using avatars in weight management settings: a systematic review</article-title>. <source>Internet interv.</source> <volume>19</volume>, <fpage>100295</fpage>. <pub-id pub-id-type="doi">10.1016/j.invent.2019.100295</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hudson</surname>
<given-names>G. M.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Hahn</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zabal</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Latif</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>The development of a bmi-guided shape morphing technique and the effects of an individualized figure rating scale on self-perception of body size</article-title>. <source>Eur. J. Investigation Health, Psychol. Educ.</source> <volume>10</volume>, <fpage>579</fpage>&#x2013;<lpage>594</lpage>. <pub-id pub-id-type="doi">10.3390/ejihpe10020043</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Jayaraj</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wood</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gibson</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Improving the immersion in virtual reality with real-time avatar and haptic feedback in a cricket simulation</article-title>,&#x201d; in <conf-name>2017 IEEE international symposium on mixed and augmented reality (ISMAR-Adjunct)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>310</fpage>&#x2013;<lpage>314</lpage>.</citation>
</ref>
<ref id="B34">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Jeffreys</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>1998</year>). <source>The theory of probability</source>. <publisher-loc>Oxford</publisher-loc>: <publisher-name>OUP Oxford</publisher-name>.</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jolls</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Sunstein</surname>
<given-names>C. R.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>The law of implicit bias</article-title>. <source>Calif. Law Rev.</source> <volume>94</volume>, <fpage>969</fpage>&#x2013;<lpage>996</lpage>. <pub-id pub-id-type="doi">10.2307/20439057</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Jonas</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Said</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Aiello</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Furlo</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Zytko</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Towards a taxonomy of social VR application design</article-title>,&#x201d; in <conf-name>Extended abstracts of the annual symposium on computer-human interaction in play companion extended abstracts</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>437</fpage>&#x2013;<lpage>444</lpage>.</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bennett</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Carbado</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Casey</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Levinson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Implicit bias in the courtroom</article-title>. <source>UCLA Law Rev.</source> <volume>59</volume>, <fpage>1124</fpage>&#x2013;<lpage>1187</lpage>.</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bergstrom</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Drumming in immersive virtual reality: the body shapes the way we play</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>19</volume>, <fpage>597</fpage>&#x2013;<lpage>605</lpage>. <pub-id pub-id-type="doi">10.1109/tvcg.2013.29</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Groten</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2012a</year>). <article-title>The sense of embodiment in virtual reality</article-title>. <source>Presence Teleoperators Virtual Environ.</source> <volume>21</volume>, <fpage>373</fpage>&#x2013;<lpage>387</lpage>. <pub-id pub-id-type="doi">10.1162/PRES_a_00124</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Normand</surname>
<given-names>J.-M.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2012b</year>). <article-title>Extending body space in immersive virtual reality: a very long arm illusion</article-title>. <source>PLOS ONE</source> <volume>7</volume>, <fpage>e40867</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0040867</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kokkinara</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Measuring the effects through time of the influence of visuomotor and visuotactile synchronous stimulation on a virtual body ownership illusion</article-title>. <source>Perception</source> <volume>43</volume>, <fpage>43</fpage>&#x2013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1068/p7545</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lenggenhager</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Tadi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Metzinger</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Blanke</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Video ergo sum: manipulating bodily self-consciousness</article-title>. <source>Science</source> <volume>317</volume>, <fpage>1096</fpage>&#x2013;<lpage>1099</lpage>. <pub-id pub-id-type="doi">10.1126/science.1143439</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Social virtual reality platform comparison and evaluation using a guided group walkthrough method</article-title>. <source>Front. Virtual Real.</source> <volume>2</volume>, <fpage>52</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2021.668181</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>MacNeill</surname>
<given-names>L. P.</given-names>
</name>
<name>
<surname>Best</surname>
<given-names>L. A.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>L. L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The role of personality in body image dissatisfaction and disordered eating: discrepancies between men and women</article-title>. <source>J. Eat. Disord.</source> <volume>5</volume>, <fpage>44</fpage>. <pub-id pub-id-type="doi">10.1186/s40337-017-0177-8</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Madary</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Metzinger</surname>
<given-names>T. K.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Real virtuality: a code of ethical conduct. Recommendations for good scientific practice and the consumers of VR-technology</article-title>. <source>Front. Robotics AI</source> <volume>3</volume> (<issue>3</issue>). <pub-id pub-id-type="doi">10.3389/frobt.2016.00003</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Maister</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Sebanz</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Knoblich</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Tsakiris</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Experiencing ownership over a dark-skinned body reduces implicit racial bias</article-title>. <source>Cognition</source> <volume>128</volume>, <fpage>170</fpage>&#x2013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2013.04.002</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Maloney</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Mitigating negative effects of immersive virtual avatars on racial bias</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 annual symposium on computer-human interaction in play companion extended abstracts</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>39</fpage>&#x2013;<lpage>43</lpage>.</citation>
</ref>
<ref id="B48">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Maloney</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Rajasabeson</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Moore</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Caldwell</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Archer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Robb</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Ethical concerns of the use of virtual avatars in consumer entertainment</article-title>,&#x201d; in <conf-name>2019 IEEE conference on virtual reality and 3D user interfaces (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>1489</fpage>&#x2013;<lpage>1492</lpage>.</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Maselli</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ofek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cohn</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Hinckley</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Enhanced efficiency in visually guided online motor control for actions redirected towards the body midline</article-title>. <source>Philosophical Trans. R. Soc. B</source> <volume>378</volume>, <fpage>20210453</fpage>. <pub-id pub-id-type="doi">10.1098/rstb.2021.0453</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Morey</surname>
<given-names>R. D.</given-names>
</name>
<name>
<surname>Rouder</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Jamil</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Urbanek</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Forner</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ly</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <source>BayesFactor: Computation of Bayes Factors for Common Designs</source>.</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mottelson</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Muresan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hornb&#xe6;k</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Makransky</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A systematic review and meta-analysis of the effectiveness of body ownership illusions in virtual reality</article-title>. <source>ACM Trans. Comput.-Hum. Interact.</source> <volume>30</volume> (<issue>76</issue>), <fpage>1</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1145/3590767</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Normand</surname>
<given-names>J.-M.</given-names>
</name>
<name>
<surname>Giannopoulos</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Multisensory stimulation can induce an illusion of larger belly size in immersive virtual reality</article-title>. <source>PLOS ONE</source> <volume>6</volume>, <fpage>e16128</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0016128</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Padrao</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rodriguez-Fornells</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Violating body movement semantics: neural signatures of self-generated and external-generated errors</article-title>. <source>NeuroImage</source> <volume>124</volume>, <fpage>147</fpage>&#x2013;<lpage>156</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.08.022</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peck</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Seinfeld</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Aglioti</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Putting yourself in the skin of a black avatar reduces implicit racial bias</article-title>. <source>Conscious. Cognition</source> <volume>22</volume>, <fpage>779</fpage>&#x2013;<lpage>787</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2013.04.016</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peck</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Sockol</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Hancock</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Mind the gap: the underrepresentation of female participants and authors in virtual reality research</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>26</volume>, <fpage>1945</fpage>&#x2013;<lpage>1954</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2020.2973498</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Phadnis</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Moore</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Franco</surname>
<given-names>M. G.</given-names>
</name>
</person-group> (<year>2023</year>). <source>The work avatar face-off: a survey of knowledge worker preferences for realism in meetings</source>.</citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Piryankova</surname>
<given-names>I. V.</given-names>
</name>
<name>
<surname>Wong</surname>
<given-names>H. Y.</given-names>
</name>
<name>
<surname>Linkenauger</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Stinson</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Longo</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>B&#xfc;lthoff</surname>
<given-names>H. H.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>Owning an overweight or underweight body: distinguishing the physical, experienced and virtual body</article-title>. <source>PLOS ONE</source> <volume>9</volume>, <fpage>e103428</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0103428</pub-id>
</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Puhl</surname>
<given-names>R. M.</given-names>
</name>
<name>
<surname>Moss-Racusin</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Schwartz</surname>
<given-names>M. B.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Internalization of weight bias: implications for binge eating and emotional well-being</article-title>. <source>Obesity</source> <volume>15</volume>, <fpage>19</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1038/oby.2007.521</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pujades</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Mohler</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Thaler</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tesch</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mahmood</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hesse</surname>
<given-names>N.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>The virtual caliper: rapid creation of metrically accurate avatars from 3D measurements</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>25</volume>, <fpage>1887</fpage>&#x2013;<lpage>1897</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2019.2898748</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Quittkat</surname>
<given-names>H. L.</given-names>
</name>
<name>
<surname>Hartmann</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>D&#xfc;sing</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Buhlmann</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Vocks</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Body dissatisfaction, importance of appearance, and body appreciation in men and women over the lifespan</article-title>. <source>Front. Psychiatry</source> <volume>10</volume>, <fpage>864</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyt.2019.00864</pub-id>
</citation>
</ref>
<ref id="B61">
<citation citation-type="web">
<collab>Rec Room</collab> (<year>2021</year>). <article-title>Rec Room</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://recroom.com/">https://recroom.com/</ext-link>.</comment>
</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>From presence to consciousness through virtual reality</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>6</volume>, <fpage>332</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1038/nrn1651</pub-id>
</citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Frisoli</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bergamasco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Virtual hand illusion induced by visuomotor correlations</article-title>. <source>PLOS ONE</source> <volume>5</volume>, <fpage>e10381</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0010381</pub-id>
</citation>
</ref>
<ref id="B64">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Schroeder</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2010</year>). <source>Being there together: social interaction in shared virtual environments</source>. <publisher-name>Oxford University Press</publisher-name>.</citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seinfeld</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Arroyo-Palacios</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Iruretagoyena</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Hortensius</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Zapata</surname>
<given-names>L. E.</given-names>
</name>
<name>
<surname>Borland</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Offenders become the victim in virtual reality: impact of changing perspective in domestic violence</article-title>. <source>Sci. Rep.</source> <volume>8</volume>, <fpage>2692</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-19987-7</pub-id>
</citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Place illusion and plausibility can lead to realistic behaviour in immersive virtual environments</article-title>. <source>Philosophical Trans. R. Soc. B Biol. Sci.</source> <volume>364</volume>, <fpage>3549</fpage>&#x2013;<lpage>3557</lpage>. <pub-id pub-id-type="doi">10.1098/rstb.2009.0138</pub-id>
</citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Normand</surname>
<given-names>J.-M.</given-names>
</name>
<name>
<surname>Borland</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Giannopoulos</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pom&#xe9;s</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2014</year>). <article-title>How to build an embodiment lab: achieving body representation illusions in virtual reality</article-title>. <source>Front. Robotics AI</source> <volume>1</volume>, <fpage>9</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2014.00009</pub-id>
</citation>
</ref>
<ref id="B68">
<citation citation-type="web">
<collab>Spatial Systems Inc</collab> (<year>2021</year>). <article-title>Spatial</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://spatial.io/">https://spatial.io/</ext-link>.</comment>
</citation>
</ref>
<ref id="B69">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tajadura-Jim&#xe9;nez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bianchi-Berthouze</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Embodiment in a child-like talking virtual body influences object size perception, self-identification, and subsequent real speaking</article-title>. <source>Sci. Rep.</source> <volume>7</volume>, <fpage>9637</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-09497-3</pub-id>
</citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tanenbaum</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Hartoonian</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bryan</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>&#x201c;How do I make this thing smile?&#x201d;: an inventory of expressive nonverbal communication in commercial social virtual reality platforms</article-title>. <source>Proc. 2020 CHI Conf. Hum. Factors Comput. Syst.</source> <volume>20</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1145/3313831.3376606</pub-id>
</citation>
</ref>
<ref id="B71">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Teachman</surname>
<given-names>B. A.</given-names>
</name>
<name>
<surname>Brownell</surname>
<given-names>K. D.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Implicit anti-fat bias among health professionals: is anyone immune?</article-title> <source>Int. J. Obes.</source> <volume>25</volume>, <fpage>1525</fpage>&#x2013;<lpage>1531</lpage>. <pub-id pub-id-type="doi">10.1038/sj.ijo.0801745</pub-id>
</citation>
</ref>
<ref id="B72">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Thaler</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Piryankova</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Stefanucci</surname>
<given-names>J. K.</given-names>
</name>
<name>
<surname>Pujades</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>de La Rosa</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Streuber</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Visual perception and evaluation of photo-realistic self-avatars from 3d body scans in males and females</article-title>. <source>Front. ICT</source> <volume>5</volume>, <fpage>18</fpage>. <pub-id pub-id-type="doi">10.3389/fict.2018.00018</pub-id>
</citation>
</ref>
<ref id="B73">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Thaler</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Pujades</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Stefanucci</surname>
<given-names>J. K.</given-names>
</name>
<name>
<surname>Creem-Regehr</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Tesch</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Black</surname>
<given-names>M. J.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). &#x201c;<article-title>The influence of visual perspective on body size estimation in immersive virtual reality</article-title>,&#x201d; in <conf-name>ACM symposium on applied perception 2019</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>12</lpage>.</citation>
</ref>
<ref id="B74">
<citation citation-type="web">
<collab>VRChat Inc</collab> (<year>2021</year>). <article-title>VRChat</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.vrchat.com/">https://www.vrchat.com/</ext-link>.</comment>
</citation>
</ref>
<ref id="B75">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Waggoner</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2009</year>). <source>My avatar, my self: Identity in video role-playing games</source>. <publisher-loc>Jefferson, NC</publisher-loc>: <publisher-name>McFarland</publisher-name>.</citation>
</ref>
<ref id="B76">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wolf</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>D&#xf6;llinger</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Mal</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wienrich</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Botsch</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Latoschik</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Body weight perception of females using photorealistic avatars in virtual and augmented reality</article-title>,&#x201d; in <conf-name>2020 IEEE international symposium on mixed and augmented reality (ISMAR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>462</fpage>&#x2013;<lpage>473</lpage>.</citation>
</ref>
<ref id="B77">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yee</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>The Proteus effect: the effect of transformed self-representation on behavior</article-title>. <source>Hum. Commun. Res.</source> <volume>33</volume>, <fpage>271</fpage>&#x2013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1111/j.1468-2958.2007.00299.x</pub-id>
</citation>
</ref>
<ref id="B78">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yee</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Ducheneaut</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>The Proteus effect: implications of transformed digital self-representation on online and offline behavior</article-title>. <source>Commun. Res.</source> <volume>36</volume>, <fpage>285</fpage>&#x2013;<lpage>312</lpage>. <pub-id pub-id-type="doi">10.1177/0093650208330254</pub-id>
</citation>
</ref>
<ref id="B79">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Yee</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Ducheneaut</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Yao</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Nelson</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Do men heal more when in drag? conflicting identity cues between user and avatar</article-title>,&#x201d; in <conf-name>Proceedings of the SIGCHI conference on human factors in computing systems</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>773</fpage>&#x2013;<lpage>776</lpage>.</citation>
</ref>
<ref id="B80">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Yuan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>Is the rubber hand illusion induced by immersive virtual reality?</article-title>,&#x201d; in <conf-name>2010 IEEE virtual reality conference (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>95</fpage>&#x2013;<lpage>102</lpage>.</citation>
</ref>
<ref id="B81">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeidler</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>McGinity</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Bodylab: in virtuo sculpting, painting and performing of full-body avatars</article-title>. <source>Proc. ACM Comput. Graph. Interact. Tech.</source> <volume>6</volume> (<issue>22</issue>), <fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1145/3597631</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>