<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frvir.2020.561558</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Technology and Code</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>The Rocketbox Library and the Utility of Freely Available Rigged Avatars</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Gonzalez-Franco</surname> <given-names>Mar</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/32801/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ofek</surname> <given-names>Eyal</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/838499/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Pan</surname> <given-names>Ye</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/304135/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Antley</surname> <given-names>Angus</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/5870/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Steed</surname> <given-names>Anthony</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/134824/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Spanlang</surname> <given-names>Bernhard</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/6582/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Maselli</surname> <given-names>Antonella</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/22926/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Banakou</surname> <given-names>Domna</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Pelechano</surname> <given-names>Nuria</given-names></name>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/815961/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Orts-Escolano</surname> <given-names>Sergio</given-names></name>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Orvalho</surname> <given-names>Veronica</given-names></name>
<xref ref-type="aff" rid="aff10"><sup>10</sup></xref>
<xref ref-type="aff" rid="aff11"><sup>11</sup></xref>
<xref ref-type="aff" rid="aff12"><sup>12</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Trutoiu</surname> <given-names>Laura</given-names></name>
<xref ref-type="aff" rid="aff13"><sup>13</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/407353/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wojcik</surname> <given-names>Markus</given-names></name>
<xref ref-type="aff" rid="aff14"><sup>14</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1019103/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Sanchez-Vives</surname> <given-names>Maria V.</given-names></name>
<xref ref-type="aff" rid="aff15"><sup>15</sup></xref>
<xref ref-type="aff" rid="aff16"><sup>16</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1171/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bailenson</surname> <given-names>Jeremy</given-names></name>
<xref ref-type="aff" rid="aff17"><sup>17</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Slater</surname> <given-names>Mel</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1114/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lanier</surname> <given-names>Jaron</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Microsoft Research</institution>, <addr-line>Redmond, WA</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Disney Research</institution>, <addr-line>Los Angeles, CA</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Computer Science Department, University College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff4"><sup>4</sup><institution>Virtual Bodyworks S.L.</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff5"><sup>5</sup><institution>Institute of Cognitive Sciences and Technologies, National Research Council</institution>, <addr-line>Rome</addr-line>, <country>Italy</country></aff>
<aff id="aff6"><sup>6</sup><institution>Department of Psychology, Institute of Neurosciences of the University of Barcelona</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff7"><sup>7</sup><institution>EventLab, Universitat de Barcelona</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff8"><sup>8</sup><institution>Computer Science Department, Universitat Polit&#x000E8;cnica de Catalunya</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff9"><sup>9</sup><institution>Google</institution>, <addr-line>Mountain View, CA</addr-line>, <country>United States</country></aff>
<aff id="aff10"><sup>10</sup><institution>Faculdade de Ci&#x000EA;ncias, Universidade do Porto</institution>, <addr-line>Porto</addr-line>, <country>Portugal</country></aff>
<aff id="aff11"><sup>11</sup><institution>Instituto de Telecomunica&#x000E7;&#x000F5;es</institution>, <addr-line>Porto</addr-line>, <country>Portugal</country></aff>
<aff id="aff12"><sup>12</sup><institution>Didimo Inc.</institution>, <addr-line>Porto</addr-line>, <country>Portugal</country></aff>
<aff id="aff13"><sup>13</sup><institution>Independent Researcher</institution>, <addr-line>Seattle, WA</addr-line>, <country>United States</country></aff>
<aff id="aff14"><sup>14</sup><institution>Independent Researcher</institution>, <addr-line>Hannover</addr-line>, <country>Germany</country></aff>
<aff id="aff15"><sup>15</sup><institution>Institut d&#x00027;Investigacions Biom&#x000E8;diques August Pi i Sunyer</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff16"><sup>16</sup><institution>Instituci&#x000F3; Catalana de Recerca i Estudis Avan&#x000E7;ats</institution>, <addr-line>Barcelona</addr-line>, <country>Spain</country></aff>
<aff id="aff17"><sup>17</sup><institution>Department of Communication, Stanford University</institution>, <addr-line>Stanford, CA</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Stefania Serafin, Aalborg University Copenhagen, Denmark</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Daniel Roth, Technical University of Munich, Germany; Mark Billinghurst, University of South Australia, Australia</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Mar Gonzalez-Franco <email>margon&#x00040;microsoft.com</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Technologies for VR, a section of the journal Frontiers in Virtual Reality</p></fn></author-notes>
<pub-date pub-type="epub">
<day>03</day>
<month>11</month>
<year>2020</year>
</pub-date>
<pub-date pub-type="collection">
<year>2020</year>
</pub-date>
<volume>1</volume>
<elocation-id>561558</elocation-id>
<history>
<date date-type="received">
<day>12</day>
<month>05</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>16</day>
<month>09</month>
<year>2020</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2020 Gonzalez-Franco, Ofek, Pan, Antley, Steed, Spanlang, Maselli, Banakou, Pelechano, Orts-Escolano, Orvalho, Trutoiu, Wojcik, Sanchez-Vives, Bailenson, Slater and Lanier.</copyright-statement>
<copyright-year>2020</copyright-year>
<copyright-holder>Gonzalez-Franco, Ofek, Pan, Antley, Steed, Spanlang, Maselli, Banakou, Pelechano, Orts-Escolano, Orvalho, Trutoiu, Wojcik, Sanchez-Vives, Bailenson, Slater and Lanier</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract><p>As part of the open sourcing of the Microsoft Rocketbox avatar library for research and academic purposes, here we discuss the importance of rigged avatars for the Virtual and Augmented Reality (VR, AR) research community. Avatars, virtual representations of humans, are widely used in VR applications. Furthermore, many research areas ranging from crowd simulation to neuroscience, psychology, or sociology have used avatars to investigate new theories or to demonstrate how they influence human performance and interactions. We divide this paper into two main parts: the first one gives an overview of the different methods available to create and animate avatars. We cover the current main alternatives for face and body animation as well as introduce upcoming capture methods. The second part presents the scientific evidence of the utility of using rigged avatars for embodiment but also for applications such as crowd simulation and entertainment. All in all, this paper attempts to convey why rigged avatars will be key to the future of VR and its wide adoption.</p></abstract>
<kwd-group>
<kwd>avatars</kwd>
<kwd>virtual reality</kwd>
<kwd>augmented reality</kwd>
<kwd>rigging</kwd>
<kwd>animation</kwd>
<kwd>motion capture</kwd>
<kwd>blendshapes</kwd>
<kwd>Microsoft Rocketbox</kwd>
</kwd-group>
<counts>
<fig-count count="14"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="212"/>
<page-count count="23"/>
<word-count count="19258"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1. Introduction</title>
<p>When representing users or computer-controlled agents within computer graphics systems we have a range of alternatives from abstract and cartoon-like, through human-like to fantastic creations from our imagination. However, in this paper we focus on anthropomorphically correct digital human representations: avatars. These digital avatars are a collection of geometry (meshes, vertices) and textures (images) combined to look like real humans in three dimensions (3D). When these avatars are rigged they have a skeleton system, can walk and be animated to resemble people. A human-like avatar is defined by its morphology and behavior. The morphology of an avatar refers to the definition of the shape and structure of the geometry of the 3D model, and it usually complies with the anatomical structure of the human body. The behavior of an avatar is defined by the movements the 3D model can perform. Avatars created with computer graphics can reach such a level of realism that they can substitute real humans inside Virtual Reality (VR) or Augmented Reality (AR). An avatar can represent a real live participating person, or an underlying software agent. When digital humans are controlled by algorithms they are referred to as embodied agents (Bailenson and Blascovich, <xref ref-type="bibr" rid="B20">2004</xref>).</p>
<p>When people enter an immersive virtual environment through a VR/AR system they may experience an illusion of being in the place depicted by the virtual environment, typically referred to as &#x0201C;presence&#x0201D; (Sanchez-Vives and Slater, <xref ref-type="bibr" rid="B171">2005</xref>). Presence has been decomposed into two different aspects, the illusion of &#x0201C;being there,&#x0201D; referred to as &#x0201C;Place Illusion,&#x0201D; and the illusion that the events that are occurring are really happening, referred to as &#x0201C;Plausibility&#x0201D; (Slater, <xref ref-type="bibr" rid="B179">2009</xref>). These illusions and their consequences occur in spite of the person knowing that nothing real is happening. However, typically, the stronger these illusions the more realistically people will respond to the events inside the VR and AR (Gonzalez-Franco and Lanier, <xref ref-type="bibr" rid="B67">2017</xref>).</p>
<p>A key contributor to plausibility is the representation and behavior of the avatars in the environment (Slater, <xref ref-type="bibr" rid="B179">2009</xref>). Are those avatars realistic? Do their appearance, behavior, and actions match with the plot? Do they behave and move according to expectations in the given context? Do they respond appropriately and according to expectations with the participant? Do they initiate interactions with the participant of their own accord? A simple example is that a character should smile back, or at least react, when a person smiles toward it. Another example is that a character moves out of the way, or acknowledges the participant in some way, as she or he walks by.</p>
<p>Avatars are key to every social VR and AR interaction (Schroeder, <xref ref-type="bibr" rid="B173">2012</xref>). They can be used to recreate social psychology scenarios that would be very hard or impossible to recreate in reality to evaluate human responses. Avatars have helped researchers in further studying bystander effects during violent scenarios (Rovira et al., <xref ref-type="bibr" rid="B166">2009</xref>; Slater et al., <xref ref-type="bibr" rid="B184">2013</xref>) or paradigms of obedience to authority (Slater et al., <xref ref-type="bibr" rid="B180">2006</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B71">2019b</xref>), to explore the effects of self-compassion (Falconer et al., <xref ref-type="bibr" rid="B55">2014</xref>, <xref ref-type="bibr" rid="B54">2016</xref>), crowd simulation (Pelechano et al., <xref ref-type="bibr" rid="B150">2007</xref>), or even experiencing the world from the embodied viewpoint of another (Osimo et al., <xref ref-type="bibr" rid="B139">2015</xref>; Hamilton-Giachritsis et al., <xref ref-type="bibr" rid="B77">2018</xref>; Seinfeld et al., <xref ref-type="bibr" rid="B174">2018</xref>). In many cases of VR social interaction, researchers use embodied agents (i.e., procedural avatars). Note that in this paper we do not use the term &#x0201C;procedural&#x0201D; to refer to how they were created, but rather how they are animated to represent agents in the scene, for example, following a series of predefined animations potentially driven by AI tools.</p>
<p>A particular case of avatars inside VR are self-avatars, or embodied avatars. A self-avatar is a 3D representation of a human model that is co-located with the user&#x00027;s body, as if it were to replace or hide the real body. When wearing a VR Head Mounted Display (HMD) the user cannot see the real environment around her and in particular, cannot see her own body. The same is true in some AR configurations. Self-avatars provide users with a virtual body that can be visually coincident with their real body (<xref ref-type="fig" rid="F1">Figure 1</xref>). This substitution of the self-body with a self-avatar is often referred to as embodiment (Longo et al., <xref ref-type="bibr" rid="B107">2008</xref>; Kilteni et al., <xref ref-type="bibr" rid="B86">2012a</xref>). We use the term &#x0201C;virtual embodiment&#x0201D; (or just &#x0201C;embodiment&#x0201D;) to describe the physical process that employs the VR hardware and software to substitute a person&#x00027;s body with a virtual one. Embodiment under a variety of conditions may give rise to the subjective illusions of body ownership and agency (Slater et al., <xref ref-type="bibr" rid="B183">2009</xref>; Spanlang et al., <xref ref-type="bibr" rid="B189">2014</xref>). Body ownership is enabled by multisensory processing and plasticity of body representation in the brain (Kilteni et al., <xref ref-type="bibr" rid="B87">2015</xref>). 
For example, if we see a body from a first-person perspective that moves as we move (i.e., synchronous visuo-motor correlations) (Gonzalez-Franco et al., <xref ref-type="bibr" rid="B70">2010</xref>), or is touched with the same spatio-temporal pattern as our real body (i.e., synchronous visuo-tactile correlations) (Slater et al., <xref ref-type="bibr" rid="B187">2010b</xref>), or is just static but co-located with our own body (i.e., congruent visuo-proprioceptive correlation) (Maselli and Slater, <xref ref-type="bibr" rid="B122">2013</xref>), then an embodiment experience is generated. In fact, even if the participant is not moving, nor being touched, in some setups, the first-person co-location will be sufficient to generate embodiment (Gonz&#x000E1;lez-Franco et al., <xref ref-type="bibr" rid="B69">2014</xref>; Maselli and Slater, <xref ref-type="bibr" rid="B123">2014</xref>; Gonzalez-Franco and Peck, <xref ref-type="bibr" rid="B68">2018</xref>), to result in the perceptual illusion that this is our body (even though we know for sure that it is not). Interestingly, and highly useful as a control, if there is asynchrony or incongruence between sensory inputs (either in space or in time) or in sensorimotor correlations, the illusion breaks (Berger et al., <xref ref-type="bibr" rid="B32">2018</xref>; Gonzalez-Franco and Berger, <xref ref-type="bibr" rid="B65">2019</xref>). This body ownership effect that was first demonstrated with a rubber hand (Botvinick and Cohen, <xref ref-type="bibr" rid="B37">1998</xref>), has now been replicated in a large number of instances with virtual avatars (for a review see Slater and Sanchez-Vives, <xref ref-type="bibr" rid="B185">2016</xref>; Gonzalez-Franco and Lanier, <xref ref-type="bibr" rid="B67">2017</xref>).</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Two of the Microsoft Rocketbox avatars being used for a first-person substitution of gender matched bodies in an embodiment experiment at University of Barcelona by Maselli and Slater (<xref ref-type="bibr" rid="B122">2013</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0001.tif"/>
</fig>
<p>This means that self-avatars not only allow us to interact with others as we would do in the real world, but are also critical for non-social VR experiences. The body is a basic aspect required for perceptual, cognitive, and bodily interactions. Inside VR, the avatar becomes our body, our &#x0201C;self&#x0201D;. Indeed participants that have a virtual body show better perceptual ability when estimating distances than non-embodied participants in VR (Mohler et al., <xref ref-type="bibr" rid="B129">2010</xref>; Phillips et al., <xref ref-type="bibr" rid="B157">2010</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B63">2019a</xref>). Self-avatars also change how we perceive touch inside VR (Maselli et al., <xref ref-type="bibr" rid="B121">2016</xref>; Gonzalez-Franco and Berger, <xref ref-type="bibr" rid="B65">2019</xref>). Even more interestingly, self-avatars can even help users to better perform cognitive tasks (Steed et al., <xref ref-type="bibr" rid="B191">2016b</xref>; Banakou et al., <xref ref-type="bibr" rid="B23">2018</xref>), modify implicit racial bias (Groom et al., <xref ref-type="bibr" rid="B74">2009</xref>; Peck et al., <xref ref-type="bibr" rid="B149">2013</xref>; Banakou et al., <xref ref-type="bibr" rid="B22">2016</xref>; Hasler et al., <xref ref-type="bibr" rid="B78">2017</xref>; Salmanowitz, <xref ref-type="bibr" rid="B169">2018</xref>) or even change, for example, their body weight perception (Piryankova et al., <xref ref-type="bibr" rid="B158">2014</xref>).</p>
<p>Such examples of research are just the tip of the iceberg, but show the importance of avatars that are controllable for developing VR/AR experiments, games, and applications. Hence, multiple commercial and non-commercial tools (such as the Microsoft Rocketbox library, Autodesk Character Generator, Mixamo/Adobe Fuse or iClone Character Creator, to name a few) aim to democratize and extend their use of avatars among developers and researchers.</p>
<p>Avatars can be created in different ways and in this paper we will detail how they can be fitted to a skeleton, and animated. We give an overview of previous work as well as future avenues for avatar creation. We also describe the particularities of the use and creation of the Microsoft Rocketbox avatar library and we discuss the consequences of the open source release (Mic, <xref ref-type="bibr" rid="B8">2020</xref>).</p>
</sec>
<sec id="s2">
<title>2. Avatar Creation</title>
<p>The creation of avatars that can move and express emotions is a complex task and relies on the definition of both the morphology and behavior of its 3D representation. The generation of believable avatars requires a technical pipeline (<xref ref-type="fig" rid="F2">Figure 2</xref>), that can create the geometry, textures, control structure (rig) and movements of the avatar (Roth et al., <xref ref-type="bibr" rid="B163">2017</xref>). Anthropomorphic avatars might be sculpted by artists, scanned from real people or a combination of both. Microsoft Rocketbox avatars are based on sculpting. With current technological advances it is also possible to create avatars automatically from a set of input images or through manipulation of a small set of parameters. At the core rigging is the set of control structures attached to selected areas of the avatar, allowing its manipulation and animation. A rig is usually represented by a skeleton (bone structure). However, rigs can be attached to any structure that is useful for the task needed (animators may use much more complex rigs than a skeleton). The main challenge when rigging an avatar is to accurately mimic the deformation of an anthropomorphic shape, so artists and developers can manipulate and animate the avatar. Note that non-anthropomorphic avatars, such as those lacking legs or other body parts, are widely used in social VR. However, although previous research has shown that non-anthropomorphic avatars can also be quite successful in creating self-identification, there are more positive effects to using full anthropomorphic avatars (Aymerich-Franch, <xref ref-type="bibr" rid="B18">2012</xref>).</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Pipeline for the creation of Microsoft Rocketbox Rigged Avatars and main libraries and software available with pre-created avatars of different types. The mesh creation produces a basic sculpted humanoid form. The rigging attaches the vertices to different bones corresponding to the skeleton of the digital human.</p></caption>
<graphic xlink:href="frvir-01-561558-g0002.tif"/>
</fig>
<sec>
<title>2.1. Mesh Creation</title>
<sec>
<title>2.1.1. Sculpting</title>
<p>Much like the ancient artists sculpted humans in stone, today digital artists can create avatars by combining and manipulating digital geometric primitives and deforming the resulting collection of vertices or meshes. Manually sculpting, texturing, and rigging using 3D content creation tools, such as Autodesk 3ds Max (3dm, <xref ref-type="bibr" rid="B1">2020</xref>), Maya (May, <xref ref-type="bibr" rid="B6">2020</xref>), or Blender (Ble, <xref ref-type="bibr" rid="B4">2020</xref>), has been the traditional way to achieve high-quality avatars.</p>
<p>This work requires artists specializing in character design and animation, and though this can be a long and tedious process, the results can be optimized for high-level quality output. Most of the avatars currently used for commercial applications, such as AAA games and most VR/AR applications, are based on a combination of sculpted and scanned work with a strong artistic involvement.</p>
<p>In fact, at the time of writing, specialized artistic work is generally still used to fine tune avatar models to avoid artifacts produced with the other methods listed here, even though some of these avatars can still suffer from the uncanny valley effect (Mori, <xref ref-type="bibr" rid="B131">1970</xref>), where extreme but not perfect realism can cause a negative reaction to the avatar.</p>
</sec>
<sec>
<title>2.1.2. Data Driven Methods and Scanning</title>
<p>In many applications, geometry scanning is used to generate an avatar that can be rigged and animated later (Kobbelt and Botsch, <xref ref-type="bibr" rid="B89">2004</xref>). Geometry scanning is the acquisition of physical topologies by imaging techniques and accurate translation of this surface information to digital 3D geometry; that is, a mesh (Thorn et al., <xref ref-type="bibr" rid="B195">2016</xref>).</p>
<p>That later animation might itself use a motion capture system to provide data to animate the avatar. The use of one-time scanning enables the artist to stylize the scanned model and optimize it for animation and the application. Scanning is often a pre-processing step where the geometry and potentially animation is recorded and used for reference or in the direct production of the final mesh and animation.</p>
<p>Scanning is particularly useful if an avatar needs to be modeled on existing real people (Waltemate et al., <xref ref-type="bibr" rid="B198">2018</xref>). Indeed, the creation of avatars this way is extremely common in special effects rendering, for sports games or for content around celebrities.</p>
<p>Depth cameras or laser techniques allow developers and researchers to capture their own avatars, and create look-alike avatars (Gonzalez-Franco et al., <xref ref-type="bibr" rid="B64">2016</xref>). These new avatars can later be rigged in the same way as sculpted ones. Scanning is a fast alternative to modeling of avatars. But depending on the quality, it comes with common flaws that need to be tweaked afterwards.</p>
<p>Indeed, special care needs to be taken to scan the model from all directions to minimize the number and size of occluded regions. To enable future relighting of the avatar, textures have to be scanned under known natural and sometimes varying illuminations to recover the original albedo or bi-directional reflectance function (BRDF) of the object (Debevec et al., <xref ref-type="bibr" rid="B47">2000</xref>).</p>
<p>To capture the full body (<xref ref-type="fig" rid="F3">Figure 3</xref>), the scanning requires a large set of images to be taken around the model by a moving camera (Aitpayev and Gaber, <xref ref-type="bibr" rid="B14">2012</xref>). Alternatively a large number of cameras in a capture stage can collect the required images at one instant (Esteban and Schmitt, <xref ref-type="bibr" rid="B53">2004</xref>). Surface reconstruction using multi-view stereo infers depth by finding matching neighborhoods in the multiple images (Steve et al., <xref ref-type="bibr" rid="B192">2006</xref>). When there is a need to capture both the geometry and textures of the avatar, as well as the motion of the model, a large set of cameras can also enable the capture of a large set of images that covers most, but not all, of the model surface at each time frame; sometimes this is referred to as fusion4d or volumetric capturing (Dou et al., <xref ref-type="bibr" rid="B49">2016</xref>; Orts-Escolano et al., <xref ref-type="bibr" rid="B137">2016</xref>). More details on volumetric performance capture are presented in section 4.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>A Microsoft Rocketbox avatar mesh with less than 10,000 triangle polygons with two texture modes: only diffuse and diffuse, specular and normal (bump mapping). The models include multiple Levels of Detail (LODs) which can be optionally used for performance optimization (on the right).</p></caption>
<graphic xlink:href="frvir-01-561558-g0003.tif"/>
</fig>
<p>Special care should be taken when modeling avatar hair. While hair may be an important recognizable feature of a person, it is notoriously hard to scan and model due to its fine structure and reflectance properties. Tools such as those suggested by Wei et al. (<xref ref-type="bibr" rid="B202">2005</xref>) use visible hair features as seen by multiple images capturing the head from multiple directions, to fill a volume similar to the image. However, hair and translucent clothing remains a research challenge.</p>
<p>Recent deep learning methods also using retrieved data can be used at many stages of the avatar creation process. Some methods have been successfully used to create avatars from pictures by recreating full 3D meshes from a photo (Hu et al., <xref ref-type="bibr" rid="B81">2017</xref>; Saito et al., <xref ref-type="bibr" rid="B167">2019</xref>), meshes from multiple cameras Collet et al. (<xref ref-type="bibr" rid="B44">2015</xref>); Guo et al. (<xref ref-type="bibr" rid="B75">2019</xref>), reduce the generated artifacts (Blanz and Vetter, <xref ref-type="bibr" rid="B35">1999</xref>; Ichim et al., <xref ref-type="bibr" rid="B82">2015</xref>), as well as to improve rigging (Weng et al., <xref ref-type="bibr" rid="B204">2019</xref>). Deep learning methods can also generate completely new avatars that are not representations of existing people, by using adversarial networks (Karras et al., <xref ref-type="bibr" rid="B84">2019</xref>).</p>
</sec>
<sec>
<title>2.1.3. Parametric Avatars</title>
<p>Another way to reduce the effort needed to model an avatar of an actual person is to use a parametric avatar and fit the parameters to images or scans of the person.</p>
<p>Starting from an existing avatar guarantees that the fitting process will end up with a valid avatar model that can be rendered and animated correctly. Such methods are able to reduce the modeling process to a few images or even a single 2D image (Saito et al., <xref ref-type="bibr" rid="B168">2016</xref>; Shysheya et al., <xref ref-type="bibr" rid="B178">2019</xref>). They can be used to recover physically correct models that can be animated with finer details, even with fine and semi-transparent objects, such as hair (Wei et al., <xref ref-type="bibr" rid="B203">2019</xref>). This is at the cost of some differences compared to the actual person&#x00027;s geometry. There are many things these systems still cannot do well without additional artistic work: hair, teeth, and fine lines in the face, to name a few.</p>
<p>There are some commercial and non-commercial applications for parametric avatars that provide some of these features, including Autodesk Character Generator, Mixamo/Adobe Fuse or iClone Character Creator.</p>
</sec>
</sec>
<sec>
<title>2.2. Texturing and Materials</title>
<p>At the time of rendering, each vertex and face of the mesh is assigned a value. That value is computed from retrieving the textures in combination with the shaders that might incorporate enhancements and GPU mixing of multiple layers with information about specular, albedo, normal mapping, and transparency among other material properties. While the specular map states where the reflection should and should not appear, the albedo is very similar to a diffuse map, but with one extra benefit: all the shadows and highlights have been removed. The normal map is used to add details without using more polygons; the computer graphics simulates high-detail bumps and dents using a bitmap image.</p>
<p>Therefore, texturing the avatar is another challenge in the creation process (<xref ref-type="fig" rid="F4">Figure 4</xref>). Adding textures requires specific skills as the vertexes and unwrapping are often based on real images that might also need additional processing, but in some occasions this can be automated.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>A close-up of an avatar face from Microsoft Rocketbox and corresponding set of textures that are mapped to the avatar mesh to produce the best possible appearance. Textures need to be mapped per each vertex and can contain information about the diffuse, specular, normal, and transparency colors of each vertex.</p></caption>
<graphic xlink:href="frvir-01-561558-g0004.tif"/>
</fig>
<p>When captured independently, each image covers only part of the object, and there is a need to fuse all sources to a single coherent texture. One possibility is to blend available images into a merged texture (Wang et al., <xref ref-type="bibr" rid="B199">2001</xref>; Baumberg, <xref ref-type="bibr" rid="B27">2002</xref>). However, any misalignment or inaccurate geometry (such as the case of fitting a parametric model) might lead to ghosting and blurring artifacts when the textures are geometrically misaligned.</p>
<p>To reduce such artifacts, texturing has been addressed as an image stitching problem (Lempitsky and Ivanov, <xref ref-type="bibr" rid="B97">2007</xref>; Gal et al., <xref ref-type="bibr" rid="B61">2010</xref>). This approach targets each surface triangle that is then projected onto the images from which it is visible, and the final texture is assigned entirely from one image in this set. The goal is to select the best texture source and to penalize mismatches across triangle boundaries. Lighting variations are corrected at post-processing using a piece-wise continuous function over the triangles.</p>
<p>A shader program runs in the graphics pipeline and determines how the computer will render each pixel of the screen based on the different texture images and the material and physical properties associated with the different objects. Shader programs are often used to control lighting and shading effects, and are programmed using GLSL (OpenGL Shading Language).</p>
<p>Shader programming also plays a very important role in applying all the additional textures (albedo, specular, bump mapping etc.). Most of the rendering engines will provide some basic shaders that will map the textures of the material assigned to the avatar.</p>
</sec>
<sec>
<title>2.3. Rigging Avatars</title>
<p>A key step in most animation systems is to attach a hierarchy of bones to the mesh in a process called rigging. The intention is that animations are defined by moving the bones, not the individual vertices. Therefore during rigging, each vertex of the mesh is attached to one or more bones. If a vertex is attached to one bone, it is either rigidly attached to that bone or has a fall-off region of influence. Alternatively if a vertex is attached to multiple bones then the effect of each bone on the vertex&#x00027;s positions is defined by weights. This allows for a mesh to smoothly interpolate as the bones move. These vertexes affected by multiple bones are typical in the joint sections.</p>
<p>Animators may also use much less complex rigs than a skeleton by combining, for example, different expressions (i.e., blendshapes) and movements to a combined trigger (e.g., happy expression).</p>
<sec>
<title>2.3.1. Bones</title>
<p>Artists typically structure the bones to follow, crudely, the skeletal structure of the human or animal (<xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Bones in one of the Microsoft Rocketbox avatars (53 body bones and 28 facial bones).</p></caption>
<graphic xlink:href="frvir-01-561558-g0005.tif"/>
</fig>
<p>There are several pipelines available today for rigging a mesh so that it can be imported into a game engine such as Unreal or Unity. One common way is using the CATS plugin for Blender (Cat, <xref ref-type="bibr" rid="B5">2020</xref>) that can export in a compatible manner with the Mecanim retargeting system used in Unity. Another method is to import the mesh into Maya or another 3D design program and rig it by hand, or alternatively, use Adobe Fuse (Fus, <xref ref-type="bibr" rid="B2">2020</xref>) to rig the mesh. Nonetheless, each of these methods has its advantages and disadvantages. As with any professional creation tool, they have steep learning curves in becoming familiar with the user interfaces and with all of the foibles of misplacing bones in the mesh that can lead to unnatural poses. Although with the CATS plugin and Fuse the placement of bones is automated, if they are not working initially it can be difficult to fix issues.</p>
<p>Mixamo (Mix, <xref ref-type="bibr" rid="B9">2020</xref>) makes rigging and animation easier for artists and developers. Users upload the mesh of a character and then place joint locators (wrists, elbows, knees, and groin) by following the onscreen instructions. Mixamo is not only an auto-rigging software but also provides a character library.</p>
<p>Independently of the tool used, the main problems usually encountered during the animation of avatars are caused by various interdependent design steps in the 3D character creation process. For example, poor deformation can come from bad placement of the bones/joints, badly structured mesh polygons, incorrect weighting of mesh vertex to their bones or non-fitting animations. Even for an avatar model that looks extremely realistic in a static pose, bad deformation/rigging would lead to a significant loss of plausibility and immersion in the virtual scenario as soon as the avatar moves its body. In fact, if not done carefully rigged avatars can exhibit pinched joints or unnatural body poses when animated. This depends on the level of sophistication of the overall process, weight definition, deformation specs, and kinematic solver. The solver software can also create such effects.</p>
<p>Overall, creating a high quality rig is not trivial. There is a trend toward easier tools and automatic systems for rigging meshes and avatars (Baran and Popovi&#x00107;, <xref ref-type="bibr" rid="B26">2007</xref>; Feng et al., <xref ref-type="bibr" rid="B56">2015</xref>). They will be easier and more accessible as the technology evolves but having access to professional libraries of avatars, such as Microsoft Rocketbox, can simplify production and allow researchers to focus on animating and controlling avatars at a high level rather than at the level of mesh movement.</p>
</sec>
<sec>
<title>2.3.2. Blendshapes</title>
<p>While bones are intuitive control systems for bodies, for the face, bones are not created analogously to real bones, but rather to pull and push on different mesh parts of the face. While bones can be used for face animation on their own, a common alternative to bone-based facial animation is to use blendshape animation (Vinayagamoorthy et al., <xref ref-type="bibr" rid="B197">2006</xref>; Lewis et al., <xref ref-type="bibr" rid="B100">2014</xref>).</p>
<p>Blendshapes are variants of an original mesh with each variant representing a different non-rigid deformation or, in this context, a different isolated facial expression. The meshes have the same number of vertices and the same topology. Facial animation is created as a linear combination of blendshapes. At key instances in time, blendshapes are combined as a weighted sum into a keypose mesh. Different types of algebraic methods are used to interpolate between keyposes for all frames of the animation (Lewis et al., <xref ref-type="bibr" rid="B100">2014</xref>). For example, one can select 10% of a smile, and 100% of left eye blink and the system would combine both as in <xref ref-type="fig" rid="F6">Figure 6</xref>. Blendshapes can then be considered as the units of facial expressions and despite the fact that they are seldom followed there are standards proposed such as the Facial Action Coding System (FACS) proposed by Ekman and Friesen (<xref ref-type="bibr" rid="B52">1976</xref>). The number of blendshapes in facial rigs varies. For example, some facial rigs use 19 blendshapes plus neutral (Cao et al., <xref ref-type="bibr" rid="B40">2013</xref>). However, if facial expressions have more complex semantics the sliders can reach much higher numbers, and 45 or more poses are often used (Orvalho et al., <xref ref-type="bibr" rid="B138">2012</xref>). For reference, high-end cinematic quality facial rigs often require hundreds of blendshapes to fully represent subtle deformations (the Gollum character in the Lord of the Rings movies had over 600 blendshapes for the face).</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>One of the Microsoft Rocketbox avatars&#x02014;Blendshapes by Pan et al. (<xref ref-type="bibr" rid="B146">2014</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0006.tif"/>
</fig>
<p>Blendshapes are mostly used for faces because (i) they capture well non-rigid deformations common for the face as well as small details that might not be represented by a skeleton model, and (ii) the physical range of motion in the face is limited, and in many instances can be triggered by emotions or particular facial expressions that can be later merged with weights (Joshi et al., <xref ref-type="bibr" rid="B83">2006</xref>).</p>
<p>Blendshapes can be designed manually by artists but also captured using camera tracking systems (RGB-D) from an actor and then blended through parameters at the time of rendering (Casas et al., <xref ref-type="bibr" rid="B42">2016</xref>). In production settings, hybrid blendshape and bones based on facial rigs are sometimes used.</p>
</sec>
</sec>
</sec>
<sec id="s3">
<title>3. Motion Capturing and Animation</title>
<p>At run-time, rigged avatars can be driven using pre-recorded animations or procedural programs to create movements during the VR/AR experience (Roth et al., <xref ref-type="bibr" rid="B165">2019</xref>). A self-avatar can be animated to follow real-time motion tracking of the participants. The fidelity of this type of animation depends on the number of joints being captured and the techniques used to do extrapolations and heuristics between those joints (Spanlang et al., <xref ref-type="bibr" rid="B189">2014</xref>).</p>
<p>VR poses a unique challenge for animating avatars that are user driven as this needs to happen at low latency so there is little opportunity for sophisticated animation control. It is particularly tricky if the avatar is used as a replacement for the user&#x00027;s unseen body. In such cases, to maximize the feeling of body ownership of the user, VR should use a low latency motion capture of the user&#x00027;s body and animate the avatar in the body&#x00027;s place. Due to limitations of most commercial VR systems, the capture of the body is typically restricted to measuring the 6 degrees of freedom of two controllers and the HMD. Other techniques, such as inverse kinematics, can be used to infer movements of the body that are not actually tracked. From these, the locations of two hands and head can be determined and other parts of the body can be inferred.</p>
<p>In recent years some systems have offered additional sensors such as finger tracking around the hand-held controllers, eye tracking in the HMD, and optional stand-alone trackers. Research is examining the use of additional sensors and tracking systems such as external (Spanlang et al., <xref ref-type="bibr" rid="B189">2014</xref>; Cao et al., <xref ref-type="bibr" rid="B41">2017</xref>; Mehta et al., <xref ref-type="bibr" rid="B127">2017</xref>) or wearable cameras (Ahuja et al., <xref ref-type="bibr" rid="B13">2019</xref>) for sampling more information about the user&#x00027;s pose and perhaps in the future we may see full-body sensing.</p>
<sec>
<title>3.1. Motion Capture</title>
<sec>
<title>3.1.1. Full Body Motion Capture</title>
<p>Motion capture is the process of sensing a person&#x00027;s pose and movement. Pose is usually represented by a defining skeletal structure, and then for each joint giving its 3 or 6 degrees of freedom transformation (rotations and sometimes position) from its parent in the skeletal structure. A common technique for motion capture has the actor wearing easily recognized markers such as retro-reflective markers or active markers that are observed by multiple high-speed cameras (Ma et al., <xref ref-type="bibr" rid="B112">2006</xref>). Optitrack (Opt, <xref ref-type="bibr" rid="B10">2020</xref>) and Vicon (Vic, <xref ref-type="bibr" rid="B11">2020</xref>) are two commercial motion capture systems that are commonly used in animation production and human-computer interaction research (<xref ref-type="fig" rid="F7">Figure 7</xref>).</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Body motion capture via OptiTrack motion capture system (Opt, <xref ref-type="bibr" rid="B10">2020</xref>) and the corresponding mapping to one of the Microsoft Rocketbox avatars in a singing experiment by Steed et al. (<xref ref-type="bibr" rid="B190">2016a</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0007.tif"/>
</fig>
<p>Another set of techniques uses computer vision to track a person&#x00027;s movement without markers, such as using the Microsoft Kinect sensor (Wei et al., <xref ref-type="bibr" rid="B201">2012</xref>), and more recently RGB cameras (Shiratori et al., <xref ref-type="bibr" rid="B177">2011</xref>; Ahuja et al., <xref ref-type="bibr" rid="B13">2019</xref>), RF reflection (Zhao et al., <xref ref-type="bibr" rid="B209">2018a</xref>,<xref ref-type="bibr" rid="B210">b</xref>), and capacitive sensing (Zheng et al., <xref ref-type="bibr" rid="B211">2018</xref>). A final set of techniques uses worn sensors to recover the user&#x00027;s motion with no external sensors. Examples include inertial measurement units (IMUs) (Ha et al., <xref ref-type="bibr" rid="B76">2011</xref>), wearable cameras (Shiratori et al., <xref ref-type="bibr" rid="B177">2011</xref>) or mechanical suits such as METAmotion&#x00027;s Gypsy (Met, <xref ref-type="bibr" rid="B7">2020</xref>).</p>
</sec>
<sec>
<title>3.1.2. Facial Motion Capture</title>
<p>For facial motion capture we have similar options to the ones proposed in the full body setup: either marker-less with a camera and computer vision tracking algorithms, such as Faceshift (Bouaziz et al., <xref ref-type="bibr" rid="B38">2013</xref>) (<xref ref-type="fig" rid="F8">Figure 8</xref>), or using marker-based systems such as Optitrack.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>One of the Microsoft Rocketbox avatars animated with markerless facial motion capture via Faceshift by Steed et al. (<xref ref-type="bibr" rid="B190">2016a</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0008.tif"/>
</fig>
<p>However, the problem with facial animation is that, in instances when it is needed in real-time, users might be wearing HMDs that occlude their real face, and therefore capturing their expressions is very hard (Lou et al., <xref ref-type="bibr" rid="B110">2019</xref>). Some researchers have shown that it is possible to add sensors to the HMDs to record facial expressions (Li et al., <xref ref-type="bibr" rid="B101">2015</xref>). The BinaryVR face tracking device attaches to your HMD and tracks facial expressions (Bin, <xref ref-type="bibr" rid="B3">2020</xref>).</p>
</sec>
</sec>
<sec>
<title>3.2. Animation</title>
<p>For animating an avatar, translations and rotations of each of the bones are changed to match the desired pose at an instant in time. If the motions have been prerecorded, then this constitutes a sort of playback. However, data-driven approaches can be much more sophisticated than simple playback.</p>
<p>Independently of whether the animation is performed in real-time or using pre-recorded animations, in both cases at some point there has been a motion capture that either sensed the full body and created prerecorded animations, or a subset of body parts to later do motion retargeting. If it is done offline, the animations are then saved and accessed during runtime. Alternatively, if the motion capture is done in real-time then this can be used directly to animate the avatars that match the users&#x00027; motions.</p>
<sec>
<title>3.2.1. Motion Tracking</title>
<p>When each joint of the participant can be recorded in real-time through a full body motion capturing system (Spanlang et al., <xref ref-type="bibr" rid="B189">2014</xref>), they can be transferred directly to the avatar (Spanlang et al., <xref ref-type="bibr" rid="B188">2013</xref>). For best performance this assumes that the avatar is resized to the size of the participant. In that situation participants achieve the maximum level of agency over the self-avatar and rapidly embody it (Slater et al., <xref ref-type="bibr" rid="B186">2010a</xref>). However, this requires wearing a motion capturing suit or having outside sensors monitoring the motions of the users, which can be expensive and/or hard to setup (Spanlang et al., <xref ref-type="bibr" rid="B189">2014</xref>). Since direct mapping between the tracking and avatar skeletons can sometimes be non trivial, intermediate solvers are typically put in place in most approaches today Spanlang et al. (<xref ref-type="bibr" rid="B189">2014</xref>).</p>
<p>Most commercial VR systems capture only the rotation and position of the HMD and two hand controllers. In some cases the hands are well-tracked with finger capacitive sensing around the controller. Some new systems are introducing more sensors such as eye tracking. However, in most systems, most of the body degrees of freedom are not monitored. Even if only using partial tracking (Badler et al., <xref ref-type="bibr" rid="B19">1993</xref>) it is possible to infer a good approximation of the full body, but the end-effectors need to be well-tracked. Nevertheless, many VR applications limit the rendering of the user&#x00027;s body to hands only. Although such representations may suffice for many tasks, this lowers the realism of the virtual scenario and may reduce the level of embodiment of the user (De Vignemont, <xref ref-type="bibr" rid="B46">2011</xref>) and even have further impact even on their cognitive load (Steed et al., <xref ref-type="bibr" rid="B191">2016b</xref>).</p>
</sec>
<sec>
<title>3.2.2. Inverse Kinematics</title>
<p>A common technique to generate a complete avatar motion given the limited sensing of the body motion, is Inverse Kinematics (IK). The position and orientations of avatar joints that are not monitored are estimated given the known degrees of freedom. IK originated from the field of robotics and exploits the inherent movement limitations of each of the skeleton&#x00027;s joints to retrieve a plausible pose (Roth et al., <xref ref-type="bibr" rid="B164">2016</xref>; Aristidou et al., <xref ref-type="bibr" rid="B16">2018</xref>; Parger et al., <xref ref-type="bibr" rid="B148">2018</xref>). The pose fits the position and direction of the sensed data and lies within the body&#x00027;s possible poses. IK addresses the challenge that there are many possible combinations and sometimes it is not a perfect one-to-one method. Additionally it can do a reasonable job in reconstructing motions in joints that are within ground truth nodes, but does a worse job in extrapolating to other joints, for example to know the leg motions from the hand trackers is harder than it is to know the elbow motions.</p>
</sec>
<sec>
<title>3.2.3. Prerecorded Animations</title>
<p>Most applications of avatars in games and movie productions use a mix of prerecorded motion captures of actors and professionals (Moeslund et al., <xref ref-type="bibr" rid="B128">2006</xref>) and artists&#x00027; manual animations, together with AI, algorithmic and procedural animations. The main focus of these approaches is the generation of performances that are believable, expressive, and effective. Added animation aims at minimizing artifacts that can result due to a lack of small motions that may exist during a real person&#x00027;s motions, such as clothes bending, underlying facial movements (Li et al., <xref ref-type="bibr" rid="B101">2015</xref>) and others.</p>
<p>Prerecorded animations alone can be a bit limiting in the interactivity delivered by the VR, especially if they are driving a first-person embodied avatar where the user wants to retain agency over the body. For that reason, prerecorded animations are generally not used for self-avatars; however, recent research has shown that they can be used to trigger enfacement illusions and realistic underlying facial movements (Gonzalez-Franco et al., <xref ref-type="bibr" rid="B72">2020b</xref>).</p>
<p>Indeed, the recombination of existing animations can become quite complex and procedural or deep learning systems can be in charge of generating the correct animations for each scene.</p>
</sec>
<sec>
<title>3.2.4. Interactive Procedural, Deep Learning and Data-Driven Animation</title>
<p>A combination of procedural animation with data-driven animation is a common strategy for real-time systems. However, this approach also allows compensation for incomplete sensing data for real-time animation, especially self-avatars.</p>
<p>In some scenarios the procedural animation systems can use the context of the application itself to animate the avatars. For example, in a driving application where the legs can be rendered sitting in the driver seat performing actions consistent with the driving inputs even if the user is not actually in a correct driving position.</p>
<p>In other scenarios the approach is more interactive and depends on the user input. It can incorporate tracking information from the user motion capture and start to complete parts of the self-avatar or their motions if they are not complete. This could be an improvement over traditional IK, in which a data-driven animation can incorporate the popularity of the sampled data and retrieve as a result the most likely pose within the whole range of possible motions of the IK (Lee et al., <xref ref-type="bibr" rid="B96">2010</xref>).</p>
<p>Using data-driven machine learning algorithms the system can pick the most likely pose out of the known data given the limited capture information. In many cases this might result in a motion retargeting or blending scenario.</p>
<p>More recently, we have seen rigged avatars that can be trained through deep learning to perform certain motion actions, such as dribbling (Liu and Hodgins, <xref ref-type="bibr" rid="B102">2018</xref>), adaptive walking and running (Holden et al., <xref ref-type="bibr" rid="B80">2017</xref>), or physics behaviors (Peng et al., <xref ref-type="bibr" rid="B153">2018</xref>). These are great tools for procedural driven avatars and can also solve some issues with direct manipulation.</p>
<p>However, with any such procedural animation, care needs to be taken that the animation doesn&#x00027;t stray too far from other plausible interpretations of the tracker input as discrepancies in representation of the user&#x00027;s body may reduce their sense of body ownership and agency (Padrao et al., <xref ref-type="bibr" rid="B141">2016</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B66">2020a</xref>).</p>
</sec>
</sec>
</sec>
<sec id="s4">
<title>4. Volumetric Avatars</title>
<p>There are other forms of avatar creation beyond the pipeline defined previously. For example using volumetric capture, we can capture meshes and their animations without the need for rigging (<xref ref-type="fig" rid="F9">Figure 9</xref>). Volumetric capturing can be considered an advanced form of scanning in which the external optical sensors (often numerous) allow a reconstruction of the whole point cloud and mesh of a real person and can record the movement of the person over time, essentially leading to a 3D video.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Two volumetric avatars captured in the Microsoft Mixed Reality studio Collet et al. (<xref ref-type="bibr" rid="B44">2015</xref>). <bold>(A)</bold> Input setup. <bold>(B)</bold> Avatar output. <bold>(C)</bold> Scanned point cloud. <bold>(D)</bold> Processed mesh.</p></caption>
<graphic xlink:href="frvir-01-561558-g0009.tif"/>
</fig>
<p>When we talk about volumetric avatars, we refer to different types of 3D representations, such as point clouds, meshes (Orts-Escolano et al., <xref ref-type="bibr" rid="B137">2016</xref>), voxel grids (Loop et al., <xref ref-type="bibr" rid="B108">2016</xref>), or light fields (Wang et al., <xref ref-type="bibr" rid="B200">2017</xref>; Overbeck et al., <xref ref-type="bibr" rid="B140">2018</xref>). However, the 3D triangle mesh is still one of the most common 3D representations that is used in current volumetric capture systems, mostly because GPUs are traditionally designed to render triangulated surface meshes. Moreover, from a temporal point of view, we can also distinguish between capturing a single frame, and then performing rigging on the single frame, or capturing a sequence of these 3D representations, which is usually known as volumetric video. Finally, volumetric avatars can also be categorized as those that are captured offline (Collet et al., <xref ref-type="bibr" rid="B44">2015</xref>; Guo et al., <xref ref-type="bibr" rid="B75">2019</xref>) and played back as volumetric streams of data or those that are being captured and streamed in real-time (Orts-Escolano et al., <xref ref-type="bibr" rid="B137">2016</xref>).</p>
<p>Either real-time or offline, volumetric video capture is a complex process that involves many stages, from low-level hardware setups and camera calibration, to sophisticated machine learning and computer vision processing pipelines. A traditional volumetric capture pipeline includes the following steps: image acquisition, image preprocessing, depth estimation, 3D reconstruction, texture UV atlas parameterization, and data compression. Some additional steps are usually performed to remove flickering effects in the final volumetric video, such as mesh tracking (Guo et al., <xref ref-type="bibr" rid="B75">2019</xref>) or non-rigid fusion (Dou et al., <xref ref-type="bibr" rid="B49">2016</xref>, <xref ref-type="bibr" rid="B48">2017</xref>) in the case of offline approaches and real-time ones, respectively. More detailed information about state-of-the-art volumetric performance capture systems can be found in Guo et al. (<xref ref-type="bibr" rid="B75">2019</xref>), Collet et al. (<xref ref-type="bibr" rid="B44">2015</xref>), and Orts-Escolano et al. (<xref ref-type="bibr" rid="B137">2016</xref>).</p>
<p>The latest frontier in volumetric avatars has been for the computation to be performed in real-time, and this was achieved in the Holoportation system (Orts-Escolano et al., <xref ref-type="bibr" rid="B137">2016</xref>). When these real-time systems are combined with mixed reality displays such as HoloLens, this technology also allows users to see, hear, and interact with remote participants in 3D as if they are actually present in the same physical space.</p>
<p>Both pre-recorded and real-time volumetric avatars offer a different level of realism that cannot be currently achieved by using rigging techniques. For example, volumetric avatars automatically capture object interaction, extremely realistic cloth motion capture and accurate facial expressions.</p>
<p>Real-time volumetric avatars can enable a more interactive communication with remote users, allowing also the user to interact and perform eye contact with the other participants. However, these systems have to deal with a huge amount of computing, sacrificing the quality of the reconstructed avatar for a more interactive experience, which can also affect the perception of eye gaze or other facial cues (MacQuarrie and Steed, <xref ref-type="bibr" rid="B113">2019</xref>). A good rigged avatar with animated facial animation can do an equal or better job for communication (Garau et al., <xref ref-type="bibr" rid="B62">2003</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B72">2020b</xref>). Moreover, it is important to note that if the user is wearing a HMD while being captured, either a VR or an AR device, it will require additional processing to remove the headset in real-time (Frueh et al., <xref ref-type="bibr" rid="B60">2017</xref>). For this purpose, eye-tracking cameras are often mounted within the headset, which allows in-painting of occluded areas of the face in a realistic way in the final rendered 3D model. Another common problem that real-time capture systems need to solve is latency. Volumetric captures tend to involve heavy computational processing and result in larger data sets. Thus, they require high bandwidth and this increases the latency between the participants in the experience.</p>
<p>Most recent techniques for generating real-time volumetric avatars are focusing on learning-based approaches. By leveraging neural rendering techniques these novel methods have achieved unprecedented results, enabling the modeling of view-dependent effects such as specularity and also correcting for imperfect geometry (Lombardi et al., <xref ref-type="bibr" rid="B105">2018</xref>, <xref ref-type="bibr" rid="B106">2019</xref>; Pandey et al., <xref ref-type="bibr" rid="B147">2019</xref>). Compared to traditional graphics pipelines, these new methods require less computation and also can deal with a coarser geometry, which is usually used as a proxy for rendering the final 3D model. Most of these approaches are able to achieve compelling results, with substantially less infrastructure than previously required.</p>
</sec>
<sec id="s5">
<title>5. Microsoft Rocketbox Avatars</title>
<p>The Microsoft Rocketbox avatar library creation process deployed a lot of research and prototyping work into &#x0201C;base models&#x0201D; for male and female character meshes (<xref ref-type="fig" rid="F10">Figure 10</xref>). These base models were already rigged and tested with various (preferably extreme) animations in an early stage of development. The base models were used as a starting point for creation of all character types later on so as to guarantee a high-quality standard and consistent specifications throughout the whole library. Optimization opportunities identified during the production phase of the library also flowed back into these base models. In order to be able to mix different heads with different bodies of the avatars more easily, one specific polygon edge around the neck area of the characters is identical in all avatars of the same gender.</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p>Several of the 115 Microsoft Rocketbox rigged avatars released in the library that show a diversity in race, gender, and age, as well as attire and occupation.</p></caption>
<graphic xlink:href="frvir-01-561558-g0010.tif"/>
</fig>
<p>UV Mapping was also predefined in the base meshes already. UV Mapping is the process of &#x0201C;unfolding&#x0201D; or &#x0201C;unwrapping&#x0201D; the mesh to a 2D coordinate system that maps onto the texture. To allow for mixing and matching texture elements of different characters from the library, many parts of the character UV coordinates were standardized; for example, the hands or the face. Therefore, it is possible to exchange face textures across different characters of the library with some knowledge of image editing and retouching. The UV Mapping of polygons that move or stretch significantly when animation is applied achieves a higher texture resolution to avoid blur effects (<xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<p>Another important set of source data for the creation of the Microsoft Rocketbox library were photographs of real people that were taken in a special setup with photo studio softbox lights, a whitebox, and a turntable to guarantee neutral lighting of the photo material that was used as a reference for modeling the meshes and as source material for creating the textures. Photos of people from many different ethnic groups, clothing styles, age classes, and so on, were taken in order to have high diversity across the library. However, different portions from the source material were mixed and strongly modified for the creation of the final avatars so that the avatars represent generic humans that do not exist in reality.</p>
<p>The models are available in multiple Levels of Detail (LODs) which can optionally be used for performance optimization for scenes with larger numbers of characters, or for mobile platforms. The LODs in the models include &#x0201C;hipoly&#x0201D; (10.000 triangles), &#x0201C;midpoly&#x0201D; (5.000 triangles), &#x0201C;lowpoly&#x0201D; (2.500 triangles), and &#x0201C;ultralowpoly&#x0201D; (500 triangles) levels, (<xref ref-type="fig" rid="F3">Figure 3</xref>). Textures are usually included with a resolution of 2048x2048 pixels, with one separate texture for the head and another one for the body. This means that the head texture has a higher detail level (pixel resolution per inch) than the body texture. A set of textures are then mapped to the avatar mesh to produce the best possible appearance. Textures need to be mapped per each vertex and can contain information about the diffuse, specular, or normal colors of each vertex (<xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<p>All in all, the Rocketbox avatars provide a reliable rigged skeletal system with 56 body bones that, despite not being procedurally generated, can be used for procedural animation thanks to the rigged system.</p>
<sec>
<title>5.1. Facial Animation</title>
<p>Facial animations for the Microsoft Rocketbox library animation sets (<xref ref-type="fig" rid="F11">Figure 11</xref>) were created through generating different facial expressions manually by setting up and saving different face bone constellations (<xref ref-type="fig" rid="F5">Figure 5</xref>). Additionally, visemes (face bone positions) for all important phonemes (speech-sounds) were created. These facial expressions were saved as poses and used for keyframe animation of the face adapted to the body movement of the respective body animation. The usage of animation sets across all characters of the library without retargeting the animations required some general conditions to be accounted for during creation of the Rocketbox HD library. The bones in the face only transform in x,y,z and do not rotate. Therefore, facial animation also looks correct on character variants with face elements such as eyebrows or lips positioned deviant from the base mesh. The only exception are the bones for the eyeballs which have rotation features only&#x02014;this requires the eyeballs of all avatars that belong to the same class (e.g., adult female) to be at the same position in 3D space.</p>
<fig id="F11" position="float">
<label>Figure 11</label>
<caption><p><bold>(Top)</bold> 3 of the 23 facial expressions included in the Microsoft Rocketbox avatars together with <bold>(Bottom)</bold> 5 of the 15 phonemes already set up in each character for easier implementation with lipsync plugins and face animation software. Underlying there are 28 facial bones that allow researchers to create their own expressions.</p></caption>
<graphic xlink:href="frvir-01-561558-g0011.tif"/>
</fig>
</sec>
<sec>
<title>5.2. Limitations</title>
<p>When compared to the other available avatar libraries and tools, the Microsoft Rocketbox present some limitations (<xref ref-type="table" rid="T1">Table 1</xref>).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Avatar Libraries and tools comparisons.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th valign="top" align="left"><bold>Daz-3D</bold></th>
<th valign="top" align="left"><bold>Mixamo</bold></th>
<th valign="top" align="left"><bold>Autodesk character generator</bold></th>
<th valign="top" align="left"><bold>Microsoft Rocketbox</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Avatar</td>
<td valign="top" align="left">Fully textured and rigged (Improved shoulder, collar, and abdomen bends).</td>
<td valign="top" align="left">Fully textured and rigged. Automatic rigging of human skeleton to character created in AFCC.</td>
<td valign="top" align="left">Fully textured and rigged.</td>
<td valign="top" align="left">Fully textured and rigged.</td>
</tr>
<tr>
<td valign="top" align="left">Range of characters</td>
<td valign="top" align="left">A very wide ecosystem of models, cloths, accessories. (Every day, Sci-Fi, Animals, etc.).</td>
<td valign="top" align="left">A wide range of themes, but a small dataset of characters and motions.</td>
<td valign="top" align="left">Everyday models (Specific focus on this theme), a few motions.</td>
<td valign="top" align="left">Realistic humans of diverse race, age, gender, and occupation.</td>
</tr>
<tr>
<td valign="top" align="left">Face</td>
<td valign="top" align="left">Blend shapes (no bones).</td>
<td valign="top" align="left">Blend shapes (no bones).</td>
<td valign="top" align="left">Facial expressions blends including phonemes.</td>
<td valign="top" align="left">Bones.</td>
</tr>
<tr>
<td valign="top" align="left">Uniqueness and Limitations</td>
<td valign="top" align="left">Muscle flexion. Blend between different models. Share clothing between genders via morph projection. Body proportions can be modified including child stature. No body mesh under cloths area to prevent artifacts. IRAY shader&#x02013;sub-surface features. Hi-res UV map.</td>
<td valign="top" align="left">Avatar is made of different parts that can be mixed and matched. No body mesh under cloths area to prevent artifacts.</td>
<td valign="top" align="left">Web based service. Limitation: One mesh only&#x02013;hard to modify or change shaders. Clothing&#x02013;is part of the mesh.</td>
<td valign="top" align="left">All avatars created with similar structure so relatively easy to interchange body parts and or outfits. Multiple LOD poly levels. Three submeshes: body, head, and hair. Limitation: Clothing&#x02013;is part of the mesh.</td>
</tr>
<tr>
<td valign="top" align="left">Paid/Free</td>
<td valign="top" align="left">Mixed.</td>
<td valign="top" align="left">Data is free.</td>
<td valign="top" align="left">Some free data but low poly and texture res, and body rig only, no face animation. Paid&#x02013;blend shapes no bones, facial expressions.</td>
<td valign="top" align="left">Free for research and academic use.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The Rocketbox avatars do not use blendshapes for facial animation but bones, meaning that it is up to the users to create their own blendshapes on top, as shown in <xref ref-type="fig" rid="F11">Figure 11</xref>. However, creating blendshapes is a process that needs skill and cannot be replicated easily to other avatars but has to be performed manually for each avatar (or scripted).</p>
<p>The library also shows limitations for extreme scaling, that can result in errors regarding the predefined skeleton skin weights and additionally result in unrealistic proportions (see <xref ref-type="fig" rid="F12">Figure 12</xref> where the texture stretches unrealistically). Other limitations are the exchangeability of clothes, hair styles, face looks, gender, and so on. Nevertheless, given that the avatars use a base model this simplifies the access to and editing of avatar shapes and clothes. For cases where the avatars are required to look like the participants, or in which there is a need to adapt the body sizes of participants, there might be a way to either change the texture of the facial map, through a transformation, or modifying the mesh, or directly stitching a different mesh to the face of a given library avatar. Initiatives like the virtual caliper (Pujades et al., <xref ref-type="bibr" rid="B160">2019</xref>) could help in adapting the current avatars; however, these possibilities are not straightforward with the release. Future work on the library would also need to bring easier tools for animation for general use, as well as tools to facilitate the exchangeability of clothes based on the generic model, as well as easier blendshape creation.</p>
<fig id="F12" position="float">
<label>Figure 12</label>
<caption><p>A Microsoft Rocketbox avatar with modified mesh being used in a body weight study at Max Planck by Piryankova et al. (<xref ref-type="bibr" rid="B158">2014</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0012.tif"/>
</fig>
</sec>
</sec>
<sec id="s6">
<title>6. Avatar Science Research and Other Applications</title>
<p>The first experiences with avatars and social VR were implemented in the 1980s in the context of the technology startup (VPL Research). While many of the results were reported in the popular press (Lanier, <xref ref-type="bibr" rid="B94">2001</xref>) that early work was also the germ of what later became a deep engagement from the academic community into exploring avatars and body representation illusions as well as their impact on human behavior (Lanier et al., <xref ref-type="bibr" rid="B95">1988</xref>; Lanier, <xref ref-type="bibr" rid="B93">1990</xref>). In that regard VPL Research not only did the initial explorations on avatar interactions but also provided crucial VR instrumentation for many laboratories and pioneered a whole industry in the headsets, haptics and computer graphics arenas. Many of the possibilities of avatars and VR for social and somatic interactions were explored (Blanchard et al., <xref ref-type="bibr" rid="B33">1990</xref>) and were later formalized empirically by the scientific community as presented in this section.</p>
<p>Researchers in diverse fields have since explored the impact of avatars on human responses. The underlying aim of the research in many of the avatar scientific studies is to better understand the behavioral, neural, and multisensory mechanisms involved in shaping the facets of humanness, and to further explore potential applications for therapeutic treatments, training, education, and entertainment. A significant number of these studies has used the Microsoft Rocketbox avatars (with over 145 papers and 5307 citations, see the <xref ref-type="supplementary-material" rid="SM1">Supplementary Materials</xref> for the full list). In this paper, at least 49 citations (representing 25% of the citations) used the Rocketbox avatars, and all are referenced in the following section about applications, where they represent almost 50% of all the citations in this section. Considering that the Rocketbox avatars started being used around 2009 (see our list of papers using the avatars in <xref ref-type="supplementary-material" rid="SM1">Supplementary Materials</xref>) and that 23 of the cited papers in this section were published before 2009, we can consider that 60% of the research referenced in this section has been carried out with the Rocketbox avatars. The remaining citations are included to provide further context for the importance of the research in this area, as well as of the potential applications of the avatars that range from simulations to entertainment to embodiment.</p>
<p>Indeed, a substantial focus of interest has been on how using and embodying self-avatars can change human perception, cognition and behavior. In fact, the possibility of making a person, even if only temporarily, &#x0201C;live in the skin&#x0201D; of a virtual avatar has tremendously widened the limit of experimental research with people, allowing scientific questions to be addressed that would not have been possible otherwise.</p>
<sec>
<title>6.1. Bodily Illusions Over Virtual Avatars</title>
<p>In the past two decades there has been an explosion of interest in the field of cognitive neuroscience on how the brain represents the body, with a large part of the related research making use of body illusions. A one-page paper in Nature (Botvinick and Cohen, <xref ref-type="bibr" rid="B37">1998</xref>), showed that a rubber hand can be incorporated into the body representation of a person simply by application of multisensory stimulation. This rubber-hand illusion occurs when a rubber hand in an anatomically plausible position is seen to be tactilely stimulated synchronously with the corresponding real out-of-sight hand. For most people, after about 1 minute of this stimulation and visuo-tactile correlation, proprioception shifts to the rubber hand, and if the rubber hand is threatened there is a strong physiological response (Armel and Ramachandran, <xref ref-type="bibr" rid="B17">2003</xref>) and corresponding brain activation (Ehrsson et al., <xref ref-type="bibr" rid="B51">2007</xref>). This research provided convincing evidence of body ownership illusions. The setup has since been reproduced with a virtual arm in virtual reality (Slater et al., <xref ref-type="bibr" rid="B183">2009</xref>), also with appropriate brain activation to a threat (Gonz&#x000E1;lez-Franco et al., <xref ref-type="bibr" rid="B69">2014</xref>).</p>
<p>Petkova and Ehrsson (<xref ref-type="bibr" rid="B155">2008</xref>) showed how the techniques used for the rubber-hand illusion could be applied to the whole body thus producing a &#x0201C;body ownership illusion&#x0201D; of a mannequin&#x00027;s body and this also has been reproduced in VR (Slater et al., <xref ref-type="bibr" rid="B187">2010b</xref>; Yuan and Steed, <xref ref-type="bibr" rid="B208">2010</xref>). First-person perspective over the virtual body that visually substitutes the real body in VR is a prerequisite for this type of body ownership illusion (Petkova et al., <xref ref-type="bibr" rid="B156">2011</xref>; Maselli and Slater, <xref ref-type="bibr" rid="B123">2014</xref>). The illusion induced by visuomotor synchrony, where the body moves synchronously with the movements of the person, is typically more vivid than those triggered by visuotactile synchrony, where the virtual body is passively touched synchronously with real touch (Gonzalez-Franco et al., <xref ref-type="bibr" rid="B70">2010</xref>; Sanchez-Vives et al., <xref ref-type="bibr" rid="B172">2010</xref>; Kokkinara and Slater, <xref ref-type="bibr" rid="B91">2014</xref>). Illusory body ownership over a virtual avatar, however, can be also experienced in static conditions, when the virtual body is seen in spatial alignment with the real body, a condition that could be easily met using immersive stereoscopic VR displays (Maselli and Slater, <xref ref-type="bibr" rid="B122">2013</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B66">2020a</xref>). The visuomotor route to body ownership is a powerful one, integrating visual and proprioceptive sensory inputs in correlation with motor outputs. This route is uniquely suited to self-avatars in VR, since it requires the technical capabilities of the forms of body tracking and rendering discussed in the animation section. Additional correlated modalities (passive or active touch, sound) can enhance the phenomenon.</p>
<p>Several studies have focused on casting the illusions of body ownership into a coherent theoretical framework. Most accounts have been discussed in the context of the rubber-hand illusion and point to multisensory integration as the main key underlying process (Graziano and Botvinick, <xref ref-type="bibr" rid="B73">2002</xref>; Makin et al., <xref ref-type="bibr" rid="B116">2008</xref>; Ehrsson, <xref ref-type="bibr" rid="B50">2012</xref>). More recently, computational models developed in the framework of causal inference (K&#x000F6;rding et al., <xref ref-type="bibr" rid="B92">2007</xref>; Shams and Beierholm, <xref ref-type="bibr" rid="B176">2010</xref>) described the onset of illusory ownership as the result of an &#x0201C;inference process&#x0201D; in which the brain associates all the available sensory information about the body (visual cues from the virtual, together with somatosensory or proprioceptive cues from the physical body) to a single origin: the own body (Kilteni et al., <xref ref-type="bibr" rid="B87">2015</xref>; Samad et al., <xref ref-type="bibr" rid="B170">2015</xref>). According to these accounts, all visual inputs associated with the virtual body (e.g., its aspect, the control over it, the interactions that it has with the surrounding environment) are processed as if emanating from the own body. As such, these could have profound implications on perception and behavior.</p>
<p>What is even more interesting than the illusion of ownership over a virtual body is indeed the consequences it can have for changed physiology, behaviors, attitudes, and cognition.</p>
<sec>
<title>6.1.1. Self-Avatar Impact on Behavior</title>
<p>Self-perception theory argues that people often infer their own attitudes and beliefs from observing themselves&#x02014;for example their facial expressions or styles of dress&#x02014;as if from a third party (Bem, <xref ref-type="bibr" rid="B30">1972</xref>). Could avatars have a similar effect? Early work focused on what Yee and Bailenson called &#x0201C;The Proteus Effect,&#x0201D; the notion that one&#x00027;s avatar would influence the behavior of the person embodied in it. The first study examined the consequences of embodying older avatars, where college students were embodied in either age-appropriate avatars or elderly ones (Yee and Bailenson, <xref ref-type="bibr" rid="B205">2007</xref>). The results showed that negative stereotyping toward the elderly was reduced when participants were placed into older avatars compared with those placed into young avatars. Subsequent work extended this finding to other domains. For example, people embodied in taller avatars negotiated more aggressively than those embodied in shorter ones, and attractive avatars caused more self-disclosure and closer interpersonal distance to a confederate than unattractive ones during social interaction (Yee and Bailenson, <xref ref-type="bibr" rid="B205">2007</xref>). The embodiment effects of both height and attractiveness in VR extended to subsequent interactions outside of the laboratory, predicting players&#x00027; performance in online games, but also face-to-face interactions (Yee et al., <xref ref-type="bibr" rid="B207">2009</xref>). Another study from the same laboratory has also shown how embodying avatars of different races can modulate implicit racial prejudice (Groom et al., <xref ref-type="bibr" rid="B74">2009</xref>). In this case, Caucasian participants embodying &#x0201C;Black&#x0201D; avatars demonstrated increased level of racial prejudice favoring Whites, with respect to participants embodying &#x0201C;White&#x0201D; avatars, in the context of a job interview. 
These effects of embodying an avatar only occurred when the avatar&#x00027;s movements were controlled by the user&#x02014;simply observing an avatar was not sufficient to cause changes in social behavior (Yee and Bailenson, <xref ref-type="bibr" rid="B206">2009</xref>).</p>
<p>While these pioneering studies give a solid foundation to understanding the impact of self-avatars on social behavior, they relied on fairly simple technology at the time. The avatars were only tracked with head rotations and the body translated as a unit, while the latency and frame-rate were coarser than today&#x00027;s standards. Also, these studies entailed a third-person view of self-avatars, either reflected in a mirror from a &#x0201C;disembodied&#x0201D; perspective (i.e., participants could see the avatar body rendered in a mirror as from a first-person perspective, but looking down they would see no virtual body), or from a third-person perspective as in an on-line community. More recent work has demonstrated that embodying life-size virtual avatars from a first-person perspective, and undergoing the illusion of having one&#x00027;s own physical body (concealed from view) substituted by the virtual body seen in its place and moving accordingly, could further leverage the impact of self-avatars on implicit attitudes, social behavior, and cognition (Maister et al., <xref ref-type="bibr" rid="B115">2015</xref>).</p>
<p>Several studies have shown that in neutral or positive social circumstances embodiment of White people in a Black virtual body results in a reduction in implicit racial bias, measured using the Implicit Association Test (Peck et al., <xref ref-type="bibr" rid="B149">2013</xref>). Similar results had been found with the rubber-hand illusion over a black rubber hand (Maister et al., <xref ref-type="bibr" rid="B114">2013</xref>), and these results and explanations for them were discussed in (Maister et al., <xref ref-type="bibr" rid="B115">2015</xref>). Banakou et al. (<xref ref-type="bibr" rid="B22">2016</xref>) explored the impact of the number of exposures and the duration of the effect. The results of Peck et al. (<xref ref-type="bibr" rid="B149">2013</xref>) were replicated, but in Banakou et al. the racial-bias measure was taken one week after the final exposure in VR, suggesting the durability of the effect. These results stand in contrast to Groom et al. (<xref ref-type="bibr" rid="B74">2009</xref>), which had found an increase in the racial-bias IAT, as discussed above. Recent evidence suggests, however, that when the surrounding social situation is a negative one (in the case of Groom et al., <xref ref-type="bibr" rid="B74">2009</xref> a job interview) then the effect reverses. These results have been simulated through a neural network model (Bedder et al., <xref ref-type="bibr" rid="B29">2019</xref>).</p>
<p>When adults are embodied in the body of a 5-year-old child with visuomotor synchrony they experience strong body ownership over the child body. As a result, they self-identify more with child-like attributes and see the surrounding world as larger (Banakou et al., <xref ref-type="bibr" rid="B21">2013</xref>; Tajadura-Jim&#x000E9;nez et al., <xref ref-type="bibr" rid="B193">2017</xref>). However, when there is body ownership over an adult body of the same size as the child, the perceptual effects are significantly lower, suggesting that it is the form of the body that matters, not only the height. This result was also replicated (Tajadura-Jim&#x000E9;nez et al., <xref ref-type="bibr" rid="B193">2017</xref>).</p>
<p>Virtual embodiment allows us not only to experience having a different body, but to live through situations from a different perspective. One of these situations is the experience of a violent situation. In (Seinfeld et al., <xref ref-type="bibr" rid="B174">2018</xref>) men who were domestic violence offenders experienced a virtual domestic violent confrontation from the perspective of the female victim. Such perspective has been found to modulate the brain network that encodes the bodily self (de Borst et al., <xref ref-type="bibr" rid="B45">2020</xref>). Violent offenders often have deficits in emotion recognition, in the case of male offenders, with a deficit in recognizing fear in the faces of women. This deficit was found to be reduced after embodiment in the female subject to domestic abuse by a virtual man (Seinfeld et al., <xref ref-type="bibr" rid="B174">2018</xref>). Similarly, mothers of young children tend to improve in empathy toward their children after spending some time embodied as a child in interaction with a virtual mother (Hamilton-Giachritsis et al., <xref ref-type="bibr" rid="B77">2018</xref>).</p>
<p>Embodying virtual avatars can further influence the engagement and performance on a given task or situation, depending on the perceived appropriateness of the embodied avatar for the task. For example, in a drumming task, participants showed more complex and articulated movement patterns when embodying a casually dressed avatar than when embodying a business man in a suit (Kilteni et al., <xref ref-type="bibr" rid="B85">2013</xref>). Effects at the cognitive level have also been found. For example, participants embodied as Albert Einstein tend to perform better on a cognitive test than when embodied in another &#x0201C;ordinary&#x0201D; virtual body (Banakou et al., <xref ref-type="bibr" rid="B23">2018</xref>). It has been shown that people embodied as Sigmund Freud tend to offer themselves better counseling than when embodied in a copy of their own body, or embodied as Freud with visuomotor asynchrony (Osimo et al., <xref ref-type="bibr" rid="B139">2015</xref>; Slater et al., <xref ref-type="bibr" rid="B182">2019</xref>). Moreover, being embodied as Lenin, the leader of the October 1917 Russian Revolution in a crowd scene leads to people being more likely to follow up on information about the Russian Revolution (Slater et al., <xref ref-type="bibr" rid="B181">2018</xref>). All these studies form a body of accumulated evidence of the power of embodying virtual avatars not only in modifying physiological responses and perception of the world and others, but also modifying behavior and cognitive performance.</p>
</sec>
<sec>
<title>6.1.2. Self-Avatar Impact on Agency, Self-Perception and Pain</title>
<p>It has been largely demonstrated that embodying a virtual avatar affects the way bodily related stimuli are processed. Experimental research based on the rubber hand illusion paradigm (Botvinick and Cohen, <xref ref-type="bibr" rid="B37">1998</xref>) provided robust evidence for the impact of illusory body ownership on the perception of bodily related stimuli (Folegatti et al., <xref ref-type="bibr" rid="B58">2009</xref>; Mancini et al., <xref ref-type="bibr" rid="B117">2011</xref>; Zopf et al., <xref ref-type="bibr" rid="B212">2011</xref>). Following this tradition, the embodiment of virtual avatars was shown to affect different facets of perception, from tactile processing to pain perception and own body image.</p>
<p>When in an immersive VR scenario, embodying a life-size virtual avatar enhances the perception of touch delivered on a held object, with respect to having no body in VR (Gonzalez-Franco and Berger, <xref ref-type="bibr" rid="B65">2019</xref>). It was also shown that experiencing ownership toward a virtual avatar modulates the temporal constraints for associating two independent sensory cues, visual and tactile, to the same coherent visuo-tactile event (Maselli et al., <xref ref-type="bibr" rid="B121">2016</xref>; Gonzalez-Franco and Berger, <xref ref-type="bibr" rid="B65">2019</xref>). Virtual embodiment could therefore grant a larger flexibility to spatiotemporal offsets with respect to the constraints that apply in the physical world or when having an embodied self-avatar in VR.</p>
<p>Not only can embodiment modify the perception of timing of sensory inputs (Berger and Gonzalez-Franco, <xref ref-type="bibr" rid="B31">2018</xref>; Gonzalez-Franco and Berger, <xref ref-type="bibr" rid="B65">2019</xref>), but also the perception of other sensorial modalities such as temperature (Llobera et al., <xref ref-type="bibr" rid="B104">2013b</xref>) and pain (Llobera et al., <xref ref-type="bibr" rid="B103">2013a</xref>). Several studies have demonstrated that virtual embodiment can modulate pain (Lenggenhager et al., <xref ref-type="bibr" rid="B98">2010</xref>; Martini et al., <xref ref-type="bibr" rid="B120">2014</xref>). In particular, it has been shown that pain perception is modulated by the visual appearance of the virtual body, including its color (Martini et al., <xref ref-type="bibr" rid="B119">2013</xref>), shape (Matamala-Gomez et al., <xref ref-type="bibr" rid="B126">2020</xref>), or level of transparency (Martini et al., <xref ref-type="bibr" rid="B118">2015</xref>), as well as by the degree of spatial overlap between the real and the virtual bodies (Nierula et al., <xref ref-type="bibr" rid="B134">2017</xref>). This is a relevant area regarding chronic pain-therapeutical applications (Matamala-Gomez et al., <xref ref-type="bibr" rid="B125">2019b</xref>), although pain of different origins may require different manipulations of the embodied-avatars (Matamala-Gomez et al., <xref ref-type="bibr" rid="B124">2019a</xref>).</p>
<p>The embodiment of a virtual avatar further allows the temporal reshaping of peripersonal space, the space surrounding the body where external stimuli (e.g., visual or auditory) interact with the somatosensory system (Serino et al., <xref ref-type="bibr" rid="B175">2006</xref>). This can be done by manipulating the visual perspective over an embodied avatar, as for the case of illusory out-of-body experiences (Lenggenhager et al., <xref ref-type="bibr" rid="B99">2007</xref>; Blanke, <xref ref-type="bibr" rid="B34">2012</xref>; Maselli and Slater, <xref ref-type="bibr" rid="B123">2014</xref>), as well as by modulating the size and/or shape of the virtual body that interacts with this peripersonal space (Abtahi et al., <xref ref-type="bibr" rid="B12">2019</xref>), for example by having an elongated virtual arm (Kilteni et al., <xref ref-type="bibr" rid="B88">2012b</xref>; Feuchtner and M&#x000FC;ller, <xref ref-type="bibr" rid="B57">2017</xref>).</p>
<p>Such flexibility of the virtual body shape modification has been leveraged to study psychological phenomena such as body image in anorexia and other eating disorders (Piryankova et al., <xref ref-type="bibr" rid="B158">2014</xref>; M&#x000F6;lbert et al., <xref ref-type="bibr" rid="B130">2018</xref>) (<xref ref-type="fig" rid="F12">Figure 12</xref>).</p>
<p>Agency is also an important element of the perceptual experience associated with the embodiment of self-avatars. Suppose you are embodied in a virtual body with visuomotor synchrony and after a while the body does something that you did not do (in this case talk). It was found that participants have agency over the speaking, and the way they themselves speak later is influenced by how their self-avatar spoke (Banakou and Slater, <xref ref-type="bibr" rid="B24">2014</xref>). The rigged self-avatar which has its movements driven by the movements of the participant was crucial to this result, since it was later found that if the body ownership is induced through tactile synchrony, then although the subjective illusory agency still occurs, it is not complemented by the behavioral after-effect (Banakou and Slater, <xref ref-type="bibr" rid="B25">2017</xref>). Agency can also be induced when the real body does not move but the virtual body moves, after embodiment has been induced. This is the case when the virtual body is moved by means of a brain-computer interface (Nierula et al., <xref ref-type="bibr" rid="B135">2019</xref>) or when seated participants can have the illusion of walking, based solely on the feedback from their embodiment in a walking virtual body (Kokkinara et al., <xref ref-type="bibr" rid="B90">2016</xref>).</p>
<p>Interesting is also the case of pain perception. Several studies have demonstrated that virtual embodiment can modulate it (Lenggenhager et al., <xref ref-type="bibr" rid="B98">2010</xref>; Martini et al., <xref ref-type="bibr" rid="B120">2014</xref>) with important implication for pain treatment applications (Matamala-Gomez et al., <xref ref-type="bibr" rid="B124">2019a</xref>,<xref ref-type="bibr" rid="B125">b</xref>). In particular, it was shown that pain perception is modulated by the visual appearance of the virtual body, including its color (Martini et al., <xref ref-type="bibr" rid="B119">2013</xref>), shape (Matamala-Gomez et al., <xref ref-type="bibr" rid="B126">2020</xref>), and level of transparency (Martini et al., <xref ref-type="bibr" rid="B118">2015</xref>), as well as by the degree of spatial overlap between the real and the virtual bodies (Nierula et al., <xref ref-type="bibr" rid="B134">2017</xref>).</p>
</sec>
</sec>
<sec>
<title>6.2. Applications</title>
<p>The use and applications of avatars beyond basic scientific research is potentially as vast as the use of VR and AR and more generally computer graphics. In sum avatars are the main way to realistically represent humans, or in some situations, computational agents, inside digital content even if they are not displayed in immersive technologies, for example they are often part of animation movies or console games. Therefore, avatars have potential applications across many areas including therapeutic treatments, training, education, and entertainment. In this section we will only explore two areas of application that have traditionally had a big emphasis on avatars: the entertainment industry and simulations. We will try to summarize why having access to high-quality rigged avatars is important for these fields. For more possible applications of avatars and VR in general we recommend a more in-depth review by Slater and Sanchez-Vives (<xref ref-type="bibr" rid="B185">2016</xref>).</p>
<sec>
<title>6.2.1. Entertainment and Movies</title>
<p>Avatars or 3D characters can create a long-lasting emotional bond with the audience, and thus play an essential role in film-making, VR and computer games. For example, characters must exhibit clearly recognizable facial expressions that are consistent with their emotional state in the storyline (Aneja et al., <xref ref-type="bibr" rid="B15">2018</xref>). Manually creating character animation requires expertise and hours of work. To speed up the animation process, we could use human actors to control and animate a character using a facial motion capture system (Li et al., <xref ref-type="bibr" rid="B101">2015</xref>). Many of the techniques to create and animate avatars have been described above. However, there are particularities for this industry and despite recent advances in modeling capabilities, motion capture and control parameterization, most current animation studios still rely on artists manually producing high-quality frames. Most motion capture systems require skilled actors and laborious post-processing steps. The need for the avatars to match or exaggerate the physiology of the performer limits the possible motions (e.g., an actor cannot perform exaggerated facial expressions).</p>
<p>Alleviating artist workload, but creating believable and compelling character motions, is arguably the central challenge in animated storytelling. Some professionals are using VR-based interfaces to pose and animate 3D characters: (PoseVR, <xref ref-type="bibr" rid="B159">2019</xref>) or (Pan and Mitchell, <xref ref-type="bibr" rid="B145">2020</xref>) developed by the Walt Disney Animation Studios, is a recent example in this direction. Meanwhile, a couple of VR-based research prototypes and commercial products have also recently been developed; however, these have mainly targeted to non-professional users (Cannav&#x000F2; et al., <xref ref-type="bibr" rid="B39">2019</xref>). Alternatively, some researchers have proposed methods for generating 3D character expressions from humans in a geometrically consistent and perceptually valid manner using machine learning models (Aneja et al., <xref ref-type="bibr" rid="B15">2018</xref>).</p>
<p>By open sourcing high quality rigs, the Microsoft Rocketbox avatar library, we are providing opportunities for researchers, engineers, and artists to work together to discover new tools and techniques that will shape the future of animated storytelling.</p>
</sec>
<sec>
<title>6.2.2. Avatars in Simulation</title>
<p>No matter how realistic a virtual environment looks from the point of view of geometry and rendering, it is essential that the virtual environment appears populated by realistic people and crowds in order to bring those virtual environments to life.</p>
<p>The need for other people with whom a person interacts in VR/AR ranges from simulating one or two avatars to a whole crowd. Multiple studies have found that people behave realistically when interacting with avatars inside VR/AR. In recent years these realistic responses gained the interest of sociologists and psychologists who want to explore scientifically increasingly complex scenarios. Inside VR, researchers have replicated obedience to authority paradigms such as the Milgram experiments, that became almost impossible to run in real setups due to the ethical considerations regarding the deception scheme underlying the learner and punishment mechanisms (Slater et al., <xref ref-type="bibr" rid="B180">2006</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B71">2019b</xref>). Indeed, the replication of famous results such as the Milgram studies, further validate the use of avatars for social-psychological studies (<xref ref-type="fig" rid="F13">Figure 13</xref>). Indeed, VR Milgram paradigms have recently been used to study empathy levels, predisposition and conformity to sexual harassment scenarios (Neyret et al., <xref ref-type="bibr" rid="B133">2020</xref>).</p>
<fig id="F13" position="float">
<label>Figure 13</label>
<caption><p>One of the Microsoft Rocketbox avatars being used for a recreation of the Stanley Milgram experiment on obedience to authority at University College London by Gonzalez-Franco et al. (<xref ref-type="bibr" rid="B71">2019b</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0013.tif"/>
</fig>
<p>Researchers have also used VR to create violent scenarios. For example, converting the avatar into a domestic abuser to see what the response of a real offender would be when exposed to this role scenario (Seinfeld et al., <xref ref-type="bibr" rid="B174">2018</xref>), or studying what happens when a violent scenario has bystanders. In an experiment with soccer fans in a VR pub, researchers found a strong in-group, out-group effect for the victim of a soccer bullying interaction (Rovira et al., <xref ref-type="bibr" rid="B166">2009</xref>). However, the response of the participants would vary depending on whether there were other bystander avatars and whether or not they were fans of the same soccer team (Slater et al., <xref ref-type="bibr" rid="B184">2013</xref>). Some of these scenarios recreated in VR would be impossible in reality.</p>
<p>Besides the interactions with avatars in violent scenarios, researchers have also explored how users of different personalities interact with avatars. For example (Pan et al., <xref ref-type="bibr" rid="B142">2012</xref>) studied how socially anxious and confident men interacted with a forward virtual woman, and how medical doctors respond to avatar patients who insist and demand unreasonably being treated with antibiotics (Pan et al., <xref ref-type="bibr" rid="B144">2016</xref>).</p>
<p>Research in the area of social-psychology has also utilized avatars, for example to study moral dilemmas on how people would react when exposed to a shooting in a museum (Pan and Slater, <xref ref-type="bibr" rid="B143">2011</xref>; Friedman et al., <xref ref-type="bibr" rid="B59">2014</xref>). Or how different types of audiences would affect public speaking anxiety (Pertaub et al., <xref ref-type="bibr" rid="B154">2002</xref>), and phobias (Botella et al., <xref ref-type="bibr" rid="B36">2017</xref>).</p>
<p>Simulations have evolved from a different angle in the field of crowd simulation (<xref ref-type="fig" rid="F14">Figure 14</xref>). In that area researchers have spent a great deal of effort in improving the algorithms that move agents smoothly between two points while avoiding collisions. However, no matter how close the simulation gets to real data, it is essential that each agent&#x00027;s position is then represented with a natural looking fully rigged avatar. The crowd simulation field has focused its work in the development of a large number of algorithms based on social forces (Helbing et al., <xref ref-type="bibr" rid="B79">2000</xref>), geometrical rules (Pelechano et al., <xref ref-type="bibr" rid="B150">2007</xref>), vision-based approaches (L&#x000F3;pez et al., <xref ref-type="bibr" rid="B109">2019</xref>), velocity vectors (Van den Berg et al., <xref ref-type="bibr" rid="B196">2008</xref>), or data-driven approaches (Charalambous and Chrysanthou, <xref ref-type="bibr" rid="B43">2014</xref>). Often psychological and personality traits can be included to add heterogeneity to the crowd (Pelechano et al., <xref ref-type="bibr" rid="B151">2016</xref>). The output of a crowd simulation model is typically limited to just a position, the motion and sometimes some limited pose data such as a torso orientation. This type of output is repeatedly rendered as a crowd of moving points, or simple geometrical proxies such as 3D cylinders. Ideally the crowd simulation output should be seamlessly fed into a system that could provide fully animated avatars with animations naturally matching the crowd trajectories. Research in real-time animation is not yet at the stage of providing a good real-time solution to this problem, but having high-quality fully rigged avatars is already a big step forward into making crowd simulation more realistic and thus, being ready to enhance the realism of immersive virtual scenarios.</p>
<fig id="F14" position="float">
<label>Figure 14</label>
<caption><p>Microsoft Rocketbox avatars being used for a recreation of a train station for studies of human behavior in crowded VR situations at the Universitat Polit&#x000E8;cnica de Catalunya, by R&#x000ED;os and Pelechano (<xref ref-type="bibr" rid="B162">2020</xref>).</p></caption>
<graphic xlink:href="frvir-01-561558-g0014.tif"/>
</fig>
<p>Research efforts into simulations of a few detailed humans and large crowds are gradually converging. The simulation research community needs realistic looking rigged avatars. For large crowds it also needs them to have flexibility in the number of polygons so that the cost of skinning and rendering does not become a bottleneck (Beacco et al., <xref ref-type="bibr" rid="B28">2016</xref>). Natural looking avatars are not only critical to small simulations but can also greatly enhance the crowd simulation appearance when being rendered in 3D on a computer screen. This effect is even more important when being rendered in an HMD, where avatars are seen at eye level and from close distances.</p>
<p>Having realistic and well-formed avatars is especially relevant if we are studying human behavior in crowded spaces using immersive VR. Such setups can be used, for example, to evaluate human decision making during emergencies based on the behavior of the crowd (R&#x000ED;os and Pelechano, <xref ref-type="bibr" rid="B162">2020</xref>). Or to explore cognitive models of locomotion in crowded spaces (Thalmann, <xref ref-type="bibr" rid="B194">2007</xref>; Luo et al., <xref ref-type="bibr" rid="B111">2008</xref>; Olivier et al., <xref ref-type="bibr" rid="B136">2014</xref>). Or for one-to-one interactions with avatars inside VR while performing locomotion (Pelechano et al., <xref ref-type="bibr" rid="B152">2011</xref>; R&#x000ED;os et al., <xref ref-type="bibr" rid="B161">2018</xref>).</p>
<p>While in one-to-one interactions the use of facial expressions has been growing (Vinayagamoorthy et al., <xref ref-type="bibr" rid="B197">2006</xref>; Gonzalez-Franco et al., <xref ref-type="bibr" rid="B72">2020b</xref>), in current crowd simulations that very important aspect is missing. Current simulated virtual crowds appear as non-expressive avatars that simply look straight ahead while maneuvering around obstacles and other agents. There have been some attempts to introduce gaze so that avatars appear to acknowledge the user&#x00027;s presence in the virtual environment but without changes in facial expression (Narang et al., <xref ref-type="bibr" rid="B132">2016</xref>).</p>
<p>The Microsoft Rocketbox avatars not only provide a large set of avatars that can be animated for simulations but also provide the possibility of including facial expressions, which can open the doors to achieving virtual simulations of avatars and crowds that appear more lively and that can show real emotions. This will have a massive impact on the overall credibility of the crowd and will enrich the heterogeneity and realism of the populated virtual worlds.</p>
</sec>
</sec>
</sec>
<sec sec-type="conclusions" id="s7">
<title>7. Conclusions</title>
<p>This paper gives an overview of the different pipelines that can be used to create 3D avatars that resemble humans, from mesh creation to rigging and animation, and from manual artistic work to deep learning advancements. For each part of the pipeline we present the different options and outline the process complexities and pitfalls. In many cases there are references to the specific tools that the authors have used in the past for creating avatars themselves. These creation tools are also put into context of the Microsoft Rocketbox avatar library release with more details as to how these particular avatars were created and their limitations.</p>
<p>Furthermore, the paper reviews how these avatars are being used for scientific purposes and emphasizes the unique application of self-avatars that can be used to substitute the human subject&#x00027;s own body inside VR. Further details on applications in other fields such as crowd simulations and entertainment are depicted but for a full review of applications of avatars and VR in general see (Slater and Sanchez-Vives, <xref ref-type="bibr" rid="B185">2016</xref>; Gonzalez-Franco and Lanier, <xref ref-type="bibr" rid="B67">2017</xref>). Altogether, this paper conveys the many needs for rigged avatars that allow manipulation and animation in real time for the future of Virtual Reality. We anticipate that this widely used and freely available library and some of its important applications will enable novel research, and we encourage the research community to complete and share their research and/or enhanced tools based on this paper and avatar library with future publications.</p>
</sec>
<sec sec-type="data-availability-statement" id="s8">
<title>Data Availability Statement</title>
<p>The full Microsoft Rocketbox library is publicly available for research and academic use and can be downloaded at <ext-link ext-link-type="uri" xlink:href="https://github.com/microsoft/Microsoft-Rocketbox">https://github.com/microsoft/Microsoft-Rocketbox</ext-link> (Mic, <xref ref-type="bibr" rid="B8">2020</xref>). Microsoft Rocketbox avatars were a proprietary large and varied set of rigged avatars representing humans of different genders, races, occupations as well as some non-human examples. Rocketbox Studios GmbH released three different libraries of avatars: starting with &#x0201C;Complete Characters&#x0201D; in 2005, and finally a new generation of highly detailed avatars and animations named &#x0201C;Complete Characters HD&#x0201D; from 2010 to 2015. It includes 115 characters and avatars, created over the course of these 10 years. Rocketbox Studios GmbH was then acquired by Havok, which is now part of Microsoft, the entity that has now released the library free for academic and research purposes. The diversity of the characters and the quality of the rigging together with relatively low-poly meshes, made this library the go-to asset among research laboratories worldwide from crowd simulation to real-time avatar embodiment and Virtual Reality (VR). Ever since their launch, laboratories around the globe have been using the library and many of the authors in this paper have extensively used these avatars during their research.</p>
</sec>
<sec id="s9">
<title>Author Contributions</title>
<p>The sections dedicated to the creation of the avatars were mainly contributed by MG-F, EO, AS, YP, AA, BS, MW, LT, SO-E, and VO. The sections dedicated to reviewing avatar science and research were mainly contributed by AM, MS-V, MS, JB, JL, YP, NP, and MG-F. All authors have contributed to the writing of this paper.</p>
</sec>
<sec id="s10">
<title>Conflict of Interest</title>
<p>Microsoft, Disney, Google, Didimo, and Virtual Bodyworks are either private or traded companies with interests in the topic discussed. MG-F, EO, AA, AS, and JL were employed by company Microsoft. SO-E was employed by company Google. YP was employed by company Disney. VO was employed by company Didimo. BS was employed by company Virtual BodyWorks. The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
</body>
<back>
<ack><p>The authors would like to thank also to the contribution to the creation of the original Rocketbox library of avatars to: Martin Beyer, Michael Dogan, Denis Dzienziol, Manu Eidner, Adrian Kumorowski, Johannes Ostrowitzki, Aleksander Roman, Artur Sabat, Robert Zimmermann, that together with Markus Wojcik created the original library. And to Dave Garagan from Havok for his help in the release of the public library. We also thank Tony Donegan for the language editing.</p>
</ack>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frvir.2020.561558/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frvir.2020.561558/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="web">(<year>2020</year>). <source>3d Studio Max</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.autodesk.com/products/3ds-max">https://www.autodesk.com/products/3ds-max</ext-link></citation>
</ref>
<ref id="B2">
<citation citation-type="web">(<year>2020</year>). <source>Adobe Fuse</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.adobe.com/products/fuse.html">https://www.adobe.com/products/fuse.html</ext-link><pub-id pub-id-type="pmid">23893476</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="web">(<year>2020</year>). <source>Binaryvr by Hyprsense</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.hyprsense.com/hyprface">https://www.hyprsense.com/hyprface</ext-link></citation>
</ref>
<ref id="B4">
<citation citation-type="web">(<year>2020</year>). <source>Blender</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.blender.org/">https://www.blender.org/</ext-link></citation>
</ref>
<ref id="B5">
<citation citation-type="web">(<year>2020</year>). <source>Cats</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://vrcat.club/threads/cats-blender-plugin-0-14-0.6">http://vrcat.club/threads/cats-blender-plugin-0-14-0.6</ext-link></citation>
</ref>
<ref id="B6">
<citation citation-type="web">(<year>2020</year>).<source>Maya</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.autodesk.com/products/maya">https://www.autodesk.com/products/maya</ext-link></citation>
</ref>
<ref id="B7">
<citation citation-type="web">(<year>2020</year>).<source>Metamotion</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://metamotion.com/gypsy/gypsy-motion-capture-system.htm">https://metamotion.com/gypsy/gypsy-motion-capture-system.htm</ext-link></citation>
</ref>
<ref id="B8">
<citation citation-type="web">(<year>2020</year>).<source>Microsoftrocketbox</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://github.com/microsoft/Microsoft-Rocketbox">https://github.com/microsoft/Microsoft-Rocketbox</ext-link></citation>
</ref>
<ref id="B9">
<citation citation-type="web">(<year>2020</year>).<source>Mixamo</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.mixamo.com">https://www.mixamo.com</ext-link></citation>
</ref>
<ref id="B10">
<citation citation-type="web">(<year>2020</year>).<source>Optitrack</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://optitrack.com">https://optitrack.com</ext-link></citation>
</ref>
<ref id="B11">
<citation citation-type="web">(<year>2020</year>).<source>Vicon</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://vicon.com">https://vicon.com</ext-link></citation>
</ref>
<ref id="B12">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Abtahi</surname> <given-names>P.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>I&#x00027;m a giant: walking in large virtual environments at high speed gains</article-title>, in <source>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems</source> (<publisher-loc>Glasgow</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1145/3290605.3300752</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ahuja</surname> <given-names>K.</given-names></name> <name><surname>Harrison</surname> <given-names>C.</given-names></name> <name><surname>Goel</surname> <given-names>M.</given-names></name> <name><surname>Xiao</surname> <given-names>R.</given-names></name></person-group> (<year>2019</year>). <article-title>MeCap: whole-body digitization for low-cost VR/AR headsets</article-title>, in <source>Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology</source> (<publisher-loc>New orleans, LU</publisher-loc>), <fpage>453</fpage>&#x02013;<lpage>462</lpage>. <pub-id pub-id-type="doi">10.1145/3332165.3347889</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aitpayev</surname> <given-names>K.</given-names></name> <name><surname>Gaber</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>Creation of 3D human avatar using kinect</article-title>. <source>Asian Trans. Fundam. Electron. Commun. Multimed</source>. <volume>1</volume>, <fpage>12</fpage>&#x02013;<lpage>24</lpage>.</citation></ref>
<ref id="B15">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Aneja</surname> <given-names>D.</given-names></name> <name><surname>Chaudhuri</surname> <given-names>B.</given-names></name> <name><surname>Colburn</surname> <given-names>A.</given-names></name> <name><surname>Faigin</surname> <given-names>G.</given-names></name> <name><surname>Shapiro</surname> <given-names>L.</given-names></name> <name><surname>Mones</surname> <given-names>B.</given-names></name></person-group> (<year>2018</year>). <article-title>Learning to generate 3D stylized character expressions from humans</article-title>, in <source>2018 IEEE Winter Conference on Applications of Computer Vision (WACV)</source> (<publisher-loc>Lake Tahoe, CA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>160</fpage>&#x02013;<lpage>169</lpage>. <pub-id pub-id-type="doi">10.1109/WACV.2018.00024</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aristidou</surname> <given-names>A.</given-names></name> <name><surname>Lasenby</surname> <given-names>J.</given-names></name> <name><surname>Chrysanthou</surname> <given-names>Y.</given-names></name> <name><surname>Shamir</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Inverse kinematics techniques in computer graphics: a survey</article-title>. <source>Comput. Graph. Forum</source> <volume>37</volume>, <fpage>35</fpage>&#x02013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1111/cgf.13310</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Armel</surname> <given-names>K. C.</given-names></name> <name><surname>Ramachandran</surname> <given-names>V. S.</given-names></name></person-group> (<year>2003</year>). <article-title>Projecting sensations to external objects: evidence from skin conductance response</article-title>. <source>Proc. R. Soc. Lond. B Biol. Sci</source>. <volume>270</volume>, <fpage>1499</fpage>&#x02013;<lpage>1506</lpage>. <pub-id pub-id-type="doi">10.1098/rspb.2003.2364</pub-id><pub-id pub-id-type="pmid">12965016</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Aymerich-Franch</surname> <given-names>L.</given-names></name></person-group> (<year>2012</year>). <article-title>Can we identify with a block? Identification with non-anthropomorphic avatars in virtual reality games</article-title>, in <source>Proceedings of the International Society for Presence Research Annual Conference</source> (<publisher-loc>Philadelphia, CA</publisher-loc>), <fpage>24</fpage>&#x02013;<lpage>26</lpage>.</citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Badler</surname> <given-names>N. I.</given-names></name> <name><surname>Hollick</surname> <given-names>M. J.</given-names></name> <name><surname>Granieri</surname> <given-names>J. P.</given-names></name></person-group> (<year>1993</year>). <article-title>Real-time control of a virtual human using minimal sensors</article-title>. <source>Presence Teleoper. Virt. Environ</source>. <volume>2</volume>, <fpage>82</fpage>&#x02013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1162/pres.1993.2.1.82</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bailenson</surname> <given-names>J.</given-names></name> <name><surname>Blascovich</surname> <given-names>J.</given-names></name></person-group> (<year>2004</year>). <article-title>Avatars. encyclopedia of human-computer interaction</article-title>. <source>Berkshire Publ. Group</source> <volume>64</volume>:<fpage>68</fpage>.</citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Groten</surname> <given-names>R.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Illusory ownership of a virtual child body causes overestimation of object sizes and implicit attitude changes</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A</source>. <volume>110</volume>, <fpage>12846</fpage>&#x02013;<lpage>12851</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1306779110</pub-id><pub-id pub-id-type="pmid">23858436</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Hanumanthu</surname> <given-names>P. D.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>Virtual embodiment of white people in a black virtual body leads to a sustained reduction in their implicit racial bias</article-title>. <source>Front. Hum. Neurosci</source>. <volume>10</volume>:<fpage>601</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00601</pub-id><pub-id pub-id-type="pmid">27965555</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Kishore</surname> <given-names>S.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Virtually being einstein results in an improvement in cognitive task performance and a decrease in age bias</article-title>. <source>Front. Psychol</source>. <volume>9</volume>:<fpage>917</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2018.00917</pub-id><pub-id pub-id-type="pmid">29942270</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Body ownership causes illusory self-attribution of speaking and influences subsequent real speaking</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A</source>. <volume>111</volume>, <fpage>17678</fpage>&#x02013;<lpage>17683</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1414936111</pub-id><pub-id pub-id-type="pmid">25422444</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Embodiment in a virtual body that speaks produces agency over the speaking but does not necessarily influence subsequent real speaking</article-title>. <source>Sci. Rep</source>. <volume>7</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-14620-5</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baran</surname> <given-names>I.</given-names></name> <name><surname>Popovi&#x00107;</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>Automatic rigging and animation of 3D characters</article-title>. <source>ACM Trans. Graph</source>. <volume>26</volume>:<fpage>72</fpage>. <pub-id pub-id-type="doi">10.1145/1276377.1276467</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Baumberg</surname> <given-names>A.</given-names></name></person-group> (<year>2002</year>). <article-title>Blending images for texturing 3D models</article-title>, in <source>Conference on British Machine Vision Association</source> (<publisher-loc>Cardiff</publisher-loc>), <fpage>404</fpage>&#x02013;<lpage>413</lpage>. <pub-id pub-id-type="doi">10.5244/C.16.38</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Beacco</surname> <given-names>A.</given-names></name> <name><surname>Pelechano</surname> <given-names>N.</given-names></name> <name><surname>And&#x000FA;jar</surname> <given-names>C.</given-names></name></person-group> (<year>2016</year>). <article-title>A survey of real-time crowd rendering</article-title>. <source>Comput. Graph. Forum</source> <volume>35</volume>, <fpage>32</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1111/cgf.12774</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bedder</surname> <given-names>R. L.</given-names></name> <name><surname>Bush</surname> <given-names>D.</given-names></name> <name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Peck</surname> <given-names>T.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Burgess</surname> <given-names>N.</given-names></name></person-group> (<year>2019</year>). <article-title>A mechanistic account of bodily resonance and implicit bias</article-title>. <source>Cognition</source> <volume>184</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2018.11.010</pub-id><pub-id pub-id-type="pmid">30553934</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bem</surname> <given-names>D. J.</given-names></name></person-group> (<year>1972</year>). <article-title>Self-perception theory</article-title>. <source>Adv. Exp. Soc. Psychol</source>. <volume>6</volume>, <fpage>1</fpage>&#x02013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.1016/S0065-2601(08)60024-6</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Berger</surname> <given-names>C. C.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Expanding the sense of touch outside the body</article-title>, in <source>Proceedings of the 15th ACM Symposium on Applied Perception</source> (<publisher-loc>ACM</publisher-loc>), <fpage>10</fpage>. <pub-id pub-id-type="doi">10.1145/3225153.3225172</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Berger</surname> <given-names>C. C.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Hinckley</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>The uncanny valley of haptics</article-title>. <source>Sci. Robot</source>. <volume>3</volume>:<fpage>eaar7010</fpage>. <pub-id pub-id-type="doi">10.1126/scirobotics.aar7010</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Blanchard</surname> <given-names>C.</given-names></name> <name><surname>Burgess</surname> <given-names>S.</given-names></name> <name><surname>Harvill</surname> <given-names>Y.</given-names></name> <name><surname>Lanier</surname> <given-names>J.</given-names></name> <name><surname>Lasko</surname> <given-names>A.</given-names></name> <name><surname>Oberman</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>1990</year>). <article-title>Reality built for two: a virtual reality tool</article-title>, in <source>Proceedings of the 1990 Symposium on Interactive 3D Graphics</source> (<publisher-loc>Snowbird, UT</publisher-loc>), <fpage>35</fpage>&#x02013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.1145/91385.91409</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2012</year>). <article-title>Multisensory brain mechanisms of bodily self-consciousness</article-title>. <source>Nat. Rev. Neurosci</source>. <volume>13</volume>:<fpage>556</fpage>. <pub-id pub-id-type="doi">10.1038/nrn3292</pub-id><pub-id pub-id-type="pmid">22805909</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Blanz</surname> <given-names>V.</given-names></name> <name><surname>Vetter</surname> <given-names>T.</given-names></name></person-group> (<year>1999</year>). <article-title>A morphable model for the synthesis of 3D faces</article-title>, in <source>Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques</source> (<publisher-loc>Los Angeles, CA</publisher-loc>), <fpage>187</fpage>&#x02013;<lpage>194</lpage>. <pub-id pub-id-type="doi">10.1145/311535.311556</pub-id><pub-id pub-id-type="pmid">17354809</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Botella</surname> <given-names>C.</given-names></name> <name><surname>Fern&#x000E1;ndez-&#x000C1;lvarez</surname> <given-names>J.</given-names></name> <name><surname>Guill&#x000E9;n</surname> <given-names>V.</given-names></name> <name><surname>Garc&#x000ED;a-Palacios</surname> <given-names>A.</given-names></name> <name><surname>Ba&#x000F1;os</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Recent progress in virtual reality exposure therapy for phobias: a systematic review</article-title>. <source>Curr. Psychiatry Rep</source>. <volume>19</volume>:<fpage>42</fpage>. <pub-id pub-id-type="doi">10.1007/s11920-017-0788-4</pub-id><pub-id pub-id-type="pmid">28540594</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Botvinick</surname> <given-names>M.</given-names></name> <name><surname>Cohen</surname> <given-names>J.</given-names></name></person-group> (<year>1998</year>). <article-title>Rubber hands &#x02018;feel&#x00027; touch that eyes see</article-title>. <source>Nature</source> <volume>391</volume>:<fpage>756</fpage>. <pub-id pub-id-type="doi">10.1038/35784</pub-id><pub-id pub-id-type="pmid">9486643</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bouaziz</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Pauly</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Online modeling for realtime facial animation</article-title>. <source>ACM Trans. Graph</source>. <volume>32</volume>:<fpage>40</fpage>. <pub-id pub-id-type="doi">10.1145/2461912.2461976</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cannav&#x000F3;</surname> <given-names>A.</given-names></name> <name><surname>Demartini</surname> <given-names>C.</given-names></name> <name><surname>Morra</surname> <given-names>L.</given-names></name> <name><surname>Lamberti</surname> <given-names>F.</given-names></name></person-group> (<year>2019</year>). <article-title>Immersive virtual reality-based interfaces for character animation</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>125463</fpage>&#x02013;<lpage>125480</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2939427</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>C.</given-names></name> <name><surname>Weng</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>S.</given-names></name> <name><surname>Tong</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Facewarehouse: a 3D facial expression database for visual computing</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>20</volume>, <fpage>413</fpage>&#x02013;<lpage>425</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2013.249</pub-id><pub-id pub-id-type="pmid">24434222</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>Z.</given-names></name> <name><surname>Simon</surname> <given-names>T.</given-names></name> <name><surname>Wei</surname> <given-names>S.-E.</given-names></name> <name><surname>Sheikh</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>Realtime multi-person 2D pose estimation using part affinity fields</article-title>, in <source>Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Honolulu, HI</publisher-loc>), <fpage>7291</fpage>&#x02013;<lpage>7299</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2017.143</pub-id><pub-id pub-id-type="pmid">31331883</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Casas</surname> <given-names>D.</given-names></name> <name><surname>Feng</surname> <given-names>A.</given-names></name> <name><surname>Alexander</surname> <given-names>O.</given-names></name> <name><surname>Fyffe</surname> <given-names>G.</given-names></name> <name><surname>Debevec</surname> <given-names>P.</given-names></name> <name><surname>Ichikari</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Rapid photorealistic blendshape modeling from RGB-D sensors</article-title>, in <source>Proceedings of the 29th International Conference on Computer Animation and Social Agents</source> (<publisher-loc>Geneva</publisher-loc>), <fpage>121</fpage>&#x02013;<lpage>129</lpage>. <pub-id pub-id-type="doi">10.1145/2915926.2915936</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Charalambous</surname> <given-names>P.</given-names></name> <name><surname>Chrysanthou</surname> <given-names>Y.</given-names></name></person-group> (<year>2014</year>). <article-title>The PAG crowd: a graph based approach for efficient data-driven crowd simulation</article-title>. <source>Comput. Graph. Forum</source> <volume>33</volume>, <fpage>95</fpage>&#x02013;<lpage>108</lpage>. <pub-id pub-id-type="doi">10.1111/cgf.12403</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Collet</surname> <given-names>A.</given-names></name> <name><surname>Chuang</surname> <given-names>M.</given-names></name> <name><surname>Sweeney</surname> <given-names>P.</given-names></name> <name><surname>Gillett</surname> <given-names>D.</given-names></name> <name><surname>Evseev</surname> <given-names>D.</given-names></name> <name><surname>Calabrese</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>High-quality streamable free-viewpoint video</article-title>. <source>ACM Trans. Graph</source>. <volume>34</volume>:<fpage>69</fpage>. <pub-id pub-id-type="doi">10.1145/2766945</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>de Borst</surname> <given-names>A. W.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>de Gelder</surname> <given-names>B.</given-names></name></person-group> (<year>2020</year>). <article-title>First person virtual embodiment modulates cortical network that encodes the bodily self and its surrounding space during the experience of domestic violence</article-title>. <source>eNeuro</source> <volume>7</volume>:ENEURO.0263-19.2019. <pub-id pub-id-type="doi">10.1523/ENEURO.0263-19.2019</pub-id><pub-id pub-id-type="pmid">32312823</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>De Vignemont</surname> <given-names>F.</given-names></name></person-group> (<year>2011</year>). <article-title>Embodiment, ownership and disownership</article-title>. <source>Conscious. Cogn</source>. <volume>20</volume>, <fpage>82</fpage>&#x02013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2010.09.004</pub-id><pub-id pub-id-type="pmid">20943417</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Debevec</surname> <given-names>P.</given-names></name> <name><surname>Hawkins</surname> <given-names>T.</given-names></name> <name><surname>Tchou</surname> <given-names>C.</given-names></name> <name><surname>Duiker</surname> <given-names>H.-P.</given-names></name> <name><surname>Sarokin</surname> <given-names>W.</given-names></name> <name><surname>Sagarz</surname> <given-names>M.</given-names></name></person-group> (<year>2000</year>). <article-title>Acquiring the reflectance field of a human face</article-title>, in <source>SIGGRAPH</source> (<publisher-loc>New Orleans, LA</publisher-loc>). <pub-id pub-id-type="doi">10.1145/344779.344855</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname> <given-names>M.</given-names></name> <name><surname>Davidson</surname> <given-names>P. L.</given-names></name> <name><surname>Fanello</surname> <given-names>S. R.</given-names></name> <name><surname>Khamis</surname> <given-names>S.</given-names></name> <name><surname>Kowdle</surname> <given-names>A.</given-names></name> <name><surname>Rhemann</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Motion2fusion: real-time volumetric performance capture</article-title>. <source>ACM Trans. Graph</source>. <volume>36</volume>, <fpage>246:1</fpage>&#x02013;<lpage>246:16</lpage>. <pub-id pub-id-type="doi">10.1145/3130800.3130801</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname> <given-names>M.</given-names></name> <name><surname>Khamis</surname> <given-names>S.</given-names></name> <name><surname>Degtyarev</surname> <given-names>Y.</given-names></name> <name><surname>Davidson</surname> <given-names>P. L.</given-names></name> <name><surname>Fanello</surname> <given-names>S. R.</given-names></name> <name><surname>Kowdle</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Fusion 4D: real-time performance capture of challenging scenes</article-title>. <source>ACM Trans. Graph</source>. <volume>35</volume>, <fpage>114:1</fpage>&#x02013;<lpage>114:13</lpage>. <pub-id pub-id-type="doi">10.1145/2897824.2925969</pub-id></citation></ref>
<ref id="B50">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ehrsson</surname> <given-names>H. H.</given-names></name></person-group> (<year>2012</year>). <article-title>The concept of body ownership and its relation to multisensory integration</article-title>, in <source>The New Handbook of Multisensory Process</source>, ed <person-group person-group-type="editor"><name><surname>Stein</surname> <given-names>B. E.</given-names></name></person-group> (<publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>), <fpage>775</fpage>&#x02013;<lpage>792</lpage>.</citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ehrsson</surname> <given-names>H. H.</given-names></name> <name><surname>Wiech</surname> <given-names>K.</given-names></name> <name><surname>Weiskopf</surname> <given-names>N.</given-names></name> <name><surname>Dolan</surname> <given-names>R. J.</given-names></name> <name><surname>Passingham</surname> <given-names>R. E.</given-names></name></person-group> (<year>2007</year>). <article-title>Threatening a rubber hand that you feel is yours elicits a cortical anxiety response</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A</source>. <volume>104</volume>, <fpage>9828</fpage>&#x02013;<lpage>9833</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0610011104</pub-id><pub-id pub-id-type="pmid">17517605</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ekman</surname> <given-names>P.</given-names></name> <name><surname>Friesen</surname> <given-names>W. V.</given-names></name></person-group> (<year>1976</year>). <article-title>Measuring facial movement</article-title>. <source>Environ. Psychol. Nonverbal Behav</source>. <volume>1</volume>, <fpage>56</fpage>&#x02013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1007/BF01115465</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Esteban</surname> <given-names>C. H.</given-names></name> <name><surname>Schmitt</surname> <given-names>F.</given-names></name></person-group> (<year>2004</year>). <article-title>Silhouette and stereo fusion for 3D object modeling</article-title>. <source>Comput. Vis. Image Understand</source>. <volume>96</volume>, <fpage>367</fpage>&#x02013;<lpage>392</lpage>. <pub-id pub-id-type="doi">10.1016/j.cviu.2004.03.016</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falconer</surname> <given-names>C. J.</given-names></name> <name><surname>Rovira</surname> <given-names>A.</given-names></name> <name><surname>King</surname> <given-names>J. A.</given-names></name> <name><surname>Gilbert</surname> <given-names>P.</given-names></name> <name><surname>Antley</surname> <given-names>A.</given-names></name> <name><surname>Fearon</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Embodying self-compassion within virtual reality and its effects on patients with depression</article-title>. <source>BJPsych. Open</source> <volume>2</volume>, <fpage>74</fpage>&#x02013;<lpage>80</lpage>. <pub-id pub-id-type="doi">10.1192/bjpo.bp.115.002147</pub-id><pub-id pub-id-type="pmid">27703757</pub-id></citation></ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falconer</surname> <given-names>C. J.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Rovira</surname> <given-names>A.</given-names></name> <name><surname>King</surname> <given-names>J. A.</given-names></name> <name><surname>Gilbert</surname> <given-names>P.</given-names></name> <name><surname>Antley</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Embodying compassion: a virtual reality paradigm for overcoming excessive self-criticism</article-title>. <source>PLoS ONE</source> <volume>9</volume>:<fpage>e111933</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0111933</pub-id><pub-id pub-id-type="pmid">25389766</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>A.</given-names></name> <name><surname>Casas</surname> <given-names>D.</given-names></name> <name><surname>Shapiro</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Avatar reshaping and automatic rigging using a deformable model</article-title>, in <source>Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games</source> (<publisher-loc>Los Angeles, CA</publisher-loc>), <fpage>57</fpage>&#x02013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1145/2822013.2822017</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Feuchtner</surname> <given-names>T.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Extending the body for interaction with reality</article-title>, in <source>Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems</source> (<publisher-loc>Denver, CO</publisher-loc>), <fpage>5145</fpage>&#x02013;<lpage>5157</lpage>. <pub-id pub-id-type="doi">10.1145/3025453.3025689</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Folegatti</surname> <given-names>A.</given-names></name> <name><surname>De Vignemont</surname> <given-names>F.</given-names></name> <name><surname>Pavani</surname> <given-names>F.</given-names></name> <name><surname>Rossetti</surname> <given-names>Y.</given-names></name> <name><surname>Farn&#x000E9;</surname> <given-names>A.</given-names></name></person-group> (<year>2009</year>). <article-title>Losing one&#x00027;s hand: visual-proprioceptive conflict affects touch perception</article-title>. <source>PLoS ONE</source> <volume>4</volume>:<fpage>e6920</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0006920</pub-id><pub-id pub-id-type="pmid">19738900</pub-id></citation></ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friedman</surname> <given-names>D.</given-names></name> <name><surname>Pizarro</surname> <given-names>R.</given-names></name> <name><surname>Or-Berkers</surname> <given-names>K.</given-names></name> <name><surname>Neyret</surname> <given-names>S.</given-names></name> <name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>A method for generating an illusion of backwards time travel using immersive virtual reality-an exploratory study</article-title>. <source>Front. Psychol</source>. <volume>5</volume>:<fpage>943</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2014.00943</pub-id><pub-id pub-id-type="pmid">25228889</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Frueh</surname> <given-names>C.</given-names></name> <name><surname>Sud</surname> <given-names>A.</given-names></name> <name><surname>Kwatra</surname> <given-names>V.</given-names></name></person-group> (<year>2017</year>). <article-title>Headset removal for virtual and mixed reality</article-title>, in <source>SIGGRAPH Talks 2017</source> (<publisher-loc>Los Angeles, CA</publisher-loc>). <pub-id pub-id-type="doi">10.1145/3084363.3085083</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gal</surname> <given-names>R.</given-names></name> <name><surname>Wexler</surname> <given-names>Y.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Hoppe</surname> <given-names>H.</given-names></name> <name><surname>Cohen-Or</surname> <given-names>D.</given-names></name></person-group> (<year>2010</year>). <article-title>Seamless montage for texturing models</article-title>, in <source>EuroGraphics</source> (<publisher-loc>Norrk&#x000F6;ping</publisher-loc>). <pub-id pub-id-type="doi">10.1111/j.1467-8659.2009.01617.x</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Garau</surname> <given-names>M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Vinayagamoorthy</surname> <given-names>V.</given-names></name> <name><surname>Brogni</surname> <given-names>A.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name> <name><surname>Sasse</surname> <given-names>M. A.</given-names></name></person-group> (<year>2003</year>). <article-title>The impact of avatar realism and eye gaze control on perceived quality of communication in a shared immersive virtual environment</article-title>, in <source>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</source> (<publisher-loc>Fort Lauderdale, FL</publisher-loc>), <fpage>529</fpage>&#x02013;<lpage>536</lpage>. <pub-id pub-id-type="doi">10.1145/642611.642703</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Abtahi</surname> <given-names>P.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name></person-group> (<year>2019a</year>). <article-title>Individual differences in embodied distance estimation in virtual reality</article-title>, in <source>2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</source> (<publisher-loc>Osaka</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>941</fpage>&#x02013;<lpage>943</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2019.8798348</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Bellido</surname> <given-names>A. I.</given-names></name> <name><surname>Blom</surname> <given-names>K. J.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Rodriguez-Fornells</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>The neurological traces of look-alike avatars</article-title>. <source>Front. Hum. Neurosci</source>. <volume>10</volume>:<fpage>392</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00392</pub-id><pub-id pub-id-type="pmid">27536228</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Berger</surname> <given-names>C. C.</given-names></name></person-group> (<year>2019</year>). <article-title>Avatar embodiment enhances haptic confidence on the out-of-body touch illusion</article-title>. <source>IEEE Trans. Haptics</source> <volume>12</volume>, <fpage>319</fpage>&#x02013;<lpage>326</lpage>. <pub-id pub-id-type="doi">10.1109/TOH.2019.2925038</pub-id><pub-id pub-id-type="pmid">31251194</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Cohn</surname> <given-names>B.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Burin</surname> <given-names>D.</given-names></name> <name><surname>Maselli</surname> <given-names>A.</given-names></name></person-group> (<year>2020a</year>). <article-title>The self-avatar follower effect in virtual reality</article-title>, in <source>2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</source> (<publisher-loc>Atlanta, GA</publisher-loc>). <pub-id pub-id-type="doi">10.1109/VR46266.2020.00019</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Lanier</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Model of illusions and virtual reality</article-title>. <source>Front. Psychol</source>. <volume>8</volume>:<fpage>1125</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01125</pub-id><pub-id pub-id-type="pmid">28713323</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Peck</surname> <given-names>T. C.</given-names></name></person-group> (<year>2018</year>). <article-title>Avatar embodiment. Towards a standardized questionnaire</article-title>. <source>Front. Robot. AI</source> <volume>5</volume>:<fpage>74</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00074</pub-id></citation></ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonz&#x000E1;lez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Peck</surname> <given-names>T. C.</given-names></name> <name><surname>Rodr&#x000ED;guez-Fornells</surname> <given-names>A.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>A threat to a virtual hand elicits motor cortex activation</article-title>. <source>Exp. Brain Res</source>. <volume>232</volume>, <fpage>875</fpage>&#x02013;<lpage>887</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-013-3800-1</pub-id><pub-id pub-id-type="pmid">24337257</pub-id></citation></ref>
<ref id="B70">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Perez-Marcos</surname> <given-names>D.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>The contribution of real-time mirror reflections of motor actions on virtual body ownership in an immersive virtual environment</article-title>, in <source>2010 IEEE Virtual Reality Conference (VR)</source> (<publisher-loc>Waltham, MA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>111</fpage>&#x02013;<lpage>114</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2010.5444805</pub-id></citation></ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Birney</surname> <given-names>M. E.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name> <name><surname>Haslam</surname> <given-names>S. A.</given-names></name> <name><surname>Reicher</surname> <given-names>S. D.</given-names></name></person-group> (<year>2019b</year>). <article-title>Participant concerns for the learner in a virtual reality replication of the milgram obedience study</article-title>. <source>PLoS ONE</source> <volume>13</volume>:<fpage>e0209704</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0209704</pub-id><pub-id pub-id-type="pmid">30596731</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name> <name><surname>Hoogendyk</surname> <given-names>S.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name></person-group> (<year>2020b</year>). <article-title>Using facial animation to increase the enfacement illusion and avatar self-identification</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>26</volume>, <fpage>2023</fpage>&#x02013;<lpage>2029</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2020.2973075</pub-id><pub-id pub-id-type="pmid">32070973</pub-id></citation></ref>
<ref id="B73">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Graziano</surname> <given-names>M. S.</given-names></name> <name><surname>Botvinick</surname> <given-names>M.</given-names></name></person-group> (<year>2002</year>). <source>How the Brain Represents the Body: Insights From Neurophysiology and Psychology</source>. <publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>.<pub-id pub-id-type="pmid">18762203</pub-id></citation></ref>
<ref id="B74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Groom</surname> <given-names>V.</given-names></name> <name><surname>Bailenson</surname> <given-names>J. N.</given-names></name> <name><surname>Nass</surname> <given-names>C.</given-names></name></person-group> (<year>2009</year>). <article-title>The influence of racial embodiment on racial bias in immersive virtual environments</article-title>. <source>Soc. Influence</source> <volume>4</volume>, <fpage>231</fpage>&#x02013;<lpage>248</lpage>. <pub-id pub-id-type="doi">10.1080/15534510802643750</pub-id></citation></ref>
<ref id="B75">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>K.</given-names></name> <name><surname>Lincoln</surname> <given-names>P.</given-names></name> <name><surname>Davidson</surname> <given-names>P. L.</given-names></name> <name><surname>Busch</surname> <given-names>J.</given-names></name> <name><surname>Yu</surname> <given-names>X.</given-names></name> <name><surname>Whalen</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The relightables: volumetric performance capture of humans with realistic relighting</article-title>. <source>ACM Trans. Graph</source>. <volume>38</volume>, <fpage>217:1</fpage>&#x02013;<lpage>217:19</lpage>. <pub-id pub-id-type="doi">10.1145/3355089.3356571</pub-id></citation></ref>
<ref id="B76">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ha</surname> <given-names>S.</given-names></name> <name><surname>Bai</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>C. K.</given-names></name></person-group> (<year>2011</year>). <article-title>Human motion reconstruction from force sensors</article-title>, in <source>Symposium on Computer Animation (SCA &#x00027;11)</source> (<publisher-loc>Vancouver, BC</publisher-loc>), <fpage>129</fpage>&#x02013;<lpage>138</lpage>. <pub-id pub-id-type="doi">10.1145/2019406.2019424</pub-id></citation></ref>
<ref id="B77">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hamilton-Giachritsis</surname> <given-names>C.</given-names></name> <name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Quiroga</surname> <given-names>M. G.</given-names></name> <name><surname>Giachritsis</surname> <given-names>C.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Reducing risk and improving maternal perspective-taking and empathy using virtual embodiment</article-title>. <source>Sci. Rep</source>. <volume>8</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-21036-2</pub-id><pub-id pub-id-type="pmid">29445183</pub-id></citation></ref>
<ref id="B78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hasler</surname> <given-names>B. S.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Virtual race transformation reverses racial in-group bias</article-title>. <source>PLoS ONE</source> <volume>12</volume>:<fpage>e0174965</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0174965</pub-id><pub-id pub-id-type="pmid">28437469</pub-id></citation></ref>
<ref id="B79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Helbing</surname> <given-names>D.</given-names></name> <name><surname>Farkas</surname> <given-names>I.</given-names></name> <name><surname>Vicsek</surname> <given-names>T.</given-names></name></person-group> (<year>2000</year>). <article-title>Simulating dynamical features of escape panic</article-title>. <source>Nature</source> <volume>407</volume>, <fpage>487</fpage>&#x02013;<lpage>490</lpage>. <pub-id pub-id-type="doi">10.1038/35035023</pub-id><pub-id pub-id-type="pmid">11028994</pub-id></citation></ref>
<ref id="B80">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holden</surname> <given-names>D.</given-names></name> <name><surname>Komura</surname> <given-names>T.</given-names></name> <name><surname>Saito</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Phase-functioned neural networks for character control</article-title>. <source>ACM Trans. Graph</source>. <volume>36</volume>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1145/3072959.3073663</pub-id></citation></ref>
<ref id="B81">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>L.</given-names></name> <name><surname>Saito</surname> <given-names>S.</given-names></name> <name><surname>Wei</surname> <given-names>L.</given-names></name> <name><surname>Nagano</surname> <given-names>K.</given-names></name> <name><surname>Seo</surname> <given-names>J.</given-names></name> <name><surname>Fursund</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Avatar digitization from a single image for real-time rendering</article-title>. <source>ACM Trans. Graph</source>. <volume>36</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1145/3130800.3130887</pub-id></citation></ref>
<ref id="B82">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ichim</surname> <given-names>A. E.</given-names></name> <name><surname>Bouaziz</surname> <given-names>S.</given-names></name> <name><surname>Pauly</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Dynamic 3D avatar creation from hand-held video input</article-title>. <source>ACM Trans. Graph</source>. <volume>34</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1145/2766974</pub-id></citation></ref>
<ref id="B83">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Joshi</surname> <given-names>P.</given-names></name> <name><surname>Tien</surname> <given-names>W. C.</given-names></name> <name><surname>Desbrun</surname> <given-names>M.</given-names></name> <name><surname>Pighin</surname> <given-names>F.</given-names></name></person-group> (<year>2006</year>). <article-title>Learning controls for blend shape based realistic facial animation</article-title>, in <source>ACM Siggraph 2006 Courses</source> (<publisher-loc>Boston, MA</publisher-loc>), <fpage>17</fpage>. <pub-id pub-id-type="doi">10.1145/1185657.1185857</pub-id></citation></ref>
<ref id="B84">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Karras</surname> <given-names>T.</given-names></name> <name><surname>Laine</surname> <given-names>S.</given-names></name> <name><surname>Aila</surname> <given-names>T.</given-names></name></person-group> (<year>2019</year>). <article-title>A style-based generator architecture for generative adversarial networks</article-title>, in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Long Beach, CA</publisher-loc>), <fpage>4401</fpage>&#x02013;<lpage>4410</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2019.00453</pub-id><pub-id pub-id-type="pmid">32012000</pub-id></citation></ref>
<ref id="B85">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Bergstrom</surname> <given-names>I.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Drumming in immersive virtual reality: the body shapes the way we play</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>19</volume>, <fpage>597</fpage>&#x02013;<lpage>605</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2013.29</pub-id></citation>
</ref>
<ref id="B86">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Groten</surname> <given-names>R.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2012a</year>). <article-title>The sense of embodiment in virtual reality</article-title>. <source>Presence Teleoper. Virtual Environ</source>. <volume>21</volume>, <fpage>373</fpage>&#x02013;<lpage>387</lpage>. <pub-id pub-id-type="doi">10.1162/PRES_a_00124</pub-id></citation></ref>
<ref id="B87">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Maselli</surname> <given-names>A.</given-names></name> <name><surname>Kording</surname> <given-names>K. P.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Over my fake body: body ownership illusions for studying the multisensory basis of own-body perception</article-title>. <source>Front. Hum. Neurosci</source>. <volume>9</volume>:<fpage>141</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2015.00141</pub-id><pub-id pub-id-type="pmid">25852524</pub-id></citation></ref>
<ref id="B88">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Normand</surname> <given-names>J.-M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2012b</year>). <article-title>Extending body space in immersive virtual reality: a very long arm illusion</article-title>. <source>PLoS ONE</source> <volume>7</volume>:<fpage>e40867</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0040867</pub-id><pub-id pub-id-type="pmid">22829891</pub-id></citation></ref>
<ref id="B89">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kobbelt</surname> <given-names>L.</given-names></name> <name><surname>Botsch</surname> <given-names>M.</given-names></name></person-group> (<year>2004</year>). <article-title>A survey of point-based techniques in computer graphics</article-title>. <source>Comput. Graph</source>. <volume>28</volume>, <fpage>801</fpage>&#x02013;<lpage>814</lpage>. <pub-id pub-id-type="doi">10.1016/j.cag.2004.08.009</pub-id></citation></ref>
<ref id="B90">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kokkinara</surname> <given-names>E.</given-names></name> <name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Blom</surname> <given-names>K. J.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>First person perspective of seated participants over a walking virtual body leads to illusory agency over the walking</article-title>. <source>Sci. Rep</source>. <volume>6</volume>, <fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1038/srep28879</pub-id><pub-id pub-id-type="pmid">27364767</pub-id></citation></ref>
<ref id="B91">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kokkinara</surname> <given-names>E.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Measuring the effects through time of the influence of visuomotor and visuotactile synchronous stimulation on a virtual body ownership illusion</article-title>. <source>Perception</source> <volume>43</volume>, <fpage>43</fpage>&#x02013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1068/p7545</pub-id><pub-id pub-id-type="pmid">24689131</pub-id></citation></ref>
<ref id="B92">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>K&#x000F6;rding</surname> <given-names>K. P.</given-names></name> <name><surname>Beierholm</surname> <given-names>U.</given-names></name> <name><surname>Ma</surname> <given-names>W. J.</given-names></name> <name><surname>Quartz</surname> <given-names>S.</given-names></name> <name><surname>Tenenbaum</surname> <given-names>J. B.</given-names></name> <name><surname>Shams</surname> <given-names>L.</given-names></name></person-group> (<year>2007</year>). <article-title>Causal inference in multisensory perception</article-title>. <source>PLoS ONE</source> <volume>2</volume>:<fpage>e943</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0000943</pub-id><pub-id pub-id-type="pmid">17895984</pub-id></citation></ref>
<ref id="B93">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lanier</surname> <given-names>J.</given-names></name></person-group> (<year>1990</year>). <article-title>Riding the giant worm to saturn: post-symbolic communication in virtual reality</article-title>. <source>ARS Electron</source>. <volume>2</volume>, <fpage>186</fpage>&#x02013;<lpage>188</lpage>.</citation></ref>
<ref id="B94">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lanier</surname> <given-names>J.</given-names></name></person-group> (<year>2001</year>). <article-title>Virtually there</article-title>. <source>Sci. Am</source>. <volume>284</volume>, <fpage>66</fpage>&#x02013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1038/scientificamerican0401-66</pub-id><pub-id pub-id-type="pmid">11285824</pub-id></citation></ref>
<ref id="B95">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lanier</surname> <given-names>J.</given-names></name> <name><surname>Lasko-Harvill</surname> <given-names>A.</given-names></name> <name><surname>Blanchard</surname> <given-names>C.</given-names></name> <name><surname>Smithers</surname> <given-names>W.</given-names></name> <name><surname>Harvill</surname> <given-names>Y.</given-names></name> <name><surname>Coffman</surname> <given-names>A.</given-names></name></person-group> (<year>1988</year>). <article-title>From dataglove to datasuit</article-title>, in <source>Digest of Papers. COMPCON Spring 88 Thirty-Third IEEE Computer Society International Conference</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>536</fpage>&#x02013;<lpage>538</lpage>. <pub-id pub-id-type="doi">10.1109/CMPCON.1988.4925</pub-id></citation></ref>
<ref id="B96">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>Y.</given-names></name> <name><surname>Kim</surname> <given-names>S.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>Data-driven biped control</article-title>, in <source>ACM SIGGRAPH 2010 Papers</source> (<publisher-loc>Los Angeles, CA</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1145/1833349.1781155</pub-id></citation></ref>
<ref id="B97">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lempitsky</surname> <given-names>V.</given-names></name> <name><surname>Ivanov</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>Seamless mosaicing of image-based texture maps</article-title>, in <source>Computer Vision and Pattern Recognition</source> (<publisher-loc>Minneapolis, MN</publisher-loc>).</citation></ref>
<ref id="B98">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lenggenhager</surname> <given-names>B.</given-names></name> <name><surname>H&#x000E4;nsel</surname> <given-names>A.</given-names></name> <name><surname>von K&#x000E4;nel</surname> <given-names>R.</given-names></name> <name><surname>Curatolo</surname> <given-names>M.</given-names></name> <name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2010</year>). <source>Analgesic Effects of Illusory Self-Perception</source>.</citation></ref>
<ref id="B99">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lenggenhager</surname> <given-names>B.</given-names></name> <name><surname>Tadi</surname> <given-names>T.</given-names></name> <name><surname>Metzinger</surname> <given-names>T.</given-names></name> <name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2007</year>). <article-title>Video ergo sum: manipulating bodily self-consciousness</article-title>. <source>Science</source> <volume>317</volume>, <fpage>1096</fpage>&#x02013;<lpage>1099</lpage>. <pub-id pub-id-type="doi">10.1126/science.1143439</pub-id><pub-id pub-id-type="pmid">17717189</pub-id></citation></ref>
<ref id="B100">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lewis</surname> <given-names>J. P.</given-names></name> <name><surname>Anjyo</surname> <given-names>K.</given-names></name> <name><surname>Rhee</surname> <given-names>T.</given-names></name> <name><surname>Zhang</surname> <given-names>M.</given-names></name> <name><surname>Pighin</surname> <given-names>F. H.</given-names></name> <name><surname>Deng</surname> <given-names>Z.</given-names></name></person-group> (<year>2014</year>). <article-title>Practice and theory of blendshape facial models</article-title>. <source>Eurographics</source> <volume>1</volume>:<fpage>2</fpage>. <pub-id pub-id-type="doi">10.2312/egst.20141042</pub-id></citation></ref>
<ref id="B101">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Trutoiu</surname> <given-names>L.</given-names></name> <name><surname>Olszewski</surname> <given-names>K.</given-names></name> <name><surname>Wei</surname> <given-names>L.</given-names></name> <name><surname>Trutna</surname> <given-names>T.</given-names></name> <name><surname>Hsieh</surname> <given-names>P.-L.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Facial performance sensing head-mounted display</article-title>. <source>ACM Trans. Graph</source>. <volume>34</volume>:<fpage>47</fpage>. <pub-id pub-id-type="doi">10.1145/2766939</pub-id></citation></ref>
<ref id="B102">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>L.</given-names></name> <name><surname>Hodgins</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>Learning basketball dribbling skills using trajectory optimization and deep reinforcement learning</article-title>. <source>ACM Trans. Graph</source>. <volume>37</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1145/3197517.3201315</pub-id></citation></ref>
<ref id="B103">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Llobera</surname> <given-names>J.</given-names></name> <name><surname>Gonz&#x000E1;lez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Perez-Marcos</surname> <given-names>D.</given-names></name> <name><surname>Valls-Sol&#x000E9;</surname> <given-names>J.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2013a</year>). <article-title>Virtual reality for assessment of patients suffering chronic pain: a case study</article-title>. <source>Exp. Brain Res</source>. <volume>225</volume>, <fpage>105</fpage>&#x02013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-012-3352-9</pub-id><pub-id pub-id-type="pmid">23223781</pub-id></citation></ref>
<ref id="B104">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Llobera</surname> <given-names>J.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013b</year>). <article-title>The relationship between virtual body ownership and temperature sensitivity</article-title>. <source>J. R. Soc. Interface</source> <volume>10</volume>:<fpage>20130300</fpage>. <pub-id pub-id-type="doi">10.1098/rsif.2013.0300</pub-id><pub-id pub-id-type="pmid">23720537</pub-id></citation></ref>
<ref id="B105">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lombardi</surname> <given-names>S.</given-names></name> <name><surname>Saragih</surname> <given-names>J.</given-names></name> <name><surname>Simon</surname> <given-names>T.</given-names></name> <name><surname>Sheikh</surname> <given-names>Y.</given-names></name></person-group> (<year>2018</year>). <article-title>Deep appearance models for face rendering</article-title>. <source>ACM Trans. Graph</source>. <volume>37</volume>, <fpage>68:1</fpage>&#x02013;<lpage>68:13</lpage>. <pub-id pub-id-type="doi">10.1145/3197517.3201401</pub-id></citation></ref>
<ref id="B106">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lombardi</surname> <given-names>S.</given-names></name> <name><surname>Simon</surname> <given-names>T.</given-names></name> <name><surname>Saragih</surname> <given-names>J. M.</given-names></name> <name><surname>Schwartz</surname> <given-names>G.</given-names></name> <name><surname>Lehrmann</surname> <given-names>A. M.</given-names></name> <name><surname>Sheikh</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Neural volumes: Learning dynamic renderable volumes from images</article-title>. <source>CoRR</source> abs/1906.07751. <pub-id pub-id-type="doi">10.1145/3306346.3323020</pub-id></citation></ref>
<ref id="B107">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Longo</surname> <given-names>M. R.</given-names></name> <name><surname>Sch&#x000FC;&#x000FC;r</surname> <given-names>F.</given-names></name> <name><surname>Kammers</surname> <given-names>M. P.</given-names></name> <name><surname>Tsakiris</surname> <given-names>M.</given-names></name> <name><surname>Haggard</surname> <given-names>P.</given-names></name></person-group> (<year>2008</year>). <article-title>What is embodiment? A psychometric approach</article-title>. <source>Cognition</source> <volume>107</volume>, <fpage>978</fpage>&#x02013;<lpage>998</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2007.12.004</pub-id><pub-id pub-id-type="pmid">18262508</pub-id></citation></ref>
<ref id="B108">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Loop</surname> <given-names>C. T.</given-names></name> <name><surname>Cai</surname> <given-names>Q.</given-names></name> <name><surname>Orts-Escolano</surname> <given-names>S.</given-names></name> <name><surname>Chou</surname> <given-names>P. A.</given-names></name></person-group> (<year>2016</year>). <article-title>A closed-form bayesian fusion equation using occupancy probabilities</article-title>, in <source>Fourth International Conference on 3D Vision, 3DV 2016</source> (<publisher-loc>Stanford, CA</publisher-loc>: <publisher-name>IEEE Computer Society</publisher-name>), <fpage>380</fpage>&#x02013;<lpage>388</lpage>. <pub-id pub-id-type="doi">10.1109/3DV.2016.47</pub-id></citation></ref>
<ref id="B109">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>L&#x000F3;pez</surname> <given-names>A.</given-names></name> <name><surname>Chaumette</surname> <given-names>F.</given-names></name> <name><surname>Marchand</surname> <given-names>E.</given-names></name> <name><surname>Pettr&#x000E9;</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Character navigation in dynamic environments based on optical flow</article-title>. <source>Comput. Graph. Forum</source> <volume>38</volume>, <fpage>181</fpage>&#x02013;<lpage>192</lpage>. <pub-id pub-id-type="doi">10.1111/cgf.13629</pub-id></citation></ref>
<ref id="B110">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lou</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Nduka</surname> <given-names>C.</given-names></name> <name><surname>Hamedi</surname> <given-names>M.</given-names></name> <name><surname>Mavridou</surname> <given-names>I.</given-names></name> <name><surname>Wang</surname> <given-names>F.-Y.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Realistic facial expression reconstruction for VR HMD users</article-title>. <source>IEEE Trans. Multimed</source>. <volume>22</volume>, <fpage>730</fpage>&#x02013;<lpage>743</lpage>. <pub-id pub-id-type="doi">10.1109/TMM.2019.2933338</pub-id></citation></ref>
<ref id="B111">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luo</surname> <given-names>L.</given-names></name> <name><surname>Zhou</surname> <given-names>S.</given-names></name> <name><surname>Cai</surname> <given-names>W.</given-names></name> <name><surname>Low</surname> <given-names>M. Y. H.</given-names></name> <name><surname>Tian</surname> <given-names>F.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Agent-based human behavior modeling for crowd simulation</article-title>. <source>Comput. Anim. Virtual Worlds</source> <volume>19</volume>, <fpage>271</fpage>&#x02013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.1002/cav.238</pub-id></citation></ref>
<ref id="B112">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ma</surname> <given-names>Y.</given-names></name> <name><surname>Paterson</surname> <given-names>H. M.</given-names></name> <name><surname>Pollick</surname> <given-names>F. E.</given-names></name></person-group> (<year>2006</year>). <article-title>A motion capture library for the study of identity, gender, and emotion perception from biological motion</article-title>. <source>Behav. Res. Methods</source> <volume>38</volume>, <fpage>134</fpage>&#x02013;<lpage>141</lpage>. <pub-id pub-id-type="doi">10.3758/BF03192758</pub-id><pub-id pub-id-type="pmid">16817522</pub-id></citation></ref>
<ref id="B113">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>MacQuarrie</surname> <given-names>A.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Perception of volumetric characters&#x00027; eye-gaze direction in head-mounted displays</article-title>, in <source>2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</source> (<publisher-loc>Osaka</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>645</fpage>&#x02013;<lpage>654</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2019.8797852</pub-id></citation></ref>
<ref id="B114">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maister</surname> <given-names>L.</given-names></name> <name><surname>Sebanz</surname> <given-names>N.</given-names></name> <name><surname>Knoblich</surname> <given-names>G.</given-names></name> <name><surname>Tsakiris</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Experiencing ownership over a dark-skinned body reduces implicit racial bias</article-title>. <source>Cognition</source> <volume>128</volume>, <fpage>170</fpage>&#x02013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2013.04.002</pub-id><pub-id pub-id-type="pmid">23680793</pub-id></citation></ref>
<ref id="B115">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maister</surname> <given-names>L.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Tsakiris</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Changing bodies changes minds: owning another body affects social cognition</article-title>. <source>Trends Cogn. Sci</source>. <volume>19</volume>, <fpage>6</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2014.11.001</pub-id><pub-id pub-id-type="pmid">25524273</pub-id></citation></ref>
<ref id="B116">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Makin</surname> <given-names>T. R.</given-names></name> <name><surname>Holmes</surname> <given-names>N. P.</given-names></name> <name><surname>Ehrsson</surname> <given-names>H. H.</given-names></name></person-group> (<year>2008</year>). <article-title>On the other hand: dummy hands and peripersonal space</article-title>. <source>Behav. Brain Res</source>. <volume>191</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1016/j.bbr.2008.02.041</pub-id><pub-id pub-id-type="pmid">18423906</pub-id></citation></ref>
<ref id="B117">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mancini</surname> <given-names>F.</given-names></name> <name><surname>Longo</surname> <given-names>M. R.</given-names></name> <name><surname>Kammers</surname> <given-names>M. P.</given-names></name> <name><surname>Haggard</surname> <given-names>P.</given-names></name></person-group> (<year>2011</year>). <article-title>Visual distortion of body size modulates pain perception</article-title>. <source>Psychol. Sci</source>. <volume>22</volume>, <fpage>325</fpage>&#x02013;<lpage>330</lpage>. <pub-id pub-id-type="doi">10.1177/0956797611398496</pub-id><pub-id pub-id-type="pmid">21303990</pub-id></citation></ref>
<ref id="B118">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martini</surname> <given-names>M.</given-names></name> <name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Maselli</surname> <given-names>A.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2015</year>). <article-title>The body fades away: investigating the effects of transparency of an embodied virtual body on pain threshold and body ownership</article-title>. <source>Sci. Rep</source>. <volume>5</volume>:<fpage>13948</fpage>. <pub-id pub-id-type="doi">10.1038/srep13948</pub-id><pub-id pub-id-type="pmid">26415748</pub-id></citation></ref>
<ref id="B119">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martini</surname> <given-names>M.</given-names></name> <name><surname>P&#x000E9;rez Marcos</surname> <given-names>D.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2013</year>). <article-title>What color is my arm? Changes in skin color of an embodied virtual arm modulates pain threshold</article-title>. <source>Front. Hum. Neurosci</source>. <volume>7</volume>:<fpage>438</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2013.00438</pub-id><pub-id pub-id-type="pmid">23914172</pub-id></citation></ref>
<ref id="B120">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martini</surname> <given-names>M.</given-names></name> <name><surname>Perez-Marcos</surname> <given-names>D.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2014</year>). <article-title>Modulation of pain threshold by virtual body ownership</article-title>. <source>Eur. J. Pain</source> <volume>18</volume>, <fpage>1040</fpage>&#x02013;<lpage>1048</lpage>. <pub-id pub-id-type="doi">10.1002/j.1532-2149.2014.00451.x</pub-id></citation></ref>
<ref id="B121">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maselli</surname> <given-names>A.</given-names></name> <name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>L&#x000F3;pez-Moliner</surname> <given-names>J.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>The sense of body ownership relaxes temporal constraints for multisensory integration</article-title>. <source>Sci. Rep</source>. <volume>6</volume>:<fpage>30628</fpage>. <pub-id pub-id-type="doi">10.1038/srep30628</pub-id></citation></ref>
<ref id="B122">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maselli</surname> <given-names>A.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>The building blocks of the full body ownership illusion</article-title>. <source>Front. Hum. Neurosci</source>. <volume>7</volume>:<fpage>83</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2013.00083</pub-id></citation></ref>
<ref id="B123">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maselli</surname> <given-names>A.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Sliding perspectives: dissociating ownership from self-location during full body illusions in virtual reality</article-title>. <source>Front. Hum. Neurosci</source>. <volume>8</volume>:<fpage>693</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2014.00693</pub-id></citation></ref>
<ref id="B124">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Matamala-Gomez</surname> <given-names>M.</given-names></name> <name><surname>Donegan</surname> <given-names>T.</given-names></name> <name><surname>Bottiroli</surname> <given-names>S.</given-names></name> <name><surname>Sandrini</surname> <given-names>G.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Tassorelli</surname> <given-names>C.</given-names></name></person-group> (<year>2019a</year>). <article-title>Immersive virtual reality and virtual embodiment for pain relief</article-title>. <source>Front. Hum. Neurosci</source>. <volume>13</volume>:<fpage>279</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2019.00279</pub-id></citation></ref>
<ref id="B125">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Matamala-Gomez</surname> <given-names>M.</given-names></name> <name><surname>Gonzalez</surname> <given-names>A. M. D.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2019b</year>). <article-title>Decreasing pain ratings in chronic arm pain through changing a virtual body: different strategies for different pain types</article-title>. <source>J. Pain</source> <volume>20</volume>, <fpage>685</fpage>&#x02013;<lpage>697</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpain.2018.12.001</pub-id><pub-id pub-id-type="pmid">30562584</pub-id></citation></ref>
<ref id="B126">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Matamala-Gomez</surname> <given-names>M.</given-names></name> <name><surname>Nierula</surname> <given-names>B.</given-names></name> <name><surname>Donegan</surname> <given-names>T.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2020</year>). <article-title>Manipulating the perceived shape and color of a virtual limb can modulate pain responses</article-title>. <source>J. Clin. Med</source>. <volume>9</volume>:<fpage>291</fpage>. <pub-id pub-id-type="doi">10.3390/jcm9020291</pub-id><pub-id pub-id-type="pmid">31973014</pub-id></citation></ref>
<ref id="B127">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mehta</surname> <given-names>D.</given-names></name> <name><surname>Sridhar</surname> <given-names>S.</given-names></name> <name><surname>Sotnychenko</surname> <given-names>O.</given-names></name> <name><surname>Rhodin</surname> <given-names>H.</given-names></name> <name><surname>Shafiei</surname> <given-names>M.</given-names></name> <name><surname>Seidel</surname> <given-names>H.-P.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Vnect: real-time 3D human pose estimation with a single rgb camera</article-title>. <source>ACM Trans. Graph</source>. <volume>36</volume>, <fpage>7291</fpage>&#x02013;<lpage>7299</lpage>. <pub-id pub-id-type="doi">10.1145/3072959.3073596</pub-id></citation></ref>
<ref id="B128">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moeslund</surname> <given-names>T. B.</given-names></name> <name><surname>Hilton</surname> <given-names>A.</given-names></name> <name><surname>Kr&#x000FC;ger</surname> <given-names>V.</given-names></name></person-group> (<year>2006</year>). <article-title>A survey of advances in vision-based human motion capture and analysis</article-title>. <source>Comput. Vis. Image Understand</source>. <volume>104</volume>, <fpage>90</fpage>&#x02013;<lpage>126</lpage>. <pub-id pub-id-type="doi">10.1016/j.cviu.2006.08.002</pub-id></citation></ref>
<ref id="B129">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mohler</surname> <given-names>B. J.</given-names></name> <name><surname>Creem-Regehr</surname> <given-names>S. H.</given-names></name> <name><surname>Thompson</surname> <given-names>W. B.</given-names></name> <name><surname>B&#x000FC;lthoff</surname> <given-names>H. H.</given-names></name></person-group> (<year>2010</year>). <article-title>The effect of viewing a self-avatar on distance judgments in an hmd-based virtual environment</article-title>. <source>Presence Teleoper. Virtual Environ</source>. <volume>19</volume>, <fpage>230</fpage>&#x02013;<lpage>242</lpage>. <pub-id pub-id-type="doi">10.1162/pres.19.3.230</pub-id></citation></ref>
<ref id="B130">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>M&#x000F6;lbert</surname> <given-names>S. C.</given-names></name> <name><surname>Thaler</surname> <given-names>A.</given-names></name> <name><surname>Mohler</surname> <given-names>B. J.</given-names></name> <name><surname>Streuber</surname> <given-names>S.</given-names></name> <name><surname>Romero</surname> <given-names>J.</given-names></name> <name><surname>Black</surname> <given-names>M. J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Assessing body image in anorexia nervosa using biometric self-avatars in virtual reality: attitudinal components rather than visual body size estimation are distorted</article-title>. <source>Psychol. Med</source>. <volume>48</volume>, <fpage>642</fpage>&#x02013;<lpage>653</lpage>. <pub-id pub-id-type="doi">10.1017/S0033291717002008</pub-id></citation></ref>
<ref id="B131">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname> <given-names>M.</given-names></name></person-group> (<year>1970</year>). <article-title>The uncanny valley</article-title>. <source>Energy</source> <volume>7</volume>, <fpage>33</fpage>&#x02013;<lpage>35</lpage>.</citation></ref>
<ref id="B132">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Narang</surname> <given-names>S.</given-names></name> <name><surname>Best</surname> <given-names>A.</given-names></name> <name><surname>Randhavane</surname> <given-names>T.</given-names></name> <name><surname>Shapiro</surname> <given-names>A.</given-names></name> <name><surname>Manocha</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Pedvr: simulating gaze-based interactions between a real user and virtual crowds</article-title>, in <source>Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology</source> (<publisher-loc>Munich</publisher-loc>), <fpage>91</fpage>&#x02013;<lpage>100</lpage>. <pub-id pub-id-type="doi">10.1145/2993369.2993378</pub-id></citation></ref>
<ref id="B133">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neyret</surname> <given-names>S.</given-names></name> <name><surname>Navarro</surname> <given-names>X.</given-names></name> <name><surname>Beacco</surname> <given-names>A.</given-names></name> <name><surname>Oliva</surname> <given-names>R.</given-names></name> <name><surname>Bourdin</surname> <given-names>P.</given-names></name> <name><surname>Valenzuela</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>An embodied perspective as a victim of sexual harassment in virtual reality reduces action conformity in a later milgram obedience scenario</article-title>. <source>Sci. Rep</source>. <volume>10</volume>, <fpage>1</fpage>&#x02013;<lpage>18</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-62932-w</pub-id><pub-id pub-id-type="pmid">32277079</pub-id></citation></ref>
<ref id="B134">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nierula</surname> <given-names>B.</given-names></name> <name><surname>Martini</surname> <given-names>M.</given-names></name> <name><surname>Matamala-Gomez</surname> <given-names>M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2017</year>). <article-title>Seeing an embodied virtual hand is analgesic contingent on colocation</article-title>. <source>J. Pain</source> <volume>18</volume>, <fpage>645</fpage>&#x02013;<lpage>655</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpain.2017.01.003</pub-id><pub-id pub-id-type="pmid">28108385</pub-id></citation></ref>
<ref id="B135">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nierula</surname> <given-names>B.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Martini</surname> <given-names>M.</given-names></name> <name><surname>Borrell</surname> <given-names>M.</given-names></name> <name><surname>Nikulin</surname> <given-names>V. V.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2019</year>). <article-title>Agency and responsibility over virtual movements controlled through different paradigms of brain-computer interface</article-title>. <source>J. Physiol</source>. <volume>1</volume>, <fpage>1</fpage>&#x02013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1113/JP278167</pub-id><pub-id pub-id-type="pmid">31647122</pub-id></citation></ref>
<ref id="B136">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Olivier</surname> <given-names>A.-H.</given-names></name> <name><surname>Bruneau</surname> <given-names>J.</given-names></name> <name><surname>Cirio</surname> <given-names>G.</given-names></name> <name><surname>Pettr&#x000E9;</surname> <given-names>J.</given-names></name></person-group> (<year>2014</year>). <article-title>A virtual reality platform to study crowd behaviors</article-title>. <source>Transport. Res. Proc</source>. <volume>2</volume>, <fpage>114</fpage>&#x02013;<lpage>122</lpage>. <pub-id pub-id-type="doi">10.1016/j.trpro.2014.09.015</pub-id></citation></ref>
<ref id="B137">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Orts-Escolano</surname> <given-names>S.</given-names></name> <name><surname>Rhemann</surname> <given-names>C.</given-names></name> <name><surname>Fanello</surname> <given-names>S.</given-names></name> <name><surname>Chang</surname> <given-names>W.</given-names></name> <name><surname>Kowdle</surname> <given-names>A.</given-names></name> <name><surname>Degtyarev</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Holoportation: virtual 3D teleportation in real-time</article-title>, in <source>Proceedings of the 29th Annual Symposium on User Interface Software and Technology</source> (<publisher-loc>Tokyo</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>741</fpage>&#x02013;<lpage>754</lpage>. <pub-id pub-id-type="doi">10.1145/2984511.2984517</pub-id></citation></ref>
<ref id="B138">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Orvalho</surname> <given-names>V.</given-names></name> <name><surname>Bastos</surname> <given-names>P.</given-names></name> <name><surname>Parke</surname> <given-names>F. I.</given-names></name> <name><surname>Oliveira</surname> <given-names>B.</given-names></name> <name><surname>Alvarez</surname> <given-names>X.</given-names></name></person-group> (<year>2012</year>). <article-title>A facial rigging survey</article-title>, in <source>Eurographics (STARs)</source> (<publisher-loc>Cagliari</publisher-loc>), <fpage>183</fpage>&#x02013;<lpage>204</lpage>.</citation></ref>
<ref id="B139">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Osimo</surname> <given-names>S. A.</given-names></name> <name><surname>Pizarro</surname> <given-names>R.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Conversations between self and self as sigmund freud-a virtual body ownership paradigm for self counselling</article-title>. <source>Sci. Rep</source>. <volume>5</volume>:<fpage>13899</fpage>. <pub-id pub-id-type="doi">10.1038/srep13899</pub-id><pub-id pub-id-type="pmid">26354311</pub-id></citation></ref>
<ref id="B140">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Overbeck</surname> <given-names>R. S.</given-names></name> <name><surname>Erickson</surname> <given-names>D.</given-names></name> <name><surname>Evangelakos</surname> <given-names>D.</given-names></name> <name><surname>Pharr</surname> <given-names>M.</given-names></name> <name><surname>Debevec</surname> <given-names>P.</given-names></name></person-group> (<year>2018</year>). <article-title>A system for acquiring, processing, and rendering panoramic light field stills for virtual reality</article-title>. <source>ACM Trans. Graph</source>. <volume>37</volume>:<fpage>197</fpage>. <pub-id pub-id-type="doi">10.1145/3272127.3275031</pub-id></citation></ref>
<ref id="B141">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Padrao</surname> <given-names>G.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Rodriguez-Fornells</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Violating body movement semantics: neural signatures of self-generated and external-generated errors</article-title>. <source>Neuroimage</source> <volume>124</volume>, <fpage>147</fpage>&#x02013;<lpage>156</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.08.022</pub-id><pub-id pub-id-type="pmid">26282856</pub-id></citation></ref>
<ref id="B142">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Gillies</surname> <given-names>M.</given-names></name> <name><surname>Barker</surname> <given-names>C.</given-names></name> <name><surname>Clark</surname> <given-names>D. M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2012</year>). <article-title>Socially anxious and confident men interact with a forward virtual woman: an experimental study</article-title>. <source>PLoS ONE</source> <volume>7</volume>:<fpage>e32931</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0032931</pub-id></citation></ref>
<ref id="B143">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Confronting a moral dilemma in virtual reality: a pilot study</article-title>, in <source>Proceedings of HCI 2011 The 25th BCS Conference on Human Computer Interaction</source> (<publisher-loc>Newcastle</publisher-loc>), <fpage>46</fpage>&#x02013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.14236/ewic/HCI2011.26</pub-id></citation></ref>
<ref id="B144">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Beacco</surname> <given-names>A.</given-names></name> <name><surname>Navarro</surname> <given-names>X.</given-names></name> <name><surname>Bellido Rivas</surname> <given-names>A. I.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>The responses of medical general practitioners to unreasonable patient demand for antibiotics&#x02013;a study of medical ethics using immersive virtual reality</article-title>. <source>PLoS ONE</source> <volume>11</volume>:<fpage>e0146837</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0146837</pub-id><pub-id pub-id-type="pmid">26889676</pub-id></citation></ref>
<ref id="B145">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Mitchell</surname> <given-names>K.</given-names></name></person-group> (<year>2020</year>). <article-title>PoseMMR: a collaborative mixed reality authoring tool for character animation</article-title>, in <source>IEEE Virtual Reality</source> (<publisher-loc>Atlanta, GA</publisher-loc>). <pub-id pub-id-type="doi">10.1109/VRW50115.2020.00230</pub-id></citation></ref>
<ref id="B146">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Steptoe</surname> <given-names>W.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Comparing flat and spherical displays in a trust scenario in avatar-mediated interaction</article-title>, in <source>Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems</source> (<publisher-name>ACM</publisher-name>), <fpage>1397</fpage>&#x02013;<lpage>1406</lpage>. <pub-id pub-id-type="doi">10.1145/2556288.2557276</pub-id></citation></ref>
<ref id="B147">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pandey</surname> <given-names>R.</given-names></name> <name><surname>Tkach</surname> <given-names>A.</given-names></name> <name><surname>Yang</surname> <given-names>S.</given-names></name> <name><surname>Pidlypenskyi</surname> <given-names>P.</given-names></name> <name><surname>Taylor</surname> <given-names>J.</given-names></name> <name><surname>Martin-Brualla</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Volumetric capture of humans with a single RGBD camera via semi-parametric learning</article-title>. <source>CoRR</source> abs/1905.12162. <pub-id pub-id-type="doi">10.1109/CVPR.2019.00994</pub-id></citation></ref>
<ref id="B148">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Parger</surname> <given-names>M.</given-names></name> <name><surname>Mueller</surname> <given-names>J. H.</given-names></name> <name><surname>Schmalstieg</surname> <given-names>D.</given-names></name> <name><surname>Steinberger</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Human upper-body inverse kinematics for increased embodiment in consumer-grade virtual reality</article-title>, in <source>Proceedings of the 24th ACM Symposium on Virtual Reality Software and Technology</source> (<publisher-loc>Tokyo</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1145/3281505.3281529</pub-id></citation></ref>
<ref id="B149">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peck</surname> <given-names>T. C.</given-names></name> <name><surname>Seinfeld</surname> <given-names>S.</given-names></name> <name><surname>Aglioti</surname> <given-names>S. M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Putting yourself in the skin of a black avatar reduces implicit racial bias</article-title>. <source>Conscious. Cogn</source>. <volume>22</volume>, <fpage>779</fpage>&#x02013;<lpage>787</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2013.04.016</pub-id><pub-id pub-id-type="pmid">23727712</pub-id></citation></ref>
<ref id="B150">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pelechano</surname> <given-names>N.</given-names></name> <name><surname>Allbeck</surname> <given-names>J. M.</given-names></name> <name><surname>Badler</surname> <given-names>N. I.</given-names></name></person-group> (<year>2007</year>). <article-title>Controlling individual agents in high-density crowd simulation</article-title>, in <source>Proceedings of the 2007 ACM SIGGRAPH/Eurographics Symposium on Computer Animation</source> (<publisher-loc>Prague</publisher-loc>: <publisher-name>Eurographics Association</publisher-name>), <fpage>99</fpage>&#x02013;<lpage>108</lpage>.</citation></ref>
<ref id="B151">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pelechano</surname> <given-names>N.</given-names></name> <name><surname>Allbeck</surname> <given-names>J. M.</given-names></name> <name><surname>Kapadia</surname> <given-names>M.</given-names></name> <name><surname>Badler</surname> <given-names>N. I.</given-names></name></person-group> (<year>2016</year>). <source>Simulating Heterogeneous Crowds With Interactive Behaviors</source>. <publisher-name>CRC Press</publisher-name>.</citation></ref>
<ref id="B152">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelechano</surname> <given-names>N.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Beacco</surname> <given-names>A.</given-names></name></person-group> (<year>2011</year>). <article-title>Avatar locomotion in crowd simulation</article-title>. <source>Int. J. Virtual Reality</source> <volume>10</volume>, <fpage>13</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.20870/IJVR.2011.10.1.2796</pub-id></citation></ref>
<ref id="B153">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>X. B.</given-names></name> <name><surname>Abbeel</surname> <given-names>P.</given-names></name> <name><surname>Levine</surname> <given-names>S.</given-names></name> <name><surname>van de Panne</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Deepmimic: example-guided deep reinforcement learning of physics-based character skills</article-title>. <source>ACM Trans. Graph</source>. <volume>37</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1145/3197517.3201311</pub-id></citation></ref>
<ref id="B154">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pertaub</surname> <given-names>D.-P.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Barker</surname> <given-names>C.</given-names></name></person-group> (<year>2002</year>). <article-title>An experiment on public speaking anxiety in response to three different types of virtual audience</article-title>. <source>Presence Teleoper. Virtual Environ</source>. <volume>11</volume>, <fpage>68</fpage>&#x02013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1162/105474602317343668</pub-id></citation></ref>
<ref id="B155">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petkova</surname> <given-names>V. I.</given-names></name> <name><surname>Ehrsson</surname> <given-names>H. H.</given-names></name></person-group> (<year>2008</year>). <article-title>If i were you: perceptual illusion of body swapping</article-title>. <source>PLoS ONE</source> <volume>3</volume>:<fpage>e3832</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0003832</pub-id><pub-id pub-id-type="pmid">19050755</pub-id></citation></ref>
<ref id="B156">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petkova</surname> <given-names>V. I.</given-names></name> <name><surname>Khoshnevis</surname> <given-names>M.</given-names></name> <name><surname>Ehrsson</surname> <given-names>H. H.</given-names></name></person-group> (<year>2011</year>). <article-title>The perspective matters! Multisensory integration in ego-centric reference frames determines full-body ownership</article-title>. <source>Front. Psychol</source>. <volume>2</volume>:<fpage>35</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2011.00035</pub-id><pub-id pub-id-type="pmid">21687436</pub-id></citation></ref>
<ref id="B157">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Phillips</surname> <given-names>L.</given-names></name> <name><surname>Ries</surname> <given-names>B.</given-names></name> <name><surname>Kaeding</surname> <given-names>M.</given-names></name> <name><surname>Interrante</surname> <given-names>V.</given-names></name></person-group> (<year>2010</year>). <article-title>Avatar self-embodiment enhances distance perception accuracy in non-photorealistic immersive virtual environments</article-title>, in <source>2010 IEEE Virtual Reality Conference (VR)</source> (<publisher-loc>Waltham, MA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>115</fpage>&#x02013;<lpage>1148</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2010.5444802</pub-id></citation></ref>
<ref id="B158">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Piryankova</surname> <given-names>I. V.</given-names></name> <name><surname>Wong</surname> <given-names>H. Y.</given-names></name> <name><surname>Linkenauger</surname> <given-names>S. A.</given-names></name> <name><surname>Stinson</surname> <given-names>C.</given-names></name> <name><surname>Longo</surname> <given-names>M. R.</given-names></name> <name><surname>B&#x000FC;lthoff</surname> <given-names>H. H.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Owning an overweight or underweight body: distinguishing the physical, experienced and virtual body</article-title>. <source>PLoS ONE</source> <volume>9</volume>:<fpage>e103428</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0103428</pub-id><pub-id pub-id-type="pmid">25083784</pub-id></citation></ref>
<ref id="B159">
<citation citation-type="journal"><person-group person-group-type="author"><collab>PoseVR</collab></person-group> (<year>2019</year>). <source>Walt Disney Animation Studios</source>. PoseVR.</citation></ref>
<ref id="B160">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pujades</surname> <given-names>S.</given-names></name> <name><surname>Mohler</surname> <given-names>B.</given-names></name> <name><surname>Thaler</surname> <given-names>A.</given-names></name> <name><surname>Tesch</surname> <given-names>J.</given-names></name> <name><surname>Mahmood</surname> <given-names>N.</given-names></name> <name><surname>Hesse</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The virtual caliper: rapid creation of metrically accurate avatars from 3d measurements</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>25</volume>, <fpage>1887</fpage>&#x02013;<lpage>1897</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2019.2898748</pub-id><pub-id pub-id-type="pmid">30794512</pub-id></citation></ref>
<ref id="B161">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>R&#x000ED;os</surname> <given-names>A.</given-names></name> <name><surname>Palomar</surname> <given-names>M.</given-names></name> <name><surname>Pelechano</surname> <given-names>N.</given-names></name></person-group> (<year>2018</year>). <article-title>Users&#x00027; locomotor behavior in collaborative virtual reality</article-title>, in <source>Proceedings of the 11th Annual International Conference on Motion, Interaction, and Games</source> (<publisher-loc>Limassol</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1145/3274247.3274513</pub-id></citation></ref>
<ref id="B162">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>R&#x000ED;os</surname> <given-names>A.</given-names></name> <name><surname>Pelechano</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Follower behavior under stress in immersive VR</article-title>. <source>Virtual Reality</source> <volume>1</volume>, <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1007/s10055-020-00428-8</pub-id></citation></ref>
<ref id="B163">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Roth</surname> <given-names>D.</given-names></name> <name><surname>Lugrin</surname> <given-names>J.</given-names></name> <name><surname>von Mammen</surname> <given-names>S.</given-names></name> <name><surname>Latoschik</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Controllers &#x00026; inputs: masters of puppets</article-title>, in <source>Avatar, Assembled: The Social and Technical Anatomy of Digital Bodies</source>, eds <person-group person-group-type="editor"><name><surname>Bank</surname> <given-names>J.</given-names></name></person-group> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Peter Lang</publisher-name>), <fpage>281</fpage>&#x02013;<lpage>290</lpage>.</citation></ref>
<ref id="B164">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Roth</surname> <given-names>D.</given-names></name> <name><surname>Lugrin</surname> <given-names>J.-L.</given-names></name> <name><surname>B&#x000FC;ser</surname> <given-names>J.</given-names></name> <name><surname>Bente</surname> <given-names>G.</given-names></name> <name><surname>Fuhrmann</surname> <given-names>A.</given-names></name> <name><surname>Latoschik</surname> <given-names>M. E.</given-names></name></person-group> (<year>2016</year>). <article-title>A simplified inverse kinematic approach for embodied VR applications</article-title>, in <source>2016 IEEE Virtual Reality (VR)</source> (<publisher-loc>Greenville, SC</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>275</fpage>&#x02013;<lpage>276</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2016.7504760</pub-id></citation></ref>
<ref id="B165">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Roth</surname> <given-names>D.</given-names></name> <name><surname>Stauffert</surname> <given-names>J.-P.</given-names></name> <name><surname>Latoschik</surname> <given-names>M. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Avatar embodiment, behavior replication, and kinematics in virtual reality</article-title>, in <source>VR Developer Gems</source> (<publisher-name>AK Peters; CRC Press</publisher-name>), <fpage>321</fpage>&#x02013;<lpage>346</lpage>. <pub-id pub-id-type="doi">10.1201/b21598-17</pub-id></citation></ref>
<ref id="B166">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rovira</surname> <given-names>A.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2009</year>). <article-title>The use of virtual reality in the study of people&#x00027;s responses to violent incidents</article-title>. <source>Front. Behav. Neurosci</source>. <volume>3</volume>:<fpage>59</fpage>. <pub-id pub-id-type="doi">10.3389/neuro.08.059.2009</pub-id><pub-id pub-id-type="pmid">20076762</pub-id></citation></ref>
<ref id="B167">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saito</surname> <given-names>S.</given-names></name> <name><surname>Huang</surname> <given-names>Z.</given-names></name> <name><surname>Natsume</surname> <given-names>R.</given-names></name> <name><surname>Morishima</surname> <given-names>S.</given-names></name> <name><surname>Kanazawa</surname> <given-names>A.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>Pifu: pixel-aligned implicit function for high-resolution clothed human digitization</article-title>, in <source>Proceedings of the IEEE International Conference on Computer Vision</source>, <fpage>2304</fpage>&#x02013;<lpage>2314</lpage>. <pub-id pub-id-type="doi">10.1109/ICCV.2019.00239</pub-id></citation></ref>
<ref id="B168">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Saito</surname> <given-names>S.</given-names></name> <name><surname>Wei</surname> <given-names>L.</given-names></name> <name><surname>Fursund</surname> <given-names>J.</given-names></name> <name><surname>Hu</surname> <given-names>L.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name> <name><surname>Yu</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Pinscreen: 3D avatar from a single image</article-title>, in <source>SIGGRAPH Asia Emerging Technologies</source> (<publisher-loc>Seoul</publisher-loc>). <pub-id pub-id-type="doi">10.1145/2988240.3014572</pub-id></citation></ref>
<ref id="B169">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Salmanowitz</surname> <given-names>N.</given-names></name></person-group> (<year>2018</year>). <article-title>The impact of virtual reality on implicit racial bias and mock legal decisions</article-title>. <source>J. Law Biosci</source>. <volume>5</volume>, <fpage>174</fpage>&#x02013;<lpage>203</lpage>. <pub-id pub-id-type="doi">10.1093/jlb/lsy005</pub-id></citation></ref>
<ref id="B170">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Samad</surname> <given-names>M.</given-names></name> <name><surname>Chung</surname> <given-names>A. J.</given-names></name> <name><surname>Shams</surname> <given-names>L.</given-names></name></person-group> (<year>2015</year>). <article-title>Perception of body ownership is driven by bayesian sensory inference</article-title>. <source>PLoS ONE</source> <volume>10</volume>:<fpage>e117178</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0117178</pub-id><pub-id pub-id-type="pmid">25658822</pub-id></citation></ref>
<ref id="B171">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2005</year>). <article-title>From presence to consciousness through virtual reality</article-title>. <source>Nat. Rev. Neurosci</source>. <volume>6</volume>:<fpage>332</fpage>. <pub-id pub-id-type="doi">10.1038/nrn1651</pub-id></citation></ref>
<ref id="B172">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Frisoli</surname> <given-names>A.</given-names></name> <name><surname>Bergamasco</surname> <given-names>M.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Virtual hand illusion induced by visuomotor correlations</article-title>. <source>PLoS ONE</source> <volume>5</volume>:<fpage>e10381</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0010381</pub-id><pub-id pub-id-type="pmid">20454463</pub-id></citation></ref>
<ref id="B173">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Schroeder</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). <source>The Social Life of Avatars: Presence and Interaction in Shared Virtual Environments</source>. <publisher-name>Springer Science &#x00026; Business Media</publisher-name>.</citation></ref>
<ref id="B174">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seinfeld</surname> <given-names>S.</given-names></name> <name><surname>Arroyo-Palacios</surname> <given-names>J.</given-names></name> <name><surname>Iruretagoyena</surname> <given-names>G.</given-names></name> <name><surname>Hortensius</surname> <given-names>R.</given-names></name> <name><surname>Zapata</surname> <given-names>L.</given-names></name> <name><surname>Borland</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Offenders become the victim in virtual reality: impact of changing perspective in domestic violence</article-title>. <source>Sci. Rep</source>. <volume>8</volume>:<fpage>2692</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-19987-7</pub-id><pub-id pub-id-type="pmid">29426819</pub-id></citation></ref>
<ref id="B175">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Serino</surname> <given-names>A.</given-names></name> <name><surname>Farn&#x000E9;</surname> <given-names>A.</given-names></name> <name><surname>L&#x000E1;davas</surname> <given-names>E.</given-names></name></person-group> (<year>2006</year>). <article-title>Visual peripersonal space</article-title>. <source>Adv. Conscious. Res</source>. <volume>66</volume>:<fpage>323</fpage>. <pub-id pub-id-type="doi">10.1075/aicr.66.24ser</pub-id></citation></ref>
<ref id="B176">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shams</surname> <given-names>L.</given-names></name> <name><surname>Beierholm</surname> <given-names>U. R.</given-names></name></person-group> (<year>2010</year>). <article-title>Causal inference in perception</article-title>. <source>Trends Cogn. Sci</source>. <volume>14</volume>, <fpage>425</fpage>&#x02013;<lpage>432</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2010.07.001</pub-id><pub-id pub-id-type="pmid">20705502</pub-id></citation></ref>
<ref id="B177">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Shiratori</surname> <given-names>T.</given-names></name> <name><surname>Park</surname> <given-names>H. S.</given-names></name> <name><surname>Sigal</surname> <given-names>L.</given-names></name> <name><surname>Sheikh</surname> <given-names>Y.</given-names></name> <name><surname>Hodgins</surname> <given-names>J. K.</given-names></name></person-group> (<year>2011</year>). <article-title>Motion capture from body-mounted cameras</article-title>, in <source>ACM SIGGRAPH 2011 Papers</source> (<publisher-loc>Vancouver, BC</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1145/2010324.1964926</pub-id></citation></ref>
<ref id="B178">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Shysheya</surname> <given-names>A.</given-names></name> <name><surname>Zakharov</surname> <given-names>E.</given-names></name> <name><surname>Aliev</surname> <given-names>K.-A.</given-names></name> <name><surname>Bashirov</surname> <given-names>R.</given-names></name> <name><surname>Burkov</surname> <given-names>E.</given-names></name> <name><surname>Iskakov</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Textured neural avatars</article-title>, in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Long Beach, CA</publisher-loc>), <fpage>2387</fpage>&#x02013;<lpage>2397</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2019.00249</pub-id></citation></ref>
<ref id="B179">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2009</year>). <article-title>Place illusion and plausibility can lead to realistic behaviour in immersive virtual environments</article-title>. <source>Philos. Trans. R. Soc. B Biol. Sci</source>. <volume>364</volume>, <fpage>3549</fpage>&#x02013;<lpage>3557</lpage>. <pub-id pub-id-type="doi">10.1098/rstb.2009.0138</pub-id><pub-id pub-id-type="pmid">19884149</pub-id></citation></ref>
<ref id="B180">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Antley</surname> <given-names>A.</given-names></name> <name><surname>Davison</surname> <given-names>A.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name> <name><surname>Guger</surname> <given-names>C.</given-names></name> <name><surname>Barker</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2006</year>). <article-title>A virtual reprise of the stanley milgram obedience experiments</article-title>. <source>PLoS ONE</source> <volume>1</volume>:<fpage>e39</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0000039</pub-id></citation></ref>
<ref id="B181">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Navarro</surname> <given-names>X.</given-names></name> <name><surname>Valenzuela</surname> <given-names>J.</given-names></name> <name><surname>Oliva</surname> <given-names>R.</given-names></name> <name><surname>Beacco</surname> <given-names>A.</given-names></name> <name><surname>Thorn</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Virtually being lenin enhances presence and engagement in a scene from the russian revolution</article-title>. <source>Front. Robot. AI</source> <volume>5</volume>:<fpage>91</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00091</pub-id></citation></ref>
<ref id="B182">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Neyret</surname> <given-names>S.</given-names></name> <name><surname>Johnston</surname> <given-names>T.</given-names></name> <name><surname>Iruretagoyena</surname> <given-names>G.</given-names></name> <name><surname>de la Campa Crespo</surname> <given-names>M. &#x000C1;.</given-names></name> <name><surname>Alab&#x000E9;rnia-Segura</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>An experimental study of a virtual reality counselling paradigm using embodied self-dialogue</article-title>. <source>Sci. Rep</source>. <volume>9</volume>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-46877-3</pub-id><pub-id pub-id-type="pmid">31358846</pub-id></citation></ref>
<ref id="B183">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>P&#x000E9;rez Marcos</surname> <given-names>D.</given-names></name> <name><surname>Ehrsson</surname> <given-names>H.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2009</year>). <article-title>Inducing illusory ownership of a virtual body</article-title>. <source>Front. Neurosci</source>. <volume>3</volume>:<fpage>29</fpage>. <pub-id pub-id-type="doi">10.3389/neuro.01.029.2009</pub-id><pub-id pub-id-type="pmid">20011144</pub-id></citation></ref>
<ref id="B184">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Rovira</surname> <given-names>A.</given-names></name> <name><surname>Southern</surname> <given-names>R.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>J. J.</given-names></name> <name><surname>Campbell</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Bystander responses to a violent incident in an immersive virtual environment</article-title>. <source>PLoS ONE</source> <volume>8</volume>:<fpage>e52766</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0052766</pub-id><pub-id pub-id-type="pmid">23300991</pub-id></citation></ref>
<ref id="B185">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name></person-group> (<year>2016</year>). <article-title>Enhancing our lives with immersive virtual reality</article-title>. <source>Front. Robot. AI</source> <volume>3</volume>:<fpage>74</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2016.00074</pub-id></citation></ref>
<ref id="B186">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Corominas</surname> <given-names>D.</given-names></name></person-group> (<year>2010a</year>). <article-title>Simulating virtual environments within virtual environments as the basis for a psychophysics of presence</article-title>. <source>ACM Trans. Graph</source>. <volume>29</volume>, <fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1145/1778765.1778829</pub-id></citation></ref>
<ref id="B187">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2010b</year>). <article-title>First person experience of body transfer in virtual reality</article-title>. <source>PLoS ONE</source> <volume>5</volume>:<fpage>e10564</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0010564</pub-id><pub-id pub-id-type="pmid">20485681</pub-id></citation></ref>
<ref id="B188">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Navarro</surname> <given-names>X.</given-names></name> <name><surname>Normand</surname> <given-names>J.-M.</given-names></name> <name><surname>Kishore</surname> <given-names>S.</given-names></name> <name><surname>Pizarro</surname> <given-names>R.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Real time whole body motion mapping for avatars and robots</article-title>, in <source>Proceedings of the 19th ACM Symposium on Virtual Reality Software and Technology</source> (<publisher-loc>Singapore</publisher-loc>), <fpage>175</fpage>&#x02013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1145/2503713.2503747</pub-id></citation></ref>
<ref id="B189">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Normand</surname> <given-names>J.-M.</given-names></name> <name><surname>Borland</surname> <given-names>D.</given-names></name> <name><surname>Kilteni</surname> <given-names>K.</given-names></name> <name><surname>Giannopoulos</surname> <given-names>E.</given-names></name> <name><surname>Pom&#x000E9;s</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>How to build an embodiment lab: achieving body representation illusions in virtual reality</article-title>. <source>Front. Robot. AI</source> <volume>1</volume>:<fpage>9</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2014.00009</pub-id></citation></ref>
<ref id="B190">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Steed</surname> <given-names>A.</given-names></name> <name><surname>Friston</surname> <given-names>S.</given-names></name> <name><surname>Lopez</surname> <given-names>M. M.</given-names></name> <name><surname>Drummond</surname> <given-names>J.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Swapp</surname> <given-names>D.</given-names></name></person-group> (<year>2016a</year>). <article-title>An &#x02018;in the wild&#x02019; experiment on presence and embodiment using consumer virtual reality equipment</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>22</volume>, <fpage>1406</fpage>&#x02013;<lpage>1414</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2016.2518135</pub-id><pub-id pub-id-type="pmid">26780804</pub-id></citation></ref>
<ref id="B191">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Steed</surname> <given-names>A.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Zisch</surname> <given-names>F.</given-names></name> <name><surname>Steptoe</surname> <given-names>W.</given-names></name></person-group> (<year>2016b</year>). <article-title>The impact of a self-avatar on cognitive load in immersive virtual reality</article-title>, in <source>2016 IEEE Virtual Reality (VR)</source> (<publisher-loc>Greenville, SC</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>67</fpage>&#x02013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2016.7504689</pub-id><pub-id pub-id-type="pmid">29240837</pub-id></citation></ref>
<ref id="B192">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Seitz</surname> <given-names>S. M.</given-names></name> <name><surname>Curless</surname> <given-names>B.</given-names></name> <name><surname>Diebel</surname> <given-names>J.</given-names></name> <name><surname>Scharstein</surname> <given-names>D.</given-names></name> <name><surname>Szeliski</surname> <given-names>R.</given-names></name></person-group> (<year>2006</year>). <article-title>A comparison and evaluation of multi-view stereo reconstruction algorithms</article-title>, in <source>Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>New York, NY</publisher-loc>).</citation></ref>
<ref id="B193">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tajadura-Jim&#x000E9;nez</surname> <given-names>A.</given-names></name> <name><surname>Banakou</surname> <given-names>D.</given-names></name> <name><surname>Bianchi-Berthouze</surname> <given-names>N.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Embodiment in a child-like talking virtual body influences object size perception, self-identification, and subsequent real speaking</article-title>. <source>Sci. Rep</source>. <volume>7</volume>:<fpage>9637</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-09497-3</pub-id></citation></ref>
<ref id="B194">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Thalmann</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <source>Crowd Simulation</source>. <publisher-name>Wiley Encyclopedia of Computer Science and Engineering</publisher-name>. <pub-id pub-id-type="doi">10.1002/9780470050118.ecse676</pub-id></citation></ref>
<ref id="B195">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Thorn</surname> <given-names>J.</given-names></name> <name><surname>Pizarro</surname> <given-names>R.</given-names></name> <name><surname>Spanlang</surname> <given-names>B.</given-names></name> <name><surname>Bermell-Garcia</surname> <given-names>P.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>Assessing 3D scan quality through paired-comparisons psychophysics</article-title>, in <source>Proceedings of the 24th ACM International Conference on Multimedia, MM &#x00027;16</source> (<publisher-loc>Amsterdam</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>147</fpage>&#x02013;<lpage>151</lpage>. <pub-id pub-id-type="doi">10.1145/2964284.2967200</pub-id></citation></ref>
<ref id="B196">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Van den Berg</surname> <given-names>J.</given-names></name> <name><surname>Lin</surname> <given-names>M.</given-names></name> <name><surname>Manocha</surname> <given-names>D.</given-names></name></person-group> (<year>2008</year>). <article-title>Reciprocal velocity obstacles for real-time multi-agent navigation</article-title>, in <source>2008 IEEE International Conference on Robotics and Automation</source> (<publisher-loc>Pasadena, CA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1928</fpage>&#x02013;<lpage>1935</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2008.4543489</pub-id></citation></ref>
<ref id="B197">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Vinayagamoorthy</surname> <given-names>V.</given-names></name> <name><surname>Gillies</surname> <given-names>M.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name> <name><surname>Tanguy</surname> <given-names>E.</given-names></name> <name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Loscos</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2006</year>). <article-title>Building expression into virtual characters</article-title>, in <source>Eurographics</source> (<publisher-loc>Vienna</publisher-loc>).</citation></ref>
<ref id="B198">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Waltemate</surname> <given-names>T.</given-names></name> <name><surname>Gall</surname> <given-names>D.</given-names></name> <name><surname>Roth</surname> <given-names>D.</given-names></name> <name><surname>Botsch</surname> <given-names>M.</given-names></name> <name><surname>Latoschik</surname> <given-names>M. E.</given-names></name></person-group> (<year>2018</year>). <article-title>The impact of avatar personalization and immersion on virtual body ownership, presence, and emotional response</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source>. <volume>24</volume>, <fpage>1643</fpage>&#x02013;<lpage>1652</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2018.2794629</pub-id><pub-id pub-id-type="pmid">29543180</pub-id></citation></ref>
<ref id="B199">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Kang</surname> <given-names>S. B.</given-names></name> <name><surname>Szeliski</surname> <given-names>R.</given-names></name> <name><surname>Shum</surname> <given-names>H. Y.</given-names></name></person-group> (<year>2001</year>). <article-title>Optimal texture map reconstruction from multiple views</article-title>, in <source>Computer Vision and Pattern Recognition</source> (<publisher-loc>Kauai, HI</publisher-loc>).</citation></ref>
<ref id="B200">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>T.-C.</given-names></name> <name><surname>Zhu</surname> <given-names>J.-Y.</given-names></name> <name><surname>Kalantari</surname> <given-names>N. K.</given-names></name> <name><surname>Efros</surname> <given-names>A. A.</given-names></name> <name><surname>Ramamoorthi</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Light field video capture using a learning-based hybrid imaging system</article-title>. <source>ACM Trans. Graph</source>. <volume>36</volume>:<fpage>133</fpage>. <pub-id pub-id-type="doi">10.1145/3072959.3073614</pub-id></citation></ref>
<ref id="B201">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>P.</given-names></name> <name><surname>Chai</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>Accurate realtime full-body motion capture using a single depth camera</article-title>. <source>ACM Trans. Graph</source>. <volume>31</volume>, <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1145/2366145.2366207</pub-id></citation></ref>
<ref id="B202">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>Y.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Quan</surname> <given-names>L.</given-names></name> <name><surname>Shum</surname> <given-names>H.</given-names></name></person-group> (<year>2005</year>). <article-title>Modeling hair from multiple views</article-title>, in <source>SIGGRAPH</source> (<publisher-loc>Los Angeles, CA</publisher-loc>). <pub-id pub-id-type="doi">10.1145/1187112.1187292</pub-id></citation></ref>
<ref id="B203">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>Y.</given-names></name> <name><surname>Ofek</surname> <given-names>E.</given-names></name> <name><surname>Quan</surname> <given-names>L.</given-names></name> <name><surname>Shum</surname> <given-names>H.-Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Realistic facial expression reconstruction for VR HMD users</article-title>. <source>ACM Trans. Graph</source>.</citation></ref>
<ref id="B204">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Weng</surname> <given-names>C.-Y.</given-names></name> <name><surname>Curless</surname> <given-names>B.</given-names></name> <name><surname>Kemelmacher-Shlizerman</surname> <given-names>I.</given-names></name></person-group> (<year>2019</year>). <article-title>Photo wake-up: 3D character animation from a single photo</article-title>, in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Long Beach, CA</publisher-loc>), <fpage>5908</fpage>&#x02013;<lpage>5917</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2019.00606</pub-id></citation></ref>
<ref id="B205">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yee</surname> <given-names>N.</given-names></name> <name><surname>Bailenson</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>The proteus effect: the effect of transformed self-representation on behavior</article-title>. <source>Hum. Commun. Res</source>. <volume>33</volume>, <fpage>271</fpage>&#x02013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1111/j.1468-2958.2007.00299.x</pub-id></citation></ref>
<ref id="B206">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yee</surname> <given-names>N.</given-names></name> <name><surname>Bailenson</surname> <given-names>J. N.</given-names></name></person-group> (<year>2009</year>). <article-title>The difference between being and seeing: the relative contribution of self-perception and priming to behavioral changes via digital self-representation</article-title>. <source>Media Psychol</source>. <volume>12</volume>, <fpage>195</fpage>&#x02013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1080/15213260902849943</pub-id></citation></ref>
<ref id="B207">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yee</surname> <given-names>N.</given-names></name> <name><surname>Bailenson</surname> <given-names>J. N.</given-names></name> <name><surname>Ducheneaut</surname> <given-names>N.</given-names></name></person-group> (<year>2009</year>). <article-title>The proteus effect: implications of transformed digital self-representation on online and offline behavior</article-title>. <source>Commun. Res</source>. <volume>36</volume>, <fpage>285</fpage>&#x02013;<lpage>312</lpage>. <pub-id pub-id-type="doi">10.1177/0093650208330254</pub-id></citation></ref>
<ref id="B208">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Yuan</surname> <given-names>Y.</given-names></name> <name><surname>Steed</surname> <given-names>A.</given-names></name></person-group> (<year>2010</year>). <article-title>Is the rubber hand illusion induced by immersive virtual reality?</article-title> in <source>2010 IEEE Virtual Reality Conference (VR)</source> (<publisher-loc>Waltham, MA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>95</fpage>&#x02013;<lpage>102</lpage>. <pub-id pub-id-type="doi">10.1109/VR.2010.5444807</pub-id></citation></ref>
<ref id="B209">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>M.</given-names></name> <name><surname>Li</surname> <given-names>T.</given-names></name> <name><surname>Abu Alsheikh</surname> <given-names>M.</given-names></name> <name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Torralba</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2018a</year>). <article-title>Through-wall human pose estimation using radio signals</article-title>, in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Salt Lake City, UT</publisher-loc>), <fpage>7356</fpage>&#x02013;<lpage>7365</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2018.00768</pub-id></citation></ref>
<ref id="B210">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>M.</given-names></name> <name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Alsheikh</surname> <given-names>M. A.</given-names></name> <name><surname>Li</surname> <given-names>T.</given-names></name> <name><surname>Hristov</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2018b</year>). <article-title>RF-based 3D skeletons</article-title>, in <source>Proceedings of the 2018 Conference of the ACM Special Interest Group on Data Communication</source> (<publisher-loc>Budapest</publisher-loc>), <fpage>267</fpage>&#x02013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.1145/3230543.3230579</pub-id></citation></ref>
<ref id="B211">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>E.</given-names></name> <name><surname>Mai</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Q.</given-names></name></person-group> (<year>2018</year>). <article-title>Forearm motion recognition with noncontact capacitive sensing</article-title>. <source>Front. Neurorobot</source>. <volume>12</volume>:<fpage>47</fpage>. <pub-id pub-id-type="doi">10.3389/fnbot.2018.00047</pub-id><pub-id pub-id-type="pmid">30100872</pub-id></citation></ref>
<ref id="B212">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zopf</surname> <given-names>R.</given-names></name> <name><surname>Harris</surname> <given-names>J. A.</given-names></name> <name><surname>Williams</surname> <given-names>M. A.</given-names></name></person-group> (<year>2011</year>). <article-title>The influence of body-ownership cues on tactile sensitivity</article-title>. <source>Cogn. Neurosci</source>. <volume>2</volume>, <fpage>147</fpage>&#x02013;<lpage>154</lpage>. <pub-id pub-id-type="doi">10.1080/17588928.2011.578208</pub-id><pub-id pub-id-type="pmid">24168529</pub-id></citation></ref>
</ref-list>
<fn-group>
<fn fn-type="financial-disclosure"><p><bold>Funding.</bold> MS-V and MS are funded by NEUROVIRTUAL-AGAUR (2017 SGR 1296). MS-V and Virtual Bodyworks are also supported by the European Union&#x00027;s Rights, Equality and Citizenship Programme (2014-2020) under Grant Agreement: 881712 (VRperGenere). DB and MS are supported by the European Research Council Advanced Grant MoTIVE &#x00023;742989. NP was partly funded by the Spanish Ministry of Economy, Industry and Competitiveness under Grant No. TIN2017-88515-C2-1-R.</p>
</fn>
</fn-group>
</back>
</article>