<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">695673</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2021.695673</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>The Influence of Embodiment as a Cartoon Character on Public Speaking Anxiety</article-title>
<alt-title alt-title-type="left-running-head">Bellido Rivas et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Embodiment as a Cartoon Character</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Bellido Rivas</surname>
<given-names>Anna I.</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1413679/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Navarro</surname>
<given-names>Xavi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn2">
<sup>&#x2021;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/595759/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Banakou</surname>
<given-names>Domna</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/228064/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Oliva</surname>
<given-names>Ramon</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1299907/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Orvalho</surname>
<given-names>Veronica</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Slater</surname>
<given-names>Mel</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1114/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Event Lab, Faculty of Psychology, University of Barcelona, <addr-line>Barcelona</addr-line>, <country>Spain</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>Institute of Neurosciences of the University of Barcelona, <addr-line>Barcelona</addr-line>, <country>Spain</country>
</aff>
<aff id="aff3">
<label>
<sup>3</sup>
</label>Universidade do Porto, Instituto de Telecomunica&#xe7;&#xf5;es, <addr-line>Porto</addr-line>, <country>Portugal</country>
</aff>
<author-notes>
<corresp id="c001">&#x2a;Correspondence: Mel Slater, <email>melslater@ub.edu</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Virtual Reality and Human Behavior, a section of the journal Frontiers in Virtual Reality</p>
</fn>
<fn fn-type="present-address" id="fn1">
<label>
<sup>
<bold>&#x2020;</bold>
</sup>
</label>
<p>
<bold>Present address:</bold> Netquest, Barcelona, Spain</p>
</fn>
<fn fn-type="present-address" id="fn2">
<label>
<sup>
<bold>&#x2021;</bold>
</sup>
</label>
<p>
<bold>Present address:</bold> VISYON, Barcelona, Spain.</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/140134/overview">Regis Kopper</ext-link>, University of North Carolina at Greensboro, United&#x20;States</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/844875/overview">Rabindra Ratan</ext-link>, Michigan State University, United&#x20;States</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/305341/overview">Mina C. Johnson-Glenberg</ext-link>, Arizona State University, United&#x20;States</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>22</day>
<month>10</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>2</volume>
<elocation-id>695673</elocation-id>
<history>
<date date-type="received">
<day>15</day>
<month>04</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>09</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Bellido Rivas, Navarro, Banakou, Oliva, Orvalho and Slater.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Bellido Rivas, Navarro, Banakou, Oliva, Orvalho and Slater</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>Virtual Reality can be used to embody people in different types of body&#x2014;so that when they look towards themselves or in a mirror they will see a life-sized virtual body instead of their own, and that moves with their own movements. This will typically give rise to the illusion of body ownership over the virtual body. Previous research has focused on embodiment in humanoid bodies, albeit with various distortions such as an extra limb or asymmetry, or with a body of a different race or gender. Here we show that body ownership also occurs over a virtual body that looks like a cartoon rabbit, at the same level as embodiment as a human. Furthermore, we explore the impact of embodiment on performance as a public speaker in front of a small audience. Forty five participants were recruited who had public speaking anxiety. They were randomly partitioned into three groups of 15, embodied as a Human, as the Cartoon rabbit, or from third person perspective (3PP) with respect to the rabbit. In each condition they gave two talks to a small audience of the same type as their virtual body. Several days later, as a test condition, they returned to give a talk to an audience of human characters embodied as a human. Overall, anxiety reduced the most in the Human condition, the least in the Cartoon condition, and there was no change in the 3PP condition, taking into account existing levels of trait anxiety. We show that embodiment in a cartoon character leads to high levels of body ownership from the first person perspective and synchronous real and virtual body movements. We also show that the embodiment influences outcomes on the public speaking&#x20;task.</p>
</abstract>
<kwd-group>
<kwd>embodiment</kwd>
<kwd>virtual reality</kwd>
<kwd>body ownership</kwd>
<kwd>fear of public speaking</kwd>
<kwd>cartoon</kwd>
</kwd-group>
<contract-sponsor id="cn001">H2020 European Research Council<named-content content-type="fundref-id">10.13039/100010663</named-content>
</contract-sponsor>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Introduction</title>
<p>When you put on a head-tracked stereo head-mounted display and you look down towards yourself, if it has been so programmed you will see a life-sized virtual body substituting your real unseen body. Your body movements can be tracked in real-time and mapped to the movements of the virtual body so that as you move and look down towards yourself you will see the virtual body move correspondingly and in synchrony with your movements. A mirror can be programmed so that looking into it you will see a reflection of your virtual body that would move synchronously and in correspondence with your real body movements. In our whole lives whenever we have looked down towards ourselves we have seen our own body, similarly in mirror reflections and similarly when we move our limbs it is our own limbs that we see moving correspondingly and synchronously. It is no surprise therefore that in such a setup in virtual reality (VR) people typically have the strong perceptual illusion that the virtual body that they see is their own body, even though they know for sure that this is not the case. This is referred to as a <italic>body ownership illusion</italic>, a concept inspired originally by the rubber hand illusion (RHI), where participants can feel a rubber hand as their own when it is seen to be touched, with touch that is felt synchronously on the corresponding real out-of-sight hand (<xref ref-type="bibr" rid="B14">Botvinick and Cohen, 1998</xref>). It is an example of an illusion resulting from multisensory stimulation (first person perspective over the hand, synchronous vision and touch) that provides evidence to the brain that the rubber hand is part of the body. In our opening example we refer to synchrony between proprioception and vision as well as first person perspective over the body. 
Full body ownership in VR is discussed extensively in (<xref ref-type="bibr" rid="B33">Kilteni et&#x20;al., 2012a</xref>) and body consciousness more generally in (<xref ref-type="bibr" rid="B21">Ehrsson and Stein, 2012</xref>; <xref ref-type="bibr" rid="B13">Blanke et&#x20;al., 2015</xref>).</p>
<p>Certain conditions must be satisfied for the RHI to occur. For example, the rubber hand should be in an anatomically plausible position in relation to the real body (<xref ref-type="bibr" rid="B22">Ehrsson et&#x20;al., 2004</xref>) and using a VR version of the illusion it has been shown that there should be continuity between the virtual hand and the rest of the virtual body (<xref ref-type="bibr" rid="B45">Perez-Marcos et&#x20;al., 2011</xref>; <xref ref-type="bibr" rid="B56">Tieri et&#x20;al., 2015</xref>). However, with respect to the virtual hand illusion there is inconsistent evidence regarding ownership of non-hand objects&#x2014;for example (<xref ref-type="bibr" rid="B64">Yuan and Steed, 2010</xref>) found stronger ownership over a hand than over an arrow, six different hand representations were compared in (<xref ref-type="bibr" rid="B39">Lin and J&#xf6;rg, 2016</xref>) with wide variation in ownership though with strongest overall level corresponding to the most realistic hand. It was shown in (<xref ref-type="bibr" rid="B28">Guterstam et&#x20;al., 2013</xref>) that with appropriate multisensory stimulation there could even be an illusion of ownership over empty space. Moreover, major distortions can occur with ownership preserved: having a third arm (<xref ref-type="bibr" rid="B29">Guterstam et&#x20;al., 2011</xref>), an extra finger (<xref ref-type="bibr" rid="B30">Hoyet et&#x20;al., 2016</xref>), one very long arm (<xref ref-type="bibr" rid="B34">Kilteni et&#x20;al., 2012b</xref>), a body with a tail (<xref ref-type="bibr" rid="B53">Steptoe et&#x20;al., 2013</xref>), and non-human bodies that can be moved by the self in unusual ways&#x2014;e.g., moving a leg by arm movements (<xref ref-type="bibr" rid="B62">Won et&#x20;al., 2015a</xref>; <xref ref-type="bibr" rid="B61">Won et&#x20;al., 2015b</xref>). 
With respect to the full body ownership illusion in VR again there is remarkable plasticity&#x2014;adult men embodied successfully as a young girl (<xref ref-type="bibr" rid="B49">Slater et&#x20;al., 2010</xref>), adults in small or very large bodies (<xref ref-type="bibr" rid="B58">van der Hoort et&#x20;al., 2011</xref>), or as children (<xref ref-type="bibr" rid="B8">Banakou et&#x20;al., 2013</xref>; <xref ref-type="bibr" rid="B55">Tajadura-Jim&#xe9;nez et&#x20;al., 2017</xref>), in bodies of a different race (<xref ref-type="bibr" rid="B44">Peck et&#x20;al., 2013</xref>; <xref ref-type="bibr" rid="B9">Banakou et&#x20;al., 2016</xref>), or age (<xref ref-type="bibr" rid="B7">Banakou et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B48">Slater et&#x20;al., 2019</xref>), or alien bodies (<xref ref-type="bibr" rid="B11">Barberia et&#x20;al., 2018</xref>).</p>
<p>The question that we address is whether body ownership is afforded through appropriate multisensory integration providing evidence to the brain that the virtual body is the person&#x2019;s own body, thus leading to the illusion of body ownership, or whether appearance of the body has a fundamental role. Therefore, here our first goal was to test whether embodiment in a virtual body that is deliberately designed to look like a cartoon character can also result in the body ownership illusion. Our second goal was to exploit this representation to examine whether it would have an impact over public speaking anxiety. It is known that embodiment in different types of bodies has an impact on attitudes and behaviour, for example people in a body taller than their own will be more confident in negotiations (<xref ref-type="bibr" rid="B63">Yee and Bailenson, 2007</xref>), or being embodied as Einstein leads to better cognitive test performance compared to being embodied in another body (<xref ref-type="bibr" rid="B7">Banakou et&#x20;al., 2018</xref>), and there are several studies that show that embodiment of Caucasian people in a dark skinned virtual body decreases their implicit racial bias&#x2014;summarized in (<xref ref-type="bibr" rid="B40">Maister et&#x20;al., 2015</xref>), with a mechanism presented in (<xref ref-type="bibr" rid="B12">Bedder et&#x20;al., 2019</xref>). These are all examples of what was termed by Yee and Bailenson (<xref ref-type="bibr" rid="B63">Yee and Bailenson, 2007</xref>) as the &#x2018;Proteus Effect&#x2019;.</p>
<p>It has long been known that people with public speaking anxiety exhibit this also when speaking to entirely virtual audiences (<xref ref-type="bibr" rid="B46">Pertaub et&#x20;al., 2002</xref>; <xref ref-type="bibr" rid="B4">Aymerich-Franch et&#x20;al., 2014</xref>), and VR has been used for psychological therapy to overcome this aspect of social phobia, for example (<xref ref-type="bibr" rid="B59">Vanni et&#x20;al., 2013</xref>). Our idea here, however, was that if the speaker with public speaking anxiety is embodied as a cartoon character, and the audience itself is a deliberate cartoon audience, then possibly the humour of the situation or the likelihood that the cartoon audience would not be seen as having expertise in any particular topic, would lead to a reduction of anxiety that would carry over to a later exposure of speaking to a virtual audience representing people rather than cartoons. Factors such as the size of the audience and their expertise level have been shown to influence anxiety in a public speaking task (<xref ref-type="bibr" rid="B31">Jackson and Latan&#xe9;, 1981</xref>; <xref ref-type="bibr" rid="B6">Ayres, 1990</xref>). The authors in these real-life studies found that the larger the audience and the more expert they were, the higher the anxiety level. Hence, we can infer that a positive audience consisting of a reduced number of non-experts would be an easier context for people with public speaking anxiety to deliver a speech. Immersion in such an environment may allow them to establish new positive associations with the feared speaking task, which may give rise to a progressive systematic desensitization, session after session.</p>
</sec>
<sec sec-type="methods" id="s2">
<title>Methods</title>
<sec id="s2-1">
<title>Overview</title>
<p>In order to examine these ideas we carried out a between-groups experiment with 45 participants with three conditions. Each participant visited the virtual reality lab on two occasions separated by mean 5.3&#x20;&#xb1; 2.3 (S.D.) days. On the first visit they gave a speech embodied either as a cartoon character from first person perspective (1PP) speaking to a cartoon audience, or as a human from 1PP speaking to a human audience, or as a cartoon character speaking to a cartoon audience from a third person perspective (3PP). Then in the same session they gave another talk under the same condition. On the second visit, some days later, they gave a third speech, but this time embodied from 1PP as a human speaking to a human audience. This last exposure was considered as a test of the outcome of the first exposures. Our two questions were 1) whether the level of body ownership would differ between the three conditions and 2) whether embodiment as the cartoon character would lead to less anxiety for the public speaking in front of humans at the second&#x20;visit.</p>
</sec>
<sec id="s2-2">
<title>Ethics</title>
<p>This experiment was approved by the Comisi&#xf3;n de Bio&#xe9;tica de la Universitat de Barcelona (IRB00003099). Participants gave written and informed consent.</p>
</sec>
<sec id="s2-3">
<title>Recruitment</title>
<p>Participants were recruited from the Mundet campus of the University of Barcelona and were independent from our own research group. A previous virtual reality study found a greater level of fear of public speaking for women compared to men (<xref ref-type="bibr" rid="B46">Pertaub et&#x20;al., 2002</xref>) and a large sample study amongst college students found the same (<xref ref-type="bibr" rid="B23">Ferreira Marinho et&#x20;al., 2017</xref>). Since our goal was to recruit participants with relatively high levels of public speaking anxiety it was most convenient to recruit women. The inclusion criterion was that participants scored at least 18 on the Personal Report of Confidence as a Speaker (PRCS) (<xref ref-type="bibr" rid="B43">Paul, 1966</xref>; <xref ref-type="bibr" rid="B25">Gallego et&#x20;al., 2009</xref>). This is a set of 30 questions with yes/no answers and a maximum score of 30 indicating a high degree of anxiety. The mean&#x20;&#xb1; SD score was 22.4&#x20;&#xb1; 2.86 with scores ranging from 18 to 28. Participants had to be at least 18&#x20;years old, and the mean&#x20;&#xb1; SD age of participants was 24.5&#x20;&#xb1; 9.31. A further exclusion criterion was obtained using the LSB-50 questionnaire that was used to screen out participants with potentially serious psychological disorders (<xref ref-type="bibr" rid="B1">Abu&#xed;n and Rivera, 2014</xref>). Further details of the sample are given in <xref ref-type="sec" rid="s11">Supplementary Table&#x20;S1</xref>.</p>
</sec>
<sec id="s2-4">
<title>Experimental Design</title>
<p>This was a between-groups experiment with three groups: Cartoon, Human and 3PP. In the Cartoon condition participants were embodied as a cartoon character and spoke to an audience of cartoon characters. Embodiment was from 1PP with visuomotor synchrony, so that the virtual body moved in synchrony with real body movements. In the Human condition the participant was embodied in a female virtual body with visuomotor synchrony. In the 3PP condition the participant saw the cartoon virtual body from 3PP and it did not move with their body movements. However, they still had full control of the head and visual updates to the images in the head-mounted display were based on their own head movements. The displayed cartoon body, however, did not show the participant&#x2019;s head movements. The virtual audience also consisted of cartoon characters (<xref ref-type="fig" rid="F1">Figure&#x20;1</xref>). We maintained the audience as the same type as the embodied character in order to avoid effects solely caused by difference between these two. Each condition was assigned 15 participants selected by a pseudo random number generator. The experiment is illustrated in <xref ref-type="sec" rid="s11">Supplementary Video&#x20;S1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>The scenario <bold>(A)</bold> The Cartoon condition. <bold>(B)</bold> The Human condition. The 3PP condition looked the same as the Cartoon except that the participant was not embodied in the bunny rabbit.</p>
</caption>
<graphic xlink:href="frvir-02-695673-g001.tif"/>
</fig>
</sec>
<sec id="s2-5">
<title>Implementation</title>
<p>Participants used a stereo NVIS nVisor SX111&#x20;head-mounted display. This has dual SXGA displays with 76H &#xd7; 64V (degrees) field of view (FOV) per eye, with a wide field-of-view 111&#xb0; horizontal with 50&#xb0; (66%) overlap and 64&#xb0; vertical, with a resolution of 1280&#x20;&#xd7; 1024 pixels per eye displayed at 60&#xa0;Hz. Head tracking was performed by a 6-DOF Intersense IS-900 device. Participants wore an OptiTrack full body motion capture suit that uses 37 markers and the corresponding software (<ext-link ext-link-type="uri" xlink:href="https://optitrack.com/software/motive/">https://optitrack.com/software/motive/</ext-link>) to track their movements. This used a 12-camera truss setup by OptiTrack. Participants were assisted to don and calibrate the head-mounted display (HMD) following the method described in (<xref ref-type="bibr" rid="B32">Jones et&#x20;al., 2008</xref>).</p>
<p>The virtual room in which the speech took place was the same for all conditions. It was designed to be neutral, and it included a wooden platform on which participants virtually stood while delivering a speech. A virtual mirror was located on the left of the participant, which helped her inspect the body assigned. The mirror was carefully located so that it was in full view of the participant throughout the speech while she was looking at the audience. A virtual clock was added to the opposite wall of the room in order to help the participant keep track of the&#x20;time.</p>
<p>The avatars generated for the Cartoon and 3PP conditions were cartoon-like, not culturally offensive, anthropomorphic figures of animals to make them look friendly and humorous, and were rigged so that they could be animated. The human avatars used in the Human condition were formed of male and female avatars from a RocketBox collection (<xref ref-type="bibr" rid="B27">Gonzalez-Franco et&#x20;al., 2020</xref>). Both human and cartoon audiences were located in the same places in the virtual room. All the animations generated were for one audience and retargeted to the other so that the audience behaviors were identical.</p>
</sec>
<sec id="s2-6">
<title>Assessing Anxiety</title>
<p>Public speaking anxiety was measured using the State-Trait Anxiety Inventory (STAI) (<xref ref-type="bibr" rid="B50">Spielberger, 1983</xref>; <xref ref-type="bibr" rid="B51">Spielberger, 2010</xref>), a commonly used measure to diagnose anxiety and to distinguish it from depressive syndromes. The STAI measures two types of anxiety, state anxiety, or anxiety about an event, and trait anxiety, or anxiety level as a personal characteristic. Form Y is its most popular version and includes 20 items for assessing trait anxiety and 20 items for state anxiety, rated on a 4-point scale from &#x201c;Almost Never&#x201d; to &#x201c;Almost Always&#x201d;. Scores range from 20 to 80, where 20 indicates absence of anxiety and 80 its maximum value. The STAI is translated into Spanish and validated (<xref ref-type="bibr" rid="B47">Seisdedos, 1988</xref>). It has good test-retest reliability (Cronbach alpha of 0.90 for the state scale and 0.84 for the trait scale). Examples of State questions include: &#x201c;I am tense; I am worried&#x201d; and &#x201c;I feel calm; I feel secure.&#x201d; Trait questions include: &#x201c;I worry too much over something that really doesn&#x2019;t matter&#x201d; and &#x201c;I am content; I am a steady person.&#x201d; The STAI Trait was used as a background variable for the participant&#x2019;s general self-assessed anxiety, since how people might respond to a particular incident would be influenced by their general predisposition to anxiety, so that this is a critical covariate. The STAI State was used to assess the participant&#x2019;s state before and after each&#x20;talk.</p>
</sec>
<sec id="s2-7">
<title>Procedures</title>
<p>The experiment was carried out in three phases: a pre-experimental phase and two experimental sessions. The first phase was used to recruit only those participants who had sufficient level of fear of speaking in public using the PRCS as described above. A day was then arranged to hold the first session, and the participants were asked to choose two topics they could talk about for 5&#xa0;minutes. They had two exposures in their assigned condition.</p>
<p>At the first session participants were given an information sheet to read, a consent form to sign, and if they agreed to participate in the study, they were asked to complete the LSB-50 and the STAI-Trait questionnaires. They were then assigned to one of the three conditions (Cartoon, Human or 3PP) following a pseudo-random method that guaranteed the same number of participants per condition. Prior to and after each VR exposure the participant was asked to complete the STAI-State.</p>
<p>The sequence of events started with 1&#xa0;min 40&#x2002;s of audio instructions the participants had to follow while looking at a virtual mirror in order to get them to move their head, arms and legs. This also allowed them to become acquainted with the virtual environment and their virtual body (or their relationship to it in the 3PP condition) in order to provide time for the body ownership illusion to be induced (or not). After the audio instructions, the participant was asked to move freely (although within the tracking area) for 1&#xa0;min and 20&#xa0;s and wait for a brief 3&#x2002;s clap of the audience, which was the sign of the beginning of the talk. After 5&#xa0;min, the audience applauded resoundingly indicating the end of the speech. The virtual environment slowly faded out and the experimenter helped the participant take off the HMD. Finally, she filled in the post-experiment questionnaires and a brief informal interview on their experience followed. Participants went through the virtual reality experiment twice (first and second session) with a 15&#xa0;min break in between. After the end of the second session, a day for the third session was arranged. It had to be not sooner than 2&#xa0;days nor later than a week, and they were asked to think of another topic to talk about. The participant was paid 5&#x20ac; and left. In the third session, participants had to perform only one talk (third talk) always in the Human condition, so they went through the experimental procedure only once, which was identical to that of the first two sessions. After completion of the third talk, the participant was paid 15&#x20ac; and debriefed.</p>
</sec>
<sec id="s2-8">
<title>Response Variables</title>
<sec id="s2-8-1">
<title>Body Ownership</title>
<p>Body ownership was assessed using the questionnaire shown in <xref ref-type="table" rid="T1">Table&#x20;1</xref> administered immediately after each VR exposure. The first three questions assess body ownership itself. The <italic>twobodies</italic> question is a control question&#x2014;since if there is strong body ownership we would expect participants to report the feeling of having one body (the virtual) rather than two. The last question is a test of the extent to which the tracking system and mapping real movements to the movements of the virtual bodies was successful. If the variable <inline-formula id="inf1">
<mml:math id="m1">
<mml:mi>x</mml:mi>
</mml:math>
</inline-formula> refers to any of these questions then <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> refer to the responses after exposure 1, 2 and 3 respectively.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Subjective evaluation of the body ownership illusion. The questionnaire was answered after completing each talk. Answers were rated on a 7-point Likert scale, where 1 was &#x201c;Not at all&#x201d; and 7 was &#x201c;Completely&#x201d;.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Variable</th>
<th align="center">Question</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<italic>Memirror</italic>
</td>
<td align="left">Independently of the physical resemblance between the body I saw and mine, I felt that the virtual body I saw when I looked in the mirror was mine.</td>
</tr>
<tr>
<td align="left">
<italic>Medown</italic>
</td>
<td align="left">Independently of the physical resemblance between the body I saw and mine, I felt that the virtual body I saw when looking downwards was mine.</td>
</tr>
<tr>
<td align="left">
<italic>mybody</italic>
</td>
<td align="left">Independently of the physical resemblance between the body I saw and mine, in general I felt that the body I saw when looking in the mirror or downwards was mine.</td>
</tr>
<tr>
<td align="left">
<italic>twobodies</italic>
</td>
<td align="left">I felt I had two bodies.</td>
</tr>
<tr>
<td align="left">
<italic>mymovements</italic>
</td>
<td align="left">I felt that the movements of the virtual body were caused by mine.</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2-8-2">
<title>State Anxiety</title>
<p>We refer to the STAI State questionnaire prior to an exposure as <inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> after the exposure. Then <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> refer to the states prior to the first, second and third exposures respectively. Similarly for <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>. The response variable of interest is:<disp-formula id="e1">
<mml:math id="m11">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>This is the difference between STAI state after the final exposure to the Human condition, and the mean of the STAI states prior to the first two exposures. We consider the mean of the first two exposures since the first alone may induce anxiety simply due to a new and unknown forthcoming event. By the second time participants would know what to expect, and therefore be less anxious. So the first may overestimate anxiety and the second underestimate it, so taking the mean of the two is a balance. However, we have also carried out the analysis using instead<inline-formula id="inf11">
<mml:math id="m12">
<mml:mrow>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> and also <inline-formula id="inf12">
<mml:math id="m13">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, discussed in Results.</p>
<p>The STAI Trait, assessed in the pre-experimental meeting was used as a covariate since participants may respond differently depending on their underlying normal level of anxiety.</p>
<p>The anxiety variables are summarized in <xref ref-type="table" rid="T2">Table&#x20;2</xref>.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>The anxiety scores. In general &#x201c;trait anxiety&#x201d; refers to a stable attribute of personality, whereas &#x201c;state anxiety&#x201d; refers to anxiety with respect to a particular situation or event. The STAI refers to the State-Trait Anxiety Inventory questionnaire (<xref ref-type="bibr" rid="B50">Spielberger, 1983</xref>).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Variable</th>
<th align="center">Question</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<italic>staitrait</italic>
</td>
<td align="left">STAI Trait elicited during the first session.</td>
</tr>
<tr>
<td align="left">
<italic>staistatepre1</italic>
</td>
<td align="left">STAI state elicited immediately before the first talk to the virtual audience.</td>
</tr>
<tr>
<td align="left">
<italic>staistatepre2</italic>
</td>
<td align="left">STAI state elicited immediately before the second talk to the virtual audience.</td>
</tr>
<tr>
<td align="left">
<italic>staistatepost</italic>
</td>
<td align="left">STAI state elicited immediately after the third talk to the virtual audience.</td>
</tr>
<tr>
<td align="left">
<italic>dstai</italic>
</td>
<td align="left">
<inline-formula id="inf13">
<mml:math id="m14">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, the difference between the state anxiety after the third talk and the average anxiety prior to the first two talks.</td>
</tr>
<tr>
<td align="left">
<italic>dstai1</italic>
</td>
<td align="left">
<inline-formula id="inf14">
<mml:math id="m15">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, the difference between state anxiety after the third talk and the anxiety prior to the first talk. The results for this are discussed in <xref ref-type="sec" rid="s11">Supplementary Table S2A</xref>.
</td>
</tr>
<tr>
<td align="left">
<italic>dstai2</italic>
</td>
<td align="left">
<inline-formula id="inf15">
<mml:math id="m16">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>3</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, the difference between state anxiety after the third talk and the anxiety prior to the second talk. The results for this are discussed in <xref ref-type="sec" rid="s11">Supplementary Table S2B</xref>.
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<p>In this section we will first present descriptive results for body ownership and anxiety, and then a statistical analysis for all the results together.</p>
<sec id="s3-1">
<title>Body Ownership</title>
<p>
<xref ref-type="fig" rid="F2">Figures 2A,B</xref> show the box plots for the scores on the questions of <xref ref-type="table" rid="T1">Table&#x20;1</xref> for the three exposures. For exposures 1 and 2 where participants were embodied as Cartoon, Human or the 3PP, it is clear that the scores on the three embodiment questions are very high, and much higher than the scores on the control question <italic>twobodies</italic> for the Cartoon and Human conditions, and the scores are always low for the 3PP condition. In the third exposure (<xref ref-type="fig" rid="F2">Figure&#x20;2C</xref>) all were embodied as Human (the conditions refer to how they had been embodied in the first two exposures) and all body ownership scores are high, and again much greater than the control question. In all conditions and exposures except for 3PP the <italic>mymovements</italic> scores are very high, indicating that the tracking system and mapping of real movements to movements of the virtual body worked&#x20;well.
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Scores on the ownership questions from <xref ref-type="table" rid="T1">Table&#x20;1</xref>. <bold>(A)</bold> Box plot for exposure 1, session 1. <bold>(B)</bold> Box plot for exposure 2, session 1. <bold>(C)</bold> Box plot for the exposure embodied as Human for session 2, but where the conditions refer to those of session 1. <bold>(D)</bold> Bar charts showing means and standard errors of the factor scores from the principal component factor analysis of the ownership scores of the first two exposures&#x20;only.</p>
</caption>
<graphic xlink:href="frvir-02-695673-g002.tif"/>
</fig>
<p>The critical embodiments were those of exposures 1 and 2, since the goal was to understand how experiencing the public speaking in the Human or Cartoon conditions would influence anxiety in the final test in the Human condition (exposure 3). We carried out a principal components factor analysis with varimax rotation on the scores <italic>mybody</italic>, <italic>medown</italic>, <italic>memirror</italic> and <italic>twobodies</italic> for exposures 1 and 2 (i.e.,&#x20;eight variables). This was with the Stata program 16.1 (<ext-link ext-link-type="uri" xlink:href="https://www.stata.com/">https://www.stata.com/</ext-link>) using the &#x201c;factor&#x201d; command. Two factors were retained, the first accounting for 68% of the variance and the second for 23% of the variance, thus cumulatively 91%. Then regression scores were obtained for each of the two factors resulting in two uncorrelated variables with the scoring coefficients shown in <xref ref-type="table" rid="T3">Table&#x20;3</xref>. The first factor is proportional to the mean of all the scores apart from <italic>twobodies</italic>, and the second factor is proportional to the mean of the <italic>twobodies</italic> scores. Hence the factor structure is consistent with the meaning of the questionnaire. The interest is only on the first factor, which measures the overall level of ownership in the first two exposures, and we refer to this factor as <italic>own</italic>, which we will use in subsequent analysis. The means and standard errors are shown in <xref ref-type="fig" rid="F2">Figure&#x20;2D</xref>, demonstrating no difference between the Cartoon and Human conditions, which are both much greater than the 3PP condition.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Scoring coefficients for the principal components factor analysis of the questionnaire scores of exposures 1 and 2. (Method &#x3d; regression based on varimax rotated factors).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Variable</th>
<th align="center">Factor1</th>
<th align="center">Factor2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<italic>memirror1</italic>
</td>
<td align="char" char=".">0.174</td>
<td align="char" char=".">&#x2212;0.001</td>
</tr>
<tr>
<td align="left">
<italic>medown1</italic>
</td>
<td align="char" char=".">0.167</td>
<td align="char" char=".">0.027</td>
</tr>
<tr>
<td align="left">
<italic>mybody1</italic>
</td>
<td align="char" char=".">0.178</td>
<td align="char" char=".">&#x2212;0.015</td>
</tr>
<tr>
<td align="left">
<italic>twobodies1</italic>
</td>
<td align="char" char=".">&#x2212;0.006</td>
<td align="char" char=".">0.521</td>
</tr>
<tr>
<td align="left">
<italic>memirror2</italic>
</td>
<td align="char" char=".">0.175</td>
<td align="char" char=".">&#x2212;0.024</td>
</tr>
<tr>
<td align="left">
<italic>medown2</italic>
</td>
<td align="char" char=".">0.180</td>
<td align="char" char=".">&#x2212;0.024</td>
</tr>
<tr>
<td align="left">
<italic>mybody2</italic>
</td>
<td align="char" char=".">0.176</td>
<td align="char" char=".">0.000</td>
</tr>
<tr>
<td align="left">
<italic>twobodies2</italic>
</td>
<td align="char" char=".">&#x2212;0.036</td>
<td align="char" char=".">0.529</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-2">
<title>Anxiety</title>
<p>
<xref ref-type="fig" rid="F3">Figures 3A&#x2013;C</xref> show the scatter diagrams of <italic>dstai</italic> (<xref ref-type="disp-formula" rid="e1">Eq. 1</xref>) by the covariate <italic>staitrait</italic>, the trait anxiety measured some days prior to the first exposure. The results suggest that <italic>dstai</italic> is positively associated with <italic>staitrait</italic> in the Cartoon condition, negatively in the Human condition, and there seems to be no association in the 3PP condition. <xref ref-type="fig" rid="F3">Figure&#x20;3D</xref> shows the means and standard errors of <italic>dstai</italic> by the conditions without taking into account background anxiety, suggesting the decrease in anxiety is greater for the Cartoon and 3PP conditions. The means and standard errors are also shown in <xref ref-type="sec" rid="s11">Supplementary Table S2</xref>. However, these do not take into account the predisposition towards anxiety as measured by <italic>staitrait</italic>.
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Plots of <italic>dstai</italic> (<xref ref-type="disp-formula" rid="e1">Eq. 1</xref>)&#x2014;the difference between the anxiety score after exposure 3 compared to the mean anxiety score prior to exposures 1 and 2 by staitrait. <bold>(A&#x2013;C)</bold>&#x2014;scatter diagrams by condition. <bold>(D)</bold> Bar chart of <italic>dstai</italic> showing the means and standard errors by condition.</p>
</caption>
<graphic xlink:href="frvir-02-695673-g003.tif"/>
</fig>
</sec>
<sec id="s3-3">
<title>Statistical Analysis</title>
<p>Bayesian statistical methods have been increasingly employed over recent years including in psychology (<xref ref-type="bibr" rid="B37">Kruschke, 2011</xref>; <xref ref-type="bibr" rid="B57">Van De Schoot et&#x20;al., 2017</xref>). In classical (frequentist) statistics, in order to consider whether a parameter value is in a certain range (for example, the mean of a population being positive compared with being zero) we compute the probability that the particular observed data would have been generated on the assumption that the parameter value were 0, referred to as the significance level. If this probability is small (typically &#x3c;0.05) then we reject the hypothesis that the parameter value is 0. In classical statistics the probability of an event is based exclusively on its long run frequency of occurrence in a large number of independent trials. Hence this method essentially compares the observed data with what <italic>might</italic> have been observed in a large number of independent repetitions of the experiment. In Bayesian statistics in contrast we start with a probability distribution for the parameter based on prior knowledge (or a distribution with large variance in the absence of prior knowledge) and then we can compute a posterior distribution conditional on the observed data, so that the data updates our prior. From this we can compute probabilities of the parameter value being in any range of interest. Moreover, if there are multiple parameters the posterior distribution will be the joint distribution of all the parameters, and we can make as many probability statements as we like over several parameters. In classical statistics when we carry out more than one significance test then the significance levels are no longer valid and we have to resort to <italic>ad hoc</italic> corrections such as Bonferroni. 
In classical statistics confidence intervals are mathematically equivalent to significance tests, and a 95% confidence interval cannot be interpreted as a probability of 0.95 of a parameter being between the computed limits. In Bayesian statistics a 95% credible interval is a range of values where the actual probability of a parameter value being within that range is 0.95. What is particularly informative is to compare the credible interval based on the prior distribution of the parameter and the credible interval calculated from the posterior distribution. This is a very useful way to understand how the data has updated the credible interval.</p>
<p>A Bayesian analysis was carried out that includes both response variables (<italic>dstai</italic> and <italic>own</italic>) simultaneously. The method is equivalent to an analysis of variance model with a covariate in the case of <italic>dstai</italic>, and a simpler model without a covariate in the case of <italic>own</italic>. The mathematical formulation is identical to ANOVA except that the parameters have prior distributions.</p>
<p>Let <inline-formula id="inf16">
<mml:math id="m17">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mn>15</mml:mn>
<mml:mo>;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1,2,3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> be the <italic>dstai</italic> value for the <italic>i</italic>th participant (<italic>i</italic>&#x20;&#x3d; 1,2,&#x2026;,15) in the <italic>j</italic>th condition (1 &#x3d; Cartoon, 2&#x20;&#x3d; Human, 3&#x20;&#x3d; 3PP). Similarly for <inline-formula id="inf17">
<mml:math id="m18">
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>. Let the corresponding means be <inline-formula id="inf18">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> for <italic>dstai</italic>, and <inline-formula id="inf19">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> for ownership. Then the model for <italic>dstai</italic> is as follows:<disp-formula id="e2">
<mml:math id="m21">
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3b2;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo>&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mn>3</mml:mn>
</mml:munderover>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo>&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mn>3</mml:mn>
</mml:munderover>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#x223c;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(2)</label>
</disp-formula>The parameter <inline-formula id="inf20">
<mml:math id="m22">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the general mean. <inline-formula id="inf21">
<mml:math id="m23">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the effect of the <italic>j</italic>th condition (<italic>j</italic>&#x20;&#x3d; Cartoon, Human, 3PP), <inline-formula id="inf22">
<mml:math id="m24">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b2;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the coefficient of the covariate <italic>staitrait</italic> irrespective of condition, and <inline-formula id="inf23">
<mml:math id="m25">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> allows the slope of the relationship between <italic>dstai</italic> and the covariate to be different depending on condition. For ease of comparison between the conditions we adopt a centred parameterisation where the parameter values are constrained to sum to 0. <inline-formula id="inf24">
<mml:math id="m26">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the standard deviation.</p>
<p>The prior distributions of the parameters are chosen as weakly informative&#x2014;e.g. (<xref ref-type="bibr" rid="B38">Lemoine, 2019</xref>), i.e.,&#x20;assuming very little prior information. Weakly informative priors are proper probability distributions, but with wide variance. Specifically <inline-formula id="inf25">
<mml:math id="m27">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> &#x223c; Gamma(shape &#x3d; 2, rate &#x3d; 0.1). This has a prior 95% credible interval of 2.4 to 55.7. All the other parameters have prior distribution normal(0,20) which leads to 95% credible intervals of -40 to 40, except that due to the sum to zero constraints <inline-formula id="inf26">
<mml:math id="m28">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf27">
<mml:math id="m29">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> will have normal(0, 28.3) distributions with prior credible intervals -55 to 55. However, the choice of condition 3 for this is arbitrary, and either of the other two conditions could have been chosen to have this wider prior distribution without affecting the results.
<p>For <italic>own</italic> the model is similar but simpler since there is no covariate:<disp-formula id="equ1">
<mml:math id="m30">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="e3">
<mml:math id="m31">
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mn>3</mml:mn>
</mml:munderover>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mstyle>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#x223c;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(3)</label>
</disp-formula>with the same prior distributions for the parameters.</p>
<p>The model was implemented using the Stan probabilistic programming language (<xref ref-type="bibr" rid="B52">Stan Development Team, 2011-2019</xref>; <xref ref-type="bibr" rid="B15">Carpenter et&#x20;al., 2017</xref>) (<ext-link ext-link-type="uri" xlink:href="https://mc-stan.org/">https://mc-stan.org/</ext-link>) through the RStudio interface (<ext-link ext-link-type="uri" xlink:href="https://www.rstudio.com/">https://www.rstudio.com/</ext-link>). The execution used 2000 iterations on four chains. All Rhat &#x3d; 1 indicating that the four chains converged and successfully mixed. Use of the &#x2018;leave-one-out&#x2019; method (<xref ref-type="bibr" rid="B60">Vehtari et&#x20;al., 2017</xref>), equivalent to repeated fits to the data with one observation left out each time, similarly indicated no problem with convergence or outliers.</p>
<p>
<xref ref-type="table" rid="T4">Table&#x20;4</xref> shows the summaries of the posterior distributions of the parameters. Notice that the posterior 95% credible intervals are narrow compared to the prior intervals. For example, for <inline-formula id="inf28">
<mml:math id="m32">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> the prior 95% credible interval was <inline-formula id="inf29">
<mml:math id="m33">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>55</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> whereas the posterior is &#x2212;1.38 to &#x2212;0.85. The means of the distributions can be considered as effect sizes. For example, the mean of the posterior distribution of <inline-formula id="inf30">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is &#x2212;1.11. The interpretation is that the 3PP condition is associated on the average with a decrease of 1.11 in the <italic>own</italic>ership response variable, other things being equal. Notice similarly that the prior 95% credible interval for the standard deviations of the model were 2.4 to 55.7, whereas the posteriors are 0.52 to 0.81 in the case of <italic>own</italic>, and 6.19 to 9.68 in the case of&#x20;<italic>dstai</italic>.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Summaries of the posterior distributions of the parameters showing the distribution means, standard deviations, 95% credible intervals. Prob &#x3e;0 is the posterior probability that the parameter is positive.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Parameter</th>
<th align="center">Term</th>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">2.5%</th>
<th align="center">97.5%</th>
<th align="center">Prob &#x3e;0</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<italic>
<bold>own:</bold>
</italic>
</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">
<inline-formula id="inf31">
<mml:math id="m35">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
<td align="char" char=".">0.00</td>
<td align="char" char=".">0.10</td>
<td align="char" char=".">&#x2212;0.19</td>
<td align="char" char=".">0.19</td>
<td align="char" char=".">0.499</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf32">
<mml:math id="m36">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Cartoon</td>
<td align="char" char=".">0.64</td>
<td align="char" char=".">0.14</td>
<td align="char" char=".">0.38</td>
<td align="char" char=".">0.90</td>
<td align="char" char=".">1.000</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf33">
<mml:math id="m37">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Human</td>
<td align="char" char=".">0.47</td>
<td align="char" char=".">0.13</td>
<td align="char" char=".">0.20</td>
<td align="char" char=".">0.74</td>
<td align="char" char=".">1.000</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf34">
<mml:math id="m38">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">3PP</td>
<td align="char" char=".">&#x2212;1.11</td>
<td align="char" char=".">0.13</td>
<td align="char" char=".">&#x2212;1.38</td>
<td align="char" char=".">&#x2212;0.85</td>
<td align="char" char=".">0.000</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf35">
<mml:math id="m39">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
<td align="char" char=".">0.65</td>
<td align="char" char=".">0.07</td>
<td align="char" char=".">0.52</td>
<td align="char" char=".">0.81</td>
<td align="left"/>
</tr>
<tr>
<td align="left">
<bold>
<italic>dstai</italic>
</bold>:</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">
<inline-formula id="inf36">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
<td align="char" char=".">&#x2212;8.94</td>
<td align="char" char=".">4.09</td>
<td align="char" char=".">&#x2212;16.94</td>
<td align="char" char=".">&#x2212;0.66</td>
<td align="char" char=".">0.017</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf37">
<mml:math id="m41">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Cartoon</td>
<td align="char" char=".">&#x2212;15.99</td>
<td align="char" char=".">5.73</td>
<td align="char" char=".">&#x2212;26.85</td>
<td align="char" char=".">&#x2212;4.28</td>
<td align="char" char=".">0.004</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf38">
<mml:math id="m42">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Human</td>
<td align="char" char=".">12.61</td>
<td align="char" char=".">5.42</td>
<td align="char" char=".">1.44</td>
<td align="char" char=".">23.07</td>
<td align="char" char=".">0.986</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf39">
<mml:math id="m43">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">3PP</td>
<td align="char" char=".">3.37</td>
<td align="char" char=".">5.30</td>
<td align="char" char=".">&#x2212;7.05</td>
<td align="char" char=".">14.01</td>
<td align="char" char=".">0.731</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf40">
<mml:math id="m44">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b2;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Staitrait</td>
<td align="char" char=".">0.09</td>
<td align="char" char=".">0.19</td>
<td align="char" char=".">&#x2212;0.27</td>
<td align="char" char=".">0.46</td>
<td align="char" char=".">0.700</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf41">
<mml:math id="m45">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Cartoon &#xd7; staitrait</td>
<td align="char" char=".">0.74</td>
<td align="char" char=".">0.28</td>
<td align="char" char=".">0.20</td>
<td align="char" char=".">1.27</td>
<td align="char" char=".">0.994</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf42">
<mml:math id="m46">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Human &#xd7; staitrait</td>
<td align="char" char=".">&#x2212;0.49</td>
<td align="char" char=".">0.23</td>
<td align="char" char=".">&#x2212;0.94</td>
<td align="char" char=".">&#x2212;0.01</td>
<td align="char" char=".">0.024</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf43">
<mml:math id="m47">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3b3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">3PP &#xd7; staitrait</td>
<td align="char" char=".">&#x2212;0.25</td>
<td align="char" char=".">0.25</td>
<td align="char" char=".">&#x2212;0.74</td>
<td align="char" char=".">0.22</td>
<td align="char" char=".">0.156</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf44">
<mml:math id="m48">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
<td align="char" char=".">7.70</td>
<td align="char" char=".">0.89</td>
<td align="char" char=".">6.19</td>
<td align="char" char=".">9.68</td>
<td align="left"/>
</tr>
</tbody>
</table>
</table-wrap>
<p>From the first block of <xref ref-type="table" rid="T4">Table&#x20;4</xref> the posterior probabilities of the parameters of Cartoon <inline-formula id="inf45">
<mml:math id="m49">
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> and Human conditions <inline-formula id="inf46">
<mml:math id="m50">
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> being positive and the 3PP condition <inline-formula id="inf47">
<mml:math id="m51">
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>&#x3b1;</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> negative are 1. Hence the evidence is overwhelming that the Cartoon and Human condition had the highest levels of body ownership, and the 3PP condition the lowest.</p>
<p>In the case of <italic>dstai</italic> the interaction terms are important. Notice how the mean (CI: credible interval) for Cartoon &#xd7; staitrait is 0.74 (CI: 0.20 to 1.27) whereas for Human it is &#x2212;0.49 (CI: &#x2212;0.94 to &#x2212;0.01). Hence, for those in the Cartoon condition the greater the <italic>staitrait</italic> the greater the <italic>dstai</italic> (prob &#x3d; 0.994) so that the state variable is proportional to the trait. However, for those in the Human condition the relationship is reversed&#x2014;the greater the trait the lower the value of <italic>dstai.</italic> The distribution of the coefficient has mean &#x2212;0.49 with credible interval &#x2212;0.94 to &#x2212;0.01, and the probability of it being positive is 0.024 (so it has prob &#x3d; 1 &#x2212; 0.024 &#x3d; 0.976 of being negative). For those in the 3PP condition there is a moderate probability of there being a small negative association between state and trait (prob &#x3d; 1 &#x2212; 0.156 &#x3d; 0.844). Hence, overall, and with high posterior probability, for those in the Cartoon condition <italic>dstai</italic> is positively correlated with trait, for those in the Human condition <italic>dstai</italic> is negatively correlated with trait. The correlation between <italic>dstai</italic> and trait is possibly negative for the 3PP condition. These results are in accord with <xref ref-type="fig" rid="F3">Figures 3A&#x2013;C</xref>.</p>
<p>The equivalent to <xref ref-type="table" rid="T4">Table&#x20;4</xref> for the alternative response variables <italic>dstai1</italic> and <italic>dstai2</italic> where the <italic>staitrait</italic> in the third (human) exposure is compared to <italic>staitrait</italic> in the first or second exposure, is given in <xref ref-type="sec" rid="s11">Supplementary Table&#x20;S3</xref>.</p>
<p>The mean <italic>staitrait</italic> is 21.4&#x20;&#xb1; 7.0 (S.D.) and the median is 21. In addition to examining the relationship between the change in STAI state (<italic>dstai</italic>) and this covariate, we can consider what happens at its mean. <xref ref-type="fig" rid="F4">Figure&#x20;4</xref> shows the posterior distributions for the predicted <italic>dstai</italic> for each of the Cartoon, Human and 3PP conditions. It can be seen that the distributions reflect <xref ref-type="fig" rid="F3">Figure&#x20;3D</xref>. From these distributions we can compute the posterior probabilities of, for example, <italic>dstai</italic> &#x3c; &#x2212;10, and <italic>dstai</italic> &#x3c; &#x2212;5, and the two corresponding vertical lines are shown in <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>, and the probabilities we require are the areas to the left of those lines under the curves.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Posterior distributions of <italic>dstai</italic> at the mean level of <italic>staitrait</italic> for the Cartoon, Human and 3PP conditions.</p>
</caption>
<graphic xlink:href="frvir-02-695673-g004.tif"/>
</fig>
<p>The probabilities are shown in <xref ref-type="table" rid="T5">Table&#x20;5</xref>. A decrease of five in <italic>dstai</italic> has probability almost double for the Cartoon condition compared to the Human, and more than double in the case of the 3PP condition. For a decrease of 10 the Cartoon condition has a probability of 10&#x20;times the Human condition, and the 3PP condition more than 30&#x20;times greater. Hence, although, considered overall, in the 3PP condition <italic>dstai</italic> does not change much with <italic>staitrait</italic> and in the Cartoon condition it is proportional to it, the model predicts that for a participant with the average trait anxiety the 3PP condition appears to be the one that reduces anxiety the&#x20;most.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Posterior probabilities of the change in dstai being less than &#x2212;5 or &#x2212;10 at the mean level of staitrait.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Probability</th>
<th align="center">Cartoon</th>
<th align="center">Human</th>
<th align="center">3PP</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<inline-formula id="inf48">
<mml:math id="m52">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>5</mml:mn>
<mml:mtext>&#x7c;</mml:mtext>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="char" char=".">0.848</td>
<td align="char" char=".">0.445</td>
<td align="char" char=".">0.974</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf49">
<mml:math id="m53">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>10</mml:mn>
<mml:mtext>&#x7c;</mml:mtext>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="char" char=".">0.090</td>
<td align="char" char=".">0.009</td>
<td align="char" char=".">0.282</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-4">
<title>Goodness of Fit of the Model</title>
<p>Using the Stan program 4000 pseudo random observations were generated from the model, leading to posterior predicted distributions of the two response variables for each individual. We take the mean of each of these distributions per individual as a point estimate for the predicted value so that for each individual we obtain predicted values of the two response variables. The correlation between the observed and predicted values of <italic>own</italic> is r &#x3d; 0.79, with 95% confidence interval 0.66 to 0.88. For <italic>dstai</italic> the correlation is r &#x3d; 0.50, with 95% confidence interval 0.24 to 0.69. We quote confidence intervals here not for formal significance, but only to show the strength of the relationships. Hence, overall we conclude that the model fit to the data is acceptable.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>There are two findings of this study. The first is that the level of body ownership did not differ between embodiment in a cartoon character or as a human, and that the level of body ownership was high and comparable with previous results. In contrast the level of body ownership was lower for the 3PP condition. The second is that contrary to our original idea embodiment as the cartoon character, in the more humorous situation, did not result in a reduction of anxiety in relation to the background trait anxiety, but the change in state anxiety was proportional to the level of trait anxiety. However, in the case of human embodiment and audience the change in state anxiety was inversely related to trait anxiety. There was little or no effect of the 3PP condition, which means that irrespective of trait the change in state anxiety was essentially constant and small, with some evidence of a small decline. Further, a prediction of the model is that for the <italic>average</italic> level of trait anxiety the 3PP condition is associated with the greatest reduction in state anxiety.</p>
<p>In the remainder of this section we first discuss the findings in relation to body ownership, paying particular attention to embodiment of non-human characters. We then review studies of public speaking anxiety in VR, and move on to provide a possible explanation of our findings in relation to a well-known theoretical model of social anxiety. We conclude by pointing out some limitations of our study and future&#x20;work.</p>
<sec id="s4-1">
<title>Body Ownership</title>
<p>Although given the state of technology, all studies of embodiment in VR inevitably use characters that are not photorealistic, and could be described as &#x2018;cartoony&#x2019;, our study was different in the sense that the character was deliberately designed as a cartoon character, a bunny rabbit. Our question was whether this deliberately non-human character would lead to levels of body ownership we have seen in previous studies with embodiment as humans (e.g., (<xref ref-type="bibr" rid="B10">Banakou and Slater, 2014</xref>)). Our expectation was that this would be the case, since as discussed in the introduction the form of the virtual body does not seem to influence the level of body ownership, which is derived from multisensory integration rather than top down identification with the appearance of the body. However, all our previous studies have been with human characters, even if distorted by having a long arm or a tail, or being of the colour purple, or being a different age or&#x20;race.</p>
<p>There have been several studies with non-human characters. In (<xref ref-type="bibr" rid="B2">Ahn et&#x20;al., 2016</xref>) participants were embodied with a virtual cow body using 1PP and visuomotor synchrony (the cow body moved with the movements of the participant on all fours) and there was visuotactile synchrony (the cow body was prodded which was felt synchronously by the participant). The results showed that the level of body ownership was significantly higher than a condition where participants watched a video of the same events. However, the mean reported level of ownership was 2.57 on a five point scale, which is proportionally equivalent to 3.6 on a 7 point scale. In absolute terms this is much lower than the typical values we obtain (median at least 5, with the whole interquartile range above the mid-point of 4) as can be seen in <xref ref-type="fig" rid="F2">Figures 2A&#x2013;C</xref>, although the questionnaires used in the two cases overlapped but were different. In (<xref ref-type="bibr" rid="B35">Krekhov et&#x20;al., 2019</xref>) participants were embodied in several different types of animal body&#x2014;a bat, spider, tiger as well as human. Their equivalent scores for body ownership (&#x201c;acceptance&#x201d;) were on a scale from 0 to 6. Embodiment as the human had the lowest mean score (2.79 equivalent to 3.3 on a 7-point scale), the score for the bat was considerably higher (4.33) and for the spider 3.63. Again, the questionnaires overlapped with ours but were not the same, but the low score for human embodiment is unusual. This may be related to the fact that a measure of the degree of control over the virtual bodies was highest for the bat. This was a within-groups study so that participants were comparing the different experiences, and it is possible that factors such as novelty or excitement played a role in the different evaluations. 
In (<xref ref-type="bibr" rid="B16">Charbonneau et&#x20;al., 2017</xref>) participants were embodied in a giant Godzilla-like creature. Body ownership was not directly measured, but the point was to use this embodiment to improve gait while using a rehabilitation walking device. Since there was some evidence of gait improvement it is likely that there was an element of body ownership involved. In (<xref ref-type="bibr" rid="B5">Aymerich-Franch et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B3">Aymerich-Franch et&#x20;al., 2019</xref>) people were embodied in physical humanoid robots that they saw through a HMD mounted as eyes on the robot, and ownership scores were high and comparable to those typical of VR embodiment studies.</p>
<p>We suggest the following summary. It is possible to obtain some level of body ownership in completely non-human characters, and when there is multisensory integration that provides evidence that the virtual body is the person&#x2019;s body, then there will be greater scores in that synchronous condition than in other control conditions. However, these are based on comparisons. What&#x2019;s equally important is not just that a synchronous multisensory condition results in higher scores than a control condition but that the absolute scores also are themselves greater than would be expected by chance. In other words if we obtain random results on a questionnaire that is on a 7 point scale, then the median result will be around 4. A high score in absolute terms should be clearly greater than this, and there is little evidence of this at the moment. However, if the virtual body is humanoid, upright, with a face and limbs approximating humans, then the absolute body ownership scores will be high in themselves not just in comparison with a non-synchronous condition. In (<xref ref-type="bibr" rid="B42">Osimo et&#x20;al., 2015</xref>; <xref ref-type="bibr" rid="B48">Slater et&#x20;al., 2019</xref>) participants were able to compare embodiment in a virtual body that closely resembled their own body, and embodiment in a much older body. Even though one of the virtual bodies looked like themselves still the body ownership scores were not different between these two conditions. In the present study we have a direct comparison between embodiment as a bunny rabbit and a human body, in a between groups situation so that participants did not know of the other conditions. Still, we found that the body ownership was high and the same across these conditions, but dropped greatly for the non-synchronous (3PP) condition. This lends weight to the hypothesis suggested&#x20;above.</p>
</sec>
<sec id="s4-2">
<title>Public Speaking Anxiety</title>
<p>Although in our experiment we did not find that the humorous situation (embodiment as a bunny rabbit with a cartoon audience) improved outcomes overall, our finding is in accord with a large number of previous studies. In our case two embodiments as a human with a human audience led to a reduction of state anxiety in comparison with trait anxiety at the third session, supporting previous findings with respect to exposure therapy.</p>
<p>The first study of the efficacy of virtual reality for public speaking anxiety was reported in (<xref ref-type="bibr" rid="B41">North et&#x20;al., 1998</xref>). It exposed participants to an audience of about 100 in a large auditorium, and although the characters forming the audience were static they could be heard to speak and could ask questions. There were five sessions in an exposure therapy, and the control group had equivalent VR exposure, but unrelated to public speaking. It was found that the VR exposure therapy was successful in reducing public speaking anxiety compared to the control group. This approach is standard for the use of VR to help people with anxiety disorders, where the VR is used as a substitute for a real life experience. Logistically it is far easier for the clinician to expose people to the anxiety provoking situation in the office, in real-time with the clinician there, than to arrange real situations such as getting an audience together for multiple sessions, or to give the client &#x201c;homework&#x201d; which is carried out in the absence of the clinician.</p>
<p>There has been significant additional research over the past 3&#x2002;decades. In a meta-analysis of 30 randomised control trials that attempted to reduce fear of public speaking using a variety of methods (<xref ref-type="bibr" rid="B20">Ebrahimi et&#x20;al., 2019</xref>) it was found that there were no differences between outcomes that used face-to-face counseling and virtual reality. In the general area of social anxiety disorders a further study found that VR based therapy was effective in reducing anxiety, and in comparison with <italic>in vivo</italic> or exposure based on imagination again there was no difference in effect size (<xref ref-type="bibr" rid="B17">Chesham et&#x20;al., 2018</xref>). Overall a comprehensive meta-analysis of VR based psychology therapy found that it is effective, although studies are often small in size and not always RCTs (<xref ref-type="bibr" rid="B24">Freeman et&#x20;al., 2017</xref>).</p>
<p>By the time of the third talk, participants in the Human condition would have already given two previous talks, to the same virtual human audience and under the same conditions. Therefore, in accord with exposure therapy it is not surprising that their level of stress declined relative to their trait level of stress. However, those in the Cartoon condition had previously given two talks to the cartoon audience so that the third &#x201c;test&#x201d; scenario was the first time that they had experienced this Human audience. Since the humour idea was ineffective then the simpler explanation for the results is based on number of exposures.</p>
</sec>
<sec id="s4-3">
<title>The Cognitive Model of Social Phobia</title>
<p>Why did the cartoon idea not work in the sense that the change in state anxiety simply reflected trait anxiety? Our original idea was that the humour of the situation would allow participants to speak without anxiety to an audience, and thereby learn that this is possible, with this learning carrying over to later talks in front of a human audience. In the cognitive model of social phobia by Clark and Wells (<xref ref-type="bibr" rid="B19">Clark et&#x20;al., 1995</xref>) one of the factors is self-focussed attention and the accentuation of negative thoughts about the self especially with respect to the notion of supposed negative evaluation from others. In that case if a person with social phobia had to talk in front of an audience but as someone else we should expect that their anxiety would be reduced, which is what we expected for the Cartoon condition. In the study reported in (<xref ref-type="bibr" rid="B4">Aymerich-Franch et&#x20;al., 2014</xref>) participants gave a speech in front of a human virtual audience embodied in a human virtual body with a face that was their own likeness or the face of another. In a pre-exposure test participants indicated preference for the face that was unlike their own. However, the exposure results showed that there was at best a marginal reduction of anxiety for those with the dissimilar&#x20;face.</p>
<p>However, we did not take into account the possibility that even in the cartoon situation participants might still interpret the audience as responding negatively. In the Clark and Wells model social phobia sufferers, to the extent that they process external cues rather than be internally focussed, would be likely to interpret such cues as negative: &#x201c;In particular, they may be more likely to notice and remember responses from others that they interpret as signs of disapproval&#x201d; and that this would be particularly pointed in public speaking (<xref ref-type="bibr" rid="B18">Clark, 2001</xref>). In the Cartoon condition the cartoon audience, since it was so strange, would be particularly salient. However, for people with strong social phobia there would be no reason why they would not interpret the responses of the audience as negative, even seemingly positive events such as clapping being interpreted as negative (e.g., &#x201c;They are only clapping because they feel sorry for me&#x201d;).</p>
<p>Our results suggest that at the <italic>average</italic> level of trait anxiety the 3PP condition proved to be the one that had the greatest probability of reducing anxiety. This fits the Clark and Wells model since the 3PP condition was the one where they saw themselves from the outside, and thus had the maximum psychological distance from themselves as speaker. This accords well with self-distancing theory (<xref ref-type="bibr" rid="B36">Kross and Ayduk, 2017</xref>) where people recall an event that caused anxiety from a third person perspective as a &#x201c;fly on the wall&#x201d; rather than from an embodied first person perspective. Participants are instructed when recalling an affectively negative past event: &#x201c;Now take a few steps back. Move away from the situation to a point where you can now watch the event unfold from a distance and see yourself in the event.&#x201d; Research on self-distancing theory shows that this leads to a reduction of negative affect. Participants answered the questionnaire after the event itself, so it is possible that their disembodied third-person experience resulted in less stress. However, this finding about the average level of trait anxiety is an inference from the posterior statistical model and would need to be verified with a further experimental&#x20;study.</p>
</sec>
<sec id="s4-4">
<title>Limitations</title>
<p>The first limitation of this study is that the sample consisted only of women, and it remains to be seen if these results would generalise to other genders. Second, the sample sizes were relatively small, however, the posterior distributions were clearly dominated by the data, as evidenced by the narrow and focussed posterior credible intervals compared to the prior intervals. Third, it would be possible to extend the experimental design to two factors: type of embodiment (Cartoon, Human, 3PP) and type of audience (Cartoon, Human). This would be interesting further work to elicit how much the results were due to the embodiment and the audience, the design being able to separate these two factors.</p>
<p>Although we did not find any advantage for the Cartoon condition in this application to fear of public speaking, it is possible that it may be beneficial in other psychological conditions. The role of humour in promoting mental and physical health is well-known&#x2014;e.g., (<xref ref-type="bibr" rid="B26">Gelkopf and Kreitler, 1996</xref>)&#x2014;and has in particular been studied in relation to overcoming depression (<xref ref-type="bibr" rid="B54">Tagalidou et&#x20;al., 2019</xref>). This could be a useful line of further research.</p>
</sec>
</sec>
</body>
<back>
<sec id="s5">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s11">Supplementary Material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s6">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by the Comisi&#xf3;n de Bio&#xe9;tica de la Universitat de Barcelona (IRB00003099). The patients/participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec id="s7">
<title>Author Contributions</title>
<p>AB designed and implemented the virtual reality scenario, carried out the experiment and compiled the data. XN contributed to the implementation of the virtual reality scenario. DB and RO contributed to the design and implementation of the experiment. VO contributed to the design and implementation of the characters. MS formulated the original concept, designed the experiment, carried out the analysis, wrote the first draft of the paper and obtained the funding. All authors contributed to a review of the draft&#x20;paper.</p>
</sec>
<sec id="s8">
<title>Funding</title>
<p>This research was originally funded under the European Seventh Framework Program, Future and Emerging Technologies (FET), Project Virtual Embodiment and Robotic Re-Embodiment (VERE) Grant Agreement Number 257695, and completed under the ERC Advanced Grant MoTIVE 742989.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors, and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ack>
<p>The authors would like to thank Sofia Seinfeld for helping with the experiments, and Xenxo &#xc1;lvarez for helping with the cartoon avatars.</p>
</ack>
<sec id="s11">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frvir.2021.695673/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frvir.2021.695673/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Video1.MOV" id="SM1" mimetype="video/quicktime" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Table1.DOCX" id="SM2" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="DataSheet1.XLSX" id="SM3" mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abu&#xed;n</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>Rivera</surname>
<given-names>L. d.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>La medici&#xf3;n de s&#xed;ntomas psicol&#xf3;gicos y psicosom&#xe1;ticos: el Listado de S&#xed;ntomas Breve (LSB-50)</article-title>. <source>Cl&#xed;nica y Salud</source> <volume>25</volume> (<issue>2</issue>), <fpage>131</fpage>&#x2013;<lpage>141</lpage>. <pub-id pub-id-type="doi">10.1016/j.clysa.2014.06.001</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ahn</surname>
<given-names>S. J.&#x20;G.</given-names>
</name>
<name>
<surname>Bostick</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ogle</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Nowak</surname>
<given-names>K. L.</given-names>
</name>
<name>
<surname>McGillicuddy</surname>
<given-names>K. T.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.&#x20;N.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Experiencing Nature: Embodying Animals in Immersive Virtual Environments Increases Inclusion of Nature in Self and Involvement with Nature</article-title>. <source>J.&#x20;Comput-mediat Comm.</source> <volume>21</volume> (<issue>6</issue>), <fpage>399</fpage>&#x2013;<lpage>419</lpage>. <pub-id pub-id-type="doi">10.1111/jcc4.12173</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aymerich-Franch</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kishore</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>When Your Robot Avatar Misbehaves You Are Likely to Apologize: an Exploration of Guilt during Robot Embodiment</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>12</volume>, <fpage>217</fpage>&#x2013;<lpage>226</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00556-5</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aymerich-Franch</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kizilcec</surname>
<given-names>R. F.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.&#x20;N.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>The Relationship between Virtual Self Similarity and Social Anxiety</article-title>. <source>Front. Hum. Neurosci.</source> <volume>8</volume>, <fpage>944</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2014.00944</pub-id> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aymerich-Franch</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Petit</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ganesh</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kheddar</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Non-human Looking Robot Arms Induce Illusion of Embodiment</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>9</volume> (<issue>4</issue>), <fpage>479</fpage>&#x2013;<lpage>490</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-017-0397-8</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ayres</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1990</year>). <article-title>Situational Factors and Audience Anxiety</article-title>. <source>Commun. Educ.</source> <volume>39</volume> (<issue>4</issue>), <fpage>283</fpage>&#x2013;<lpage>291</lpage>. <pub-id pub-id-type="doi">10.1080/03634529009378810</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kishore</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Virtually Being Einstein Results in an Improvement in Cognitive Task Performance and a Decrease in Age Bias</article-title>. <source>Front. Psychol.</source> <volume>9</volume> (<issue>917</issue>), <fpage>917</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2018.00917</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Groten</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Illusory Ownership of a Virtual Child Body Causes Overestimation of Object Sizes and Implicit Attitude Changes</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>110</volume>, <fpage>12846</fpage>&#x2013;<lpage>12851</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1306779110</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Hanumanthu</surname>
<given-names>P. D.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Virtual Embodiment of White People in a Black Virtual Body Leads to a Sustained Reduction in Their Implicit Racial Bias</article-title>. <source>Front. Hum. Neurosci.</source> <volume>10</volume>, <fpage>601</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2016.00601</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Body Ownership Causes Illusory Self-Attribution of Speaking and Influences Subsequent Real Speaking</article-title>. <source>Proc. Natl. Acad. Sci. USA</source> <volume>111</volume> (<issue>49</issue>), <fpage>17678</fpage>&#x2013;<lpage>17683</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1414936111</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barberia</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Oliva</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Bourdin</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Virtual Mortality and Near-Death Experience after a Prolonged Exposure in a Shared Virtual Reality May lead to Positive Life-Attitude Changes</article-title>. <source>PloS one</source> <volume>13</volume> (<issue>11</issue>), <fpage>e0203358</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0203358</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bedder</surname>
<given-names>R. L.</given-names>
</name>
<name>
<surname>Bush</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Peck</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Burgess</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A Mechanistic Account of Bodily Resonance and Implicit Bias</article-title>. <source>Cognition</source> <volume>184</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2018.11.010</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blanke</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Serino</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Behavioral, Neural, and Computational Principles of Bodily Self-Consciousness</article-title>. <source>Neuron</source> <volume>88</volume> (<issue>1</issue>), <fpage>145</fpage>&#x2013;<lpage>166</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2015.09.029</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Botvinick</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cohen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Rubber Hands &#x27;feel&#x27; Touch that Eyes See</article-title>. <source>Nature</source> <volume>391</volume> (<issue>6669</issue>), <fpage>756</fpage>. <pub-id pub-id-type="doi">10.1038/35784</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carpenter</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Gelman</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hoffman</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Goodrich</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Betancourt</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Stan: A Probabilistic Programming Language</article-title>. <source>J.&#x20;Stat. Softw.</source> <volume>76</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v076.i01</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Charbonneau</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Dallaire-C&#xf4;t&#xe9;</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>C&#xf4;t&#xe9;</surname>
<given-names>S. S-P.</given-names>
</name>
<name>
<surname>Labbe</surname>
<given-names>D. R.</given-names>
</name>
<name>
<surname>Mezghani</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Shahnewaz</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Gaitzilla: Exploring the Effect of Embodying a Giant Monster on Lower Limb Kinematics and Time Perception</article-title>,&#x201d; in <conf-name>2017 International Conference on Virtual Rehabilitation (ICVR)</conf-name> (<publisher-name>IEEE</publisher-name>). <pub-id pub-id-type="doi">10.1109/icvr.2017.8007535</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chesham</surname>
<given-names>R. K.</given-names>
</name>
<name>
<surname>Malouff</surname>
<given-names>J.&#x20;M.</given-names>
</name>
<name>
<surname>Schutte</surname>
<given-names>N. S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Meta-analysis of the Efficacy of Virtual Reality Exposure Therapy for Social Anxiety</article-title>. <source>Behav. Change</source> <volume>35</volume> (<issue>3</issue>), <fpage>152</fpage>&#x2013;<lpage>166</lpage>. <pub-id pub-id-type="doi">10.1017/bec.2018.15</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>D. M.</given-names>
</name>
</person-group> (<year>2001</year>). &#x201c;<article-title>A Cognitive Perspective on Social Phobia</article-title>,&#x201d; in <source>International Handbook of Social Anxiety: Concepts, Research and Interventions Relating to the Self and Shyness</source>, <fpage>405</fpage>&#x2013;<lpage>430</lpage>. </citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>Wells</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>1995</year>). &#x201c;<article-title>A Cognitive Model of Social Phobia</article-title>,&#x201d; in <source>Social Phobia: Diagnosis, Assessment, and Treatment</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Heimberg</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Liebowitz</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Hope</surname>
<given-names>D A.</given-names>
</name>
<name>
<surname>Schneier</surname>
<given-names>R.</given-names>
</name>
</person-group> (<publisher-loc>New York</publisher-loc>: <publisher-name>Guilford Press</publisher-name>), <fpage>69</fpage>&#x2013;<lpage>93</lpage>. </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ebrahimi</surname>
<given-names>O. V.</given-names>
</name>
<name>
<surname>Pallesen</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kenter</surname>
<given-names>R. M. F.</given-names>
</name>
<name>
<surname>Nordgreen</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Psychological Interventions for the Fear of Public Speaking: a Meta-Analysis</article-title>. <source>Front. Psychol.</source> <volume>10</volume>, <fpage>488</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2019.00488</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>The Concept of Body Ownership and its Relation to Multisensory Integration</article-title>,&#x201d; in <source>The New Handbook of Multisensory Processes</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Stein</surname>
<given-names>B E</given-names>
</name>
</person-group> (<publisher-loc>Cambridge, MA, USA</publisher-loc>: <publisher-name>MIT Press</publisher-name>), <fpage>775</fpage>&#x2013;<lpage>792</lpage>. </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
<name>
<surname>Spence</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Passingham</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>That&#x27;s My Hand! Activity in Premotor Cortex Reflects Feeling of Ownership of a Limb</article-title>. <source>Science</source> <volume>305</volume>, <fpage>875</fpage>&#x2013;<lpage>877</lpage>. <pub-id pub-id-type="doi">10.1126/science.1097011</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferreira Marinho</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>Mesquita de Medeiros</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>C&#xf4;rtes Gama</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>Caldas Teixeira</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Fear of Public Speaking: Perception of College Students and Correlates</article-title>. <source>J.&#x20;Voice</source> <volume>31</volume> (<issue>1</issue>), <fpage>127</fpage>&#x2013;<lpage>e11</lpage>. <comment>e7-e11</comment>. <pub-id pub-id-type="doi">10.1016/j.jvoice.2015.12.012</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Freeman</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Reeve</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Robinson</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ehlers</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Clark</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Virtual Reality in the Assessment, Understanding, and Treatment of Mental Health Disorders</article-title>. <source>Psychol. Med.</source> <volume>47</volume>, <fpage>2393</fpage>&#x2013;<lpage>2400</lpage>. <pub-id pub-id-type="doi">10.1017/S003329171700040X</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gallego</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Botella</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Quero</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Garcia-Palacios</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ba&#xf1;os</surname>
<given-names>R. M.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Validation of the Personal Report Confidence as Speaker in a Spanish Clinical Sample</article-title>. <source>Behav. Psychol.</source> <volume>17</volume> (<issue>3</issue>), <fpage>413</fpage>&#x2013;<lpage>431</lpage>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.behavioralpsycho.com/producto/validacion-del-cuestionario-de-confianza-para-hablar-en-publico-en-una-muestra-clinica-espanola/">https://www.behavioralpsycho.com/producto/validacion-del-cuestionario-de-confianza-para-hablar-en-publico-en-una-muestra-clinica-espanola/</ext-link>
</comment> </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gelkopf</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kreitler</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Is Humor Only Fun, an Alternative Cure or Magic? the Cognitive Therapeutic Potential of Humor</article-title>. <source>J.&#x20;Cogn. Psychother</source> <volume>10</volume> (<issue>4</issue>), <fpage>235</fpage>&#x2013;<lpage>254</lpage>. <pub-id pub-id-type="doi">10.1891/0889-8391.10.4.235</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonzalez-Franco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ofek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Antley</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>The Rocketbox Library and the Utility of Freely Available Rigged Avatars</article-title>. <source>Front. Virtual Real.</source> <volume>1</volume>, <fpage>561558</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2020.561558</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guterstam</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gentile</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>The Invisible Hand Illusion: Multisensory Integration Leads to the Embodiment of a Discrete Volume of Empty Space</article-title>. <source>J.&#x20;Cogn. Neurosci.</source> <volume>25</volume> (<issue>7</issue>), <fpage>1078</fpage>&#x2013;<lpage>1099</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_00393</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guterstam</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Petkova</surname>
<given-names>V. I.</given-names>
</name>
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>The Illusion of Owning a Third Arm</article-title>. <source>PloS one</source> <volume>6</volume> (<issue>2</issue>), <fpage>e17208</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0017208</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hoyet</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Argelaguet</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Nicole</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>L&#xe9;cuyer</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>"Wow! I Have Six Fingers!": Would You Accept Structural Changes of Your Hand in VR?</article-title> <source>Front. Robot. AI</source> <volume>3</volume>, <fpage>27</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2016.00027</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jackson</surname>
<given-names>J.&#x20;M.</given-names>
</name>
<name>
<surname>Latan&#xe9;</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>All Alone in Front of All Those People: Stage Fright as a Function of Number and Type of Co-performers and Audience</article-title>. <source>J.&#x20;Personal. Soc. Psychol.</source> <volume>40</volume> (<issue>1</issue>), <fpage>73</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.40.1.73</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Jones</surname>
<given-names>J.&#x20;A.</given-names>
</name>
<name>
<surname>Swan</surname>
<given-names>J.&#x20;E.</given-names>
</name>
<name>
<surname>Singh</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kolstad</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ellis</surname>
<given-names>S. R.</given-names>
</name>
</person-group> (<year>2008</year>). &#x201c;<article-title>The Effects of Virtual Reality, Augmented Reality, and Motion Parallax on Egocentric Depth Perception</article-title>,&#x201d; in <conf-name>Proceedings of the 5th Symposium on Applied Perception in Graphics and Visualization</conf-name> (<publisher-loc>Los Angeles, CA, USA</publisher-loc>: <publisher-name>ACM</publisher-name>). </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Groten</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>The Sense of Embodiment in Virtual Reality</article-title>. <source>Presence: Teleoperators and Virtual Environments</source> <volume>21</volume>, <fpage>373</fpage>&#x2013;<lpage>387</lpage>. <pub-id pub-id-type="doi">10.1162/pres_a_00124</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilteni</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Normand</surname>
<given-names>J.-M.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Extending Body Space in Immersive Virtual Reality: A Very Long Arm Illusion</article-title>. <source>PLoS ONE</source> <volume>7</volume>, <fpage>e40867</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0040867</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Krekhov</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cmentowski</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kr&#xfc;ger</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>The Illusion of Animal Body Ownership and its Potential for Virtual Reality Games</article-title>,&#x201d; in <conf-name>2019 IEEE Conference on Games (CoG)</conf-name> (<publisher-name>IEEE</publisher-name>). <pub-id pub-id-type="doi">10.1109/cig.2019.8848005</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kross</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ayduk</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Self-Distancing</article-title>,&#x201d; in <source>Advances in Experimental Social Psychology</source> (<publisher-name>Elsevier</publisher-name>), <fpage>81</fpage>&#x2013;<lpage>136</lpage>. <pub-id pub-id-type="doi">10.1016/bs.aesp.2016.10.002</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kruschke</surname>
<given-names>J.&#x20;K.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Introduction to Special Section on Bayesian Data Analysis</article-title>. <source>Perspect. Psychol. Sci.</source> <volume>6</volume> (<issue>3</issue>), <fpage>272</fpage>&#x2013;<lpage>273</lpage>. <pub-id pub-id-type="doi">10.1177/1745691611406926</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lemoine</surname>
<given-names>N. P.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Moving beyond Noninformative Priors: Why and How to Choose Weakly Informative Priors in Bayesian Analyses</article-title>. <source>Oikos</source> <volume>128</volume> (<issue>7</issue>), <fpage>912</fpage>&#x2013;<lpage>928</lpage>. <pub-id pub-id-type="doi">10.1111/oik.05985</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Lin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>J&#xf6;rg</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Need a Hand? How Appearance Affects the Virtual Hand Illusion</article-title>,&#x201d; in <conf-name>SAP '16: Proceedings of the ACM Symposium on Applied Perception</conf-name>, <conf-loc>Anaheim, CA</conf-loc>, <conf-date>July 22&#x2013;23, 2016</conf-date> (<publisher-loc>New York City, NY</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>69</fpage>&#x2013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1145/2931002.2931006</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Maister</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Tsakiris</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Changing Bodies Changes Minds: Owning Another Body Affects Social Cognition</article-title>. <source>Trends Cogn. Sci.</source> <volume>19</volume> (<issue>1</issue>), <fpage>6</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2014.11.001</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>North</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>North</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Coble</surname>
<given-names>J.&#x20;R.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Virtual Reality Therapy: an Effective Treatment for the Fear of Public Speaking</article-title>. <source>Ijvr</source> <volume>3</volume> (<issue>3</issue>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.20870/ijvr.1998.3.3.2625</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Osimo</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Pizarro</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Conversations between Self and Self as Sigmund Freud-A Virtual Body Ownership Paradigm for Self Counselling</article-title>. <source>Sci. Rep.</source> <volume>5</volume>, <fpage>13899</fpage>. <pub-id pub-id-type="doi">10.1038/srep13899</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Paul</surname>
<given-names>G. L.</given-names>
</name>
</person-group> (<year>1966</year>). <source>Insight vs. Desensitization in Psychotherapy: An experiment in Anxiety Reduction</source>. <publisher-name>Stanford University Press</publisher-name>. </citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peck</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Seinfeld</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Aglioti</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Putting Yourself in the Skin of a Black Avatar Reduces Implicit Racial Bias</article-title>. <source>Conscious. Cogn.</source> <volume>22</volume>, <fpage>779</fpage>&#x2013;<lpage>787</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2013.04.016</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Perez-Marcos</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Is My Hand Connected to My Body? The Impact of Body Continuity and Arm Alignment on the Virtual Hand Illusion</article-title>. <source>Cogn. Neurodyn.</source> <volume>6</volume> (<issue>4</issue>), <fpage>295</fpage>&#x2013;<lpage>305</lpage>. <pub-id pub-id-type="doi">10.1007/s11571-011-9178-5</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pertaub</surname>
<given-names>D.-P.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Barker</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>An Experiment on Public Speaking Anxiety in Response to Three Different Types of Virtual Audience</article-title>. <source>Presence: Teleoperators &#x26; Virtual Environments</source> <volume>11</volume> (<issue>1</issue>), <fpage>68</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1162/105474602317343668</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Seisdedos</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>1988</year>). <source>Adaptaci&#xf3;n Espa&#xf1;ola del STAI, Cuestionario de ansiedad estado-rasgo [Spanish adaptation of the STAI, State-Trait Anxiety Inventory]</source>. <publisher-loc>Madrid</publisher-loc>: <publisher-name>Tea Ediciones</publisher-name>. </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Neyret</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Johnston</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Iruretagoyena</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Crespo</surname>
<given-names>M. &#xc1;. d. l. C.</given-names>
</name>
<name>
<surname>Alab&#xe8;rnia-Segura</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>An Experimental Study of a Virtual Reality Counselling Paradigm Using Embodied Self-Dialogue</article-title>. <source>Sci. Rep.</source> <volume>9</volume> (<issue>1</issue>), <fpage>10903</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-46877-3</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Spanlang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Sanchez-Vives</surname>
<given-names>M. V.</given-names>
</name>
<name>
<surname>Blanke</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>First Person Experience of Body Transfer in Virtual Reality</article-title>. <source>PLOS ONE</source> <volume>5</volume> (<issue>5</issue>), <fpage>e10564</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0010564</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Spielberger</surname>
<given-names>C. D.</given-names>
</name>
</person-group> (<year>1983</year>). <source>Manual for the State-Trait Anxiety Inventory STAI (Form Y) (&#x201c;self-Evaluation Questionnaire&#x201d;)</source>. </citation>
</ref>
<ref id="B51">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Spielberger</surname>
<given-names>C. D.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>State&#x2010;Trait Anxiety Inventory</article-title>,&#x201d; in <source>The Corsini Encyclopedia of Psychology</source>, <fpage>1</fpage>. </citation>
</ref>
<ref id="B52">
<citation citation-type="book">
<collab>Stan Development Team</collab> (<year>2011-2019</year>). <source>Stan Modeling Language Users Guide and Reference Manual 2.25</source>. </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Steptoe</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Human Tails: Ownership and Control of Extended Humanoid Avatars</article-title>. <source>IEEE Trans. Vis. Comput. Graphics</source> <volume>19</volume>, <fpage>583</fpage>&#x2013;<lpage>590</lpage>. <pub-id pub-id-type="doi">10.1109/tvcg.2013.32</pub-id> </citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tagalidou</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Distlberger</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Loderer</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Laireiter</surname>
<given-names>A. R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Efficacy and Feasibility of a Humor Training for People Suffering from Depression, Anxiety, and Adjustment Disorder: a Randomized Controlled Trial</article-title>. <source>BMC psychiatry</source> <volume>19</volume> (<issue>1</issue>), <fpage>93</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1186/s12888-019-2075-x</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tajadura-Jim&#xe9;nez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Banakou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bianchi-Berthouze</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Embodiment in a Child-like Talking Virtual Body Influences Object Size Perception, Self-Identification, and Subsequent Real Speaking</article-title>. <source>Sci. Rep.</source> <volume>7</volume> (<issue>1</issue>), <fpage>9637</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-09497-3</pub-id> </citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tieri</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Tidoni</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pavone</surname>
<given-names>E. F.</given-names>
</name>
<name>
<surname>Aglioti</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Mere Observation of Body Discontinuity Affects Perceived Ownership and Vicarious agency over a Virtual Hand</article-title>. <source>Exp. Brain Res.</source> <volume>233</volume> (<issue>4</issue>), <fpage>1247</fpage>&#x2013;<lpage>1259</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-015-4202-3</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van De Schoot</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Winter</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Ryan</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Zondervan-Zwijnenburg</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Depaoli</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>A Systematic Review of Bayesian Articles in Psychology: The Last 25&#x20;Years</article-title>. <source>Psychol. Methods</source> <volume>22</volume> (<issue>2</issue>), <fpage>217</fpage>&#x2013;<lpage>239</lpage>. <pub-id pub-id-type="doi">10.1037/met0000100</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>van der Hoort</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Guterstam</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ehrsson</surname>
<given-names>H. H.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Being Barbie: the Size of One&#x27;s Own Body Determines the Perceived Size of the World</article-title>. <source>PLoS ONE</source> <volume>6</volume>, <fpage>e20195</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0020195</pub-id> </citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vanni</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Conversano</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Del Debbio</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Landi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Carlini</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Fanciullacci</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). <article-title>A Survey on Virtual Environment Applications to Fear of Public Speaking</article-title>. <source>Eur. Rev. Med. Pharmacol. Sci.</source> <volume>17</volume> (<issue>12</issue>), <fpage>1561</fpage>&#x2013;<lpage>1568</lpage>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://europepmc.org/article/med/23832719">https://europepmc.org/article/med/23832719</ext-link>
</comment> </citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vehtari</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gelman</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gabry</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Practical Bayesian Model Evaluation Using Leave-One-Out Cross-Validation and WAIC</article-title>. <source>Stat. Comput.</source> <volume>27</volume> (<issue>5</issue>), <fpage>1413</fpage>&#x2013;<lpage>1432</lpage>. <pub-id pub-id-type="doi">10.1007/s11222-016-9696-4</pub-id> </citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Won</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lanier</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Homuncular Flexibility in Virtual Reality</article-title>. <source>J.&#x20;Comput-mediat Comm.</source> <volume>20</volume> (<issue>3</issue>), <fpage>241</fpage>&#x2013;<lpage>259</lpage>. <pub-id pub-id-type="doi">10.1111/jcc4.12107</pub-id> </citation>
</ref>
<ref id="B62">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Won</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.&#x20;N.</given-names>
</name>
<name>
<surname>Lanier</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Homuncular Flexibility: the Human Ability to Inhabit Nonhuman Avatars</article-title>,&#x201d; in <source>Emerging Trends in the Social and Behavioral Sciences: An Interdisciplinary, Searchable, and Linkable Resource</source>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1002/9781118900772.etrds0165</pub-id> </citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yee</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>The Proteus Effect: The Effect of Transformed Self-Representation on Behavior</article-title>. <source>Hum. Comm. Res.</source> <volume>33</volume>, <fpage>271</fpage>&#x2013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1111/j.1468-2958.2007.00299.x</pub-id> </citation>
</ref>
<ref id="B64">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Yuan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Steed</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>Is the Rubber Hand Illusion Induced by Immersive Virtual Reality?</article-title>&#x201d; in <conf-name>2010 IEEE Virtual Reality Conference (VR)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>95</fpage>&#x2013;<lpage>102</lpage>. </citation>
</ref>
</ref-list>
</back>
</article>