<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">644529</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2021.644529</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>An Immersive Investment Game to Study Human-Robot Trust</article-title>
<alt-title alt-title-type="left-running-head">Z&#xf6;rner et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Immersive Investment Game for HRI</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Z&#xf6;rner</surname>
<given-names>Sebastian</given-names>
</name>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1096678/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Arts</surname>
<given-names>Emy</given-names>
</name>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1236963/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Vasiljevic</surname>
<given-names>Brenda</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Srivastava</surname>
<given-names>Ankit</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Schmalzl</surname>
<given-names>Florian</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mir</surname>
<given-names>Glareh</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Bhatia</surname>
<given-names>Kavish</given-names>
</name>
<uri xlink:href="https://loop.frontiersin.org/people/1178455/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Strahl</surname>
<given-names>Erik</given-names>
</name>
<uri xlink:href="https://loop.frontiersin.org/people/916439/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Peters</surname>
<given-names>Annika</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Alpay</surname>
<given-names>Tayfun</given-names>
</name>
<uri xlink:href="https://loop.frontiersin.org/people/1179659/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wermter</surname>
<given-names>Stefan</given-names>
</name>
<uri xlink:href="https://loop.frontiersin.org/people/21776/overview"/>
</contrib>
</contrib-group>
<aff>Knowledge Technology Group, Department of Informatics, Universit&#xe4;t Hamburg, <addr-line>Hamburg</addr-line>, <country>Germany</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/953010/overview">Mohammad Obaid</ext-link>, Chalmers University of Technology, Sweden</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/105752/overview">Francesco Rea</ext-link>, Italian Institute of Technology (IIT), Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1226351/overview">Michael Heron</ext-link>, Chalmers University of Technology, Sweden</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Sebastian Z&#xf6;rner, <email>sebastian.zoerner@informatik.uni-hamburg.de</email>
</corresp>
<fn fn-type="equal" id="fn1">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors have contributed equally to this work and share first authorship</p>
</fn>
<fn fn-type="other">
<p>This article was submitted to Human-Robot Interaction, a section of the journal Frontiers in Robotics and&#x20;AI</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>04</day>
<month>06</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>8</volume>
<elocation-id>644529</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>12</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>04</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Z&#xf6;rner, Arts, Vasiljevic, Srivastava, Schmalzl, Mir, Bhatia, Strahl, Peters, Alpay and Wermter.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Z&#xf6;rner, Arts, Vasiljevic, Srivastava, Schmalzl, Mir, Bhatia, Strahl, Peters, Alpay and Wermter</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>As robots become more advanced and capable, developing trust is an important factor of human-robot interaction and cooperation. However, as multiple environmental and social factors can influence trust, it is important to develop more elaborate scenarios and methods to measure human-robot trust. A widely used measurement of trust in social science is the <italic>investment game</italic>. In this study, we propose a scaled-up, immersive, science fiction Human-Robot Interaction (HRI) scenario for intrinsic motivation on human-robot collaboration, built upon the investment game and aimed at adapting the investment game for human-robot trust. For this purpose, we utilize two Neuro-Inspired COmpanion (NICO) robots and a projected scenery. We investigate the applicability of our space mission experiment design to measure trust and the impact of non-verbal communication. We observe a correlation of 0.43 (<inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.02</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) between self-assessed trust and trust measured from the game, and a positive impact of non-verbal communication on trust (<inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.0008</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and robot perception for anthropomorphism (<inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.007</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and animacy (<inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.00002</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). We conclude that our scenario is an appropriate method to measure trust in human-robot interaction and also to study how non-verbal communication influences a human&#x2019;s trust in robots.</p>
</abstract>
<kwd-group>
<kwd>human-robot interaction</kwd>
<kwd>investment game</kwd>
<kwd>non-verbal communication</kwd>
<kwd>science fiction</kwd>
<kwd>human-robot trust</kwd>
</kwd-group>
<contract-sponsor id="cn001">Deutsche Forschungsgemeinschaft<named-content content-type="fundref-id">10.13039/501100001659</named-content>
</contract-sponsor>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>As robot capabilities become more and more sophisticated, we not only want them to solve increasingly complex tasks independently but ultimately aid humans in their day-to-day life. Moreover, such social robots should act in a way that is reliable, transparent, and builds trust in their capabilities as well as their intentions (<xref ref-type="bibr" rid="B24">Felzmann et&#x20;al., 2019</xref>). As soon as humans and robots autonomously work in a team on collaborative tasks, trust becomes essential for effective human-robot interaction (<xref ref-type="bibr" rid="B14">Casper and Murphy, 2003</xref>). This shows the need for a deeper understanding of what makes us willing to cooperate with robots and which factors enhance or destroy trust during interactions.</p>
<p>We approach this topic by adopting the investment game by <xref ref-type="bibr" rid="B5">Berg et&#x20;al. (1995)</xref>, a widely used experiment to measure trust in <italic>human-human</italic> collaboration. In the investment game, trust is measured as the amount of money a person is willing to give to an anonymous counterpart, in the prospect of a future profit. While others have used it in an HRI setting, some report limitations and differences when applying it to human-robot collaboration (which we elaborate on in <xref ref-type="sec" rid="s2">Section 2</xref>). We, therefore, adapt the original investment game toward a persuasive HRI cooperative scenario by scaling up both the robotic agent as well as the environment. With scaling up we allude to the progression toward a human-like interaction: a realistic cooperative scenario as opposed to an abstract exchange of money. We do this by introducing a plausible currency for both humans as well as robotic agents, along with a weighted choice between two trustees, and removing the ability of the participant to make choices based on domain knowledge. The result is an HRI scenario, concealed as a futuristic, immersive spaceship adventure containing multiple rounds of the investment game for participants to develop intrinsic motivation to collaborate with robots.</p>
<p>In this scenario, we utilize two Neuro-Inspired COmpanion (NICO) humanoid robots by <xref ref-type="bibr" rid="B40">Kerzel et&#x20;al. (2020)</xref> to advise the participant who acts as a spaceship commander. A voice-controlled artificial intelligence system which we refer to as &#x201c;Wendigo&#x201d; guides the participant through the experiment, where a large curved projector screen with an interactive video feed simulates the inside of the ship&#x2019;s cockpit (see <xref ref-type="fig" rid="F1">Figure&#x20;1</xref>). The setup is fully autonomous, with automatic speech recognition, visual detection as well as dialogue management implemented as ROS (<xref ref-type="bibr" rid="B63">Quigley et&#x20;al., 2009</xref>) services, yet&#x20;allows the experimenter to intervene when necessary. During the scenario, participants encounter four similar challenges (navigation malfunctioning, impending asteroids, engine failures and leaks in the cooling system): after the problem is announced by the ship AI, the robotic advisers propose two diverging solutions. Subsequently, the participants are asked to make a choice by distributing the ship&#x2019;s energy resources between the two robots and themselves, which we evaluate as a quantitative measurement for&#x20;trust.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>The experimental setup. On the table there are three compartments, with the one in the front (closest to the participant) containing the total amount of 7 energy cells to distribute.</p>
</caption>
<graphic xlink:href="frobt-08-644529-g001.tif"/>
</fig>
<p>The immersive setup allows controlling the emergence, destruction, and reconstruction of trust in the robotic companions throughout the game. To improve the robot&#x2019;s image and to ensure an experience which results in a more human-like interaction, we add non-verbal cues to our robots such as eye gaze toward the participants, facial expressions and gestures (see <xref ref-type="sec" rid="s3-2-3">Section 3.2.3</xref> for details). Such features of non-verbal communication (NVC), generally defined as &#x201c;unspoken dialogue&#x201d; (<xref ref-type="bibr" rid="B12">Burgoon et&#x20;al., 2016</xref>), have previously been shown to account for over 60% of the meaning in communication for human interactions (<xref ref-type="bibr" rid="B72">Saunderson and Nejat, 2019</xref>), as they allow us to communicate mental states such as thoughts and feelings (<xref ref-type="bibr" rid="B1">Ambady and Weisbuch, 2010</xref>). They are also thought to play an important role in human-<italic>robot</italic> interaction, as the implicit, robotic, non-verbal communication improves the efficiency and transparency of the interaction, leading to a better cooperation between human subjects and robots (<xref ref-type="bibr" rid="B9">Breazeal et&#x20;al., 2005</xref>).</p>
<p>As non-verbal communication is essential to both human-human and human-robot trust (<xref ref-type="bibr" rid="B19">DeSteno et&#x20;al., 2012</xref>; <xref ref-type="bibr" rid="B12">Burgoon et&#x20;al., 2016</xref>), we strive to measure the effect of NVCs in our HRI scenario to assess how well it simulates a natural interaction. Therefore, we utilize our novel investment game scenario to investigate two research questions related to both evaluating trust as well as the impact of NVCs on trust:<list list-type="simple">
<list-item>
<p>1. Does our variant of the investment game provide a reliable measurement for human-robot trust?</p>
</list-item>
<list-item>
<p>2. Does non-verbal communication (NVC) affect human-robot trust positively?</p>
</list-item>
</list>
</p>
<p>After surveying the latest research on measuring trust in human-robot interaction and its shortcomings (Chapter 2) we describe our approach (Chapter 3) and introduce an empirical study to evaluate our hypotheses (Chapter 4). We discuss the results as well as the limitations of this study (Chapter 5) and conclude our findings (Chapter 6) with an outlook on further research.</p>
</sec>
<sec id="s2">
<title>2 Related Work</title>
<sec id="s2-1">
<title>2.1 Trust and the Investment Game</title>
<p>One of the biggest challenges in human-robot interaction is to develop a more natural relationship with robots. Previous research shows that people refrain from accepting, tolerating, and using robotic agents in everyday tasks, mainly because robots still appear like intruders (<xref ref-type="bibr" rid="B84">Zanatto, 2019</xref>). A survey by the institute DemoSCOPE (<inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1007</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) has found that while <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:mn>50</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> would accept information from a robot, only <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:mn>16</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> would be willing to work in a team with one (<xref ref-type="bibr" rid="B16">[Dataset] Statista, 2019</xref>). A considerable portion of the general population still fears robots and artificial intelligence, caused by a range of concerns about the negative impact on interpersonal relationships and potential job displacement (<xref ref-type="bibr" rid="B47">Liang and Lee, 2017</xref>; <xref ref-type="bibr" rid="B27">Gherhe&#x15f;, 2018</xref>).</p>
<p>This raises the question of what could aid in easing humans into collaboration with a robot. As robots become more advanced and take greater responsibility in social jobs such as in the education sector (<xref ref-type="bibr" rid="B42">Kubilinskiene et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B31">Hameed et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B59">Neumann, 2019</xref>) or healthcare industry (<xref ref-type="bibr" rid="B57">Mukai et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B48">Logan et&#x20;al., 2019</xref>), this requires humans to be able to trust them. Whereas human-human trust has been extensively studied, human-robot trust poses new and complex research challenges. According to <xref ref-type="bibr" rid="B65">Rempel et&#x20;al. (1985)</xref>, in human-human trust we can distinguish <italic>cognitive trust</italic> - the willingness to rely on another person&#x2019;s competence and reliability - from <italic>affective trust</italic> - the confidence that the other person&#x2019;s actions are intrinsically motivated.</p>
<p>In both cases, the prediction and predictability of behavior are fundamental (<xref ref-type="bibr" rid="B82">Wortham and Theodorou, 2017</xref>). Constructs such as emotional empathy, shared attention, and mental perspective-taking are essential to understand, recognize, and predict human behavior, as well as adhere to people&#x2019;s expectations of appropriate behavior given circumstances (<xref ref-type="bibr" rid="B10">Breazeal et&#x20;al., 2008</xref>). The behavioral prediction is transferred when assessing human-robot trust (<xref ref-type="bibr" rid="B82">Wortham and Theodorou, 2017</xref>), as humans build a mental model, thus anthropomorphizing the machine. During the first encounter, humans tend to apply social norms to robots just as they do to humans (<xref ref-type="bibr" rid="B64">Rai and Diermeier, 2015</xref>). Cognitive trust is measured by assessing the robot&#x2019;s <italic>performance</italic> and affective trust by assessing a robot&#x2019;s <italic>motives</italic>. Prominent factors that influence cognitive trust in a robot are its task performance and characteristics (<xref ref-type="bibr" rid="B32">Hancock et&#x20;al., 2011</xref>; <xref ref-type="bibr" rid="B7">Bernotat et&#x20;al., 2019</xref>), the timing and magnitude of errors (<xref ref-type="bibr" rid="B66">Rossi et&#x20;al., 2017a</xref>; <xref ref-type="bibr" rid="B67">Rossi et&#x20;al., 2017b</xref>) and even physical appearance such as a gender-specific body shape (<xref ref-type="bibr" rid="B7">Bernotat et&#x20;al., 2019</xref>). In contrast to this, however, stands the &#x201c;uncanny valley&#x201d; phenomenon: when a robot exhibits aesthetic characteristics too similar to a human, this can negatively impact trust (<xref ref-type="bibr" rid="B52">Mathur and Reichling, 2016</xref>).</p>
<p>To quantitatively measure human-human trust, previous work relies heavily on the <italic>investment game</italic> (also referred to as the <italic>trust game</italic>) (<xref ref-type="bibr" rid="B5">Berg et&#x20;al., 1995</xref>), an economic experiment derived from game theory. Berg et&#x20;al. introduced the investment game in 1995, where a subject (the trustor) invests money in a counterpart (the trustee). At the beginning of the experiment, the trustor is provided with a monetary resource amount <italic>r</italic>. They can then anonymously decide which fraction <italic>p</italic> of their monetary resource <italic>r</italic> they want to give to the trustee. This fraction is then multiplied by a predetermined factor to incentivize investment. The receiving person (trustee) is free to keep the whole of the increased amount or can opt to send a fraction <italic>q</italic> of the received sum back to the trustor, thereby reciprocating. Trust then is quantitatively measured as the amount of money invested by the trustor in the trustee.</p>
<p>As the investment game has been established to measure trust between humans, some researchers have also used it to empirically measure trust between humans and robots, to varying degrees of success. While most studies kept the original setup, some extended the environment toward a virtual reality setup (<xref ref-type="bibr" rid="B30">Hale et&#x20;al., 2018</xref>), settings with multiple robots (<xref ref-type="bibr" rid="B26">George et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B83">Zanatto et&#x20;al., 2020</xref>) or switched the roles so that the human becomes the trustee dependent on the robot&#x2019;s willingness to invest (<xref ref-type="bibr" rid="B73">Schniter et&#x20;al., 2020</xref>). Other variants such as the <italic>Give-Some Game</italic> slightly change the rules toward an economic analogue of the prisoner&#x2019;s dilemma (<xref ref-type="bibr" rid="B18">DeSteno et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B19">DeSteno et&#x20;al., 2012</xref>). In the original Investment Game, interaction between the trustor and trustee is intentionally prohibited. Designed as a double-blind procedure, neither the participant nor the experimenter knows which trustor is matched to which trustee. A different approach by <xref ref-type="bibr" rid="B28">Glaeser et&#x20;al. (2000)</xref> specifically encourages participants to get to know each other before the experiment, instead of the double-blind procedure originally proposed, thereby opening up possibilities to study the influence of social interaction on&#x20;trust.</p>
<p>As previously mentioned, in every social interaction involving trust, predictability is essential. This predictability is where non-verbal communication (NVC) plays a major role (<xref ref-type="bibr" rid="B19">DeSteno et&#x20;al., 2012</xref>): various studies show supportive evidence that implicit robotic non-verbal communication improves the efficiency and transparency of interaction (<xref ref-type="bibr" rid="B9">Breazeal et&#x20;al., 2005</xref>) and report increased measures of trustworthiness when displaying non-verbal cues. <xref ref-type="bibr" rid="B33">Haring et&#x20;al. (2013)</xref> measured the impact of proximity (physical distance) and character of the subject (trustor) on trust. <xref ref-type="bibr" rid="B19">DeSteno et&#x20;al. (2012)</xref> demonstrate that the accuracy of judging the trustworthiness of robotic partners is heightened when the trustee displays non-verbal cues while holding voice constant. Robotic arm gestures have been shown to reinforce anthropomorphism, liveliness and sympathy (<xref ref-type="bibr" rid="B70">Salem et&#x20;al., 2011</xref>; <xref ref-type="bibr" rid="B69">Salem et&#x20;al., 2013</xref>) - regardless of gesture congruency (<xref ref-type="bibr" rid="B72">Saunderson and Nejat, 2019</xref>). In fact, a lack of social cues of a robot may cause the participant to employ unwanted <italic>testing</italic> behavior where they try to outwit the machine (<xref ref-type="bibr" rid="B55">Mota et&#x20;al., 2016</xref>).</p>
<p>A lot of research has gone into the study of non-verbal communication <italic>via</italic> the investment game in human-agent interaction (<xref ref-type="bibr" rid="B21">Duffy, 2008</xref>; <xref ref-type="bibr" rid="B33">Haring et&#x20;al., 2013</xref>; <xref ref-type="bibr" rid="B55">Mota et&#x20;al., 2016</xref>; <xref ref-type="bibr" rid="B30">Hale et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B84">Zanatto, 2019</xref>; <xref ref-type="bibr" rid="B83">Zanatto et&#x20;al., 2020</xref>). However, only a few of them have used robots that can be considered anthropomorphic and humanoid, which leaves doubt as to whether the trust measured is comparable to human-human trust. How much people invest in the investment game may in fact reflect a mixture of the generalized trust (a stable individual characteristic) and their specific trust toward the trustee (<xref ref-type="bibr" rid="B30">Hale et&#x20;al., 2018</xref>), thus suggesting a different scenario setup to measure specific trust separately. It also remains questionable to what extent humans perceive money as a valuable currency for robotic agents.</p>
<p>To the best of our knowledge, there has not yet been any research definitively confirming whether the investment game is indeed suitable for measuring human-robot trust. While it is a valid, established trust measuring experiment, the original version lacks certain features to make it suitable for a human-robot interaction scenario: a plausible currency for both humans as well as robotic agents and a human-like interaction without the possibility to make choices based on domain knowledge. The current work addresses this gap and aims to create a scenario that provides these features under which trust in robots can be built and destroyed, in order to clearly measure the correlation between the trust experienced by a human, and the trust that is displayed in the trust&#x20;game.</p>
</sec>
<sec id="s2-2">
<title>2.2 Study Design in the Context of Game Design</title>
<p>To keep participants engaged and immersed in a study that is built around a game or scenario with gamification elements, it is important to consider generally established guidelines for the design of game mechanics and the overall gameplay. In game design, the Mechanics-Dynamics-Aesthetics [MDA; <xref ref-type="bibr" rid="B36">Hunicke et&#x20;al. (2004)</xref>] framework is often used to break down a player&#x2019;s gameplay experience into three components: the formal rules of a game (mechanics), how they react to player input (dynamics), and the player&#x2019;s emotional experience of the game (aesthetics). From a design perspective, a game&#x2019;s mechanics determine its dynamics, which generate the aesthetics experienced by the player.</p>
<p>Consequently, careful design of game mechanics is critical in eliciting specific responses from the player. According to <xref ref-type="bibr" rid="B23">Fabricatore (2007)</xref>, minimizing the learning time required to master core game mechanics is an essential guideline for successful design. This is particularly important in user studies where the amount of time spent in the game is limited. Additional important guidelines are limiting the number of core mechanics, making them simple to learn, and keeping them relevant throughout most of the&#x20;game.</p>
<p>For the purpose of collecting data from a scientific study, it is desirable to limit the possibilities of experiencing different narratives and events between different players to be able to infer that different gameplay experiences are solely a result of different subjective experiences. This can be particularly important to control for confounding variables in small to medium-scale sample sizes (<xref ref-type="bibr" rid="B68">Saint-Mont, 2015</xref>). At the same time, the player&#x2019;s choice has to feel meaningful such that their actions have consequences (<xref ref-type="bibr" rid="B79">Stang, 2019</xref>). Therefore, the ideal game design requires a balance between a player&#x2019;s need to influence the game&#x2019;s environment and a study designer&#x2019;s need to limit the set of game states and player actions for the purpose of drawing conclusions.</p>
<p>One important approach for achieving this balance is to provide an <italic>illusion</italic> of choice (<xref ref-type="bibr" rid="B25">Fendt et&#x20;al., 2012</xref>) within a set of predetermined outcomes that are nevertheless dependent on the user&#x2019;s actions. The success of this approach is tied to the well-studied illusion of control, first described by <xref ref-type="bibr" rid="B45">Langer (1975)</xref> as people&#x2019;s tendency to overestimate their ability to control outside events.</p>
<p>Another important factor for player engagement is the reward design (<xref ref-type="bibr" rid="B37">Jakobsson et&#x20;al., 2011</xref>). According to <xref ref-type="bibr" rid="B81">Wang and Sun (2011)</xref>, well-designed reward systems offer positive experiences: balance between challenge and skill, clear goals, and immediate feedback. Clear goals and immediate feedback are especially important for comparability to the original investment game in this case, as these are shared characteristics. Reward is the primary driver in how the player progresses the game and how resources are shared in multi-agent games. Reward is often tied to a currency or item and the perceived value is its impact on the reward or the advantage it provides to progress in the&#x20;game.</p>
<p>These aspects, i.e. the chosen reward system, set of available actions, perceived control over choices, and easy-to-follow rules can contribute to the overall <italic>immersion</italic> that a player feels. Immersion plays a key role in the design of our experiment as it fosters a more natural-like human-robot interaction. <xref ref-type="bibr" rid="B58">Murray (1997)</xref> defines immersion as a metaphorical term derived from the physical experience of being submerged in water: &#x201c;the sensation of being surrounded by a completely other reality [&#x2026;] that takes over all of our attention, our whole perceptual apparatus.&#x201d; Such a cognitive state of involvement can span across multiple forms of media such as digital games, films, books or pen-and-paper role-playing games (<xref ref-type="bibr" rid="B13">Cairns et&#x20;al., 2014</xref>). Massively multiplayer online role-playing game (MMORPG) fantasy games are known to immerse the player, as they can engage in real-time communication, role-play, and character customization (<xref ref-type="bibr" rid="B61">Peterson, 2010</xref>).</p>
<p>
<xref ref-type="bibr" rid="B76">Slater and Wilbur (1997)</xref> and <xref ref-type="bibr" rid="B15">Cummings and Bailenson (2016)</xref>, however, distinguish presence - the subjective psychological experience of &#x201c;being there&#x201d; - from immersion as an objective characteristic of a technology: <xref ref-type="bibr" rid="B76">Slater and Wilbur (1997)</xref> propose to assess immersion as a system&#x2019;s ability to create a vivid illusion of reality to the senses of a human participant. Presence then is the state of submerged consciousness that may be induced by immersion. By looking at immersion as a property of the (virtual) environment one can measure its influencing factors. <xref ref-type="bibr" rid="B15">Cummings and Bailenson (2016)</xref> summarize that immersion can be achieved by:<list list-type="simple">
<list-item>
<p>1. high-fidelity simulations through multiple sensory modalities</p>
</list-item>
<list-item>
<p>2. mapping a participant&#x2019;s physical actions to their virtual counterparts</p>
</list-item>
<list-item>
<p>3. removing the participant from the external world through self-contained plots and narratives.</p>
</list-item>
</list>
</p>
<p>Such properties then let participants become psychologically engaged in the virtual task at hand rather than having to deal with the input mechanisms themselves (<xref ref-type="bibr" rid="B15">Cummings and Bailenson, 2016</xref>).</p>
<p>In our experiment, we provide a high fidelity simulation through visual and auditory sensory modalities by the use of curved screen projections, dry ice fog upon entrance, and surround sound audio. We map the participant&#x2019;s physical actions to their virtual counterparts by providing a tangible currency consisting of cubes that are physically moved to represent energy distribution. Lastly, the participant is removed from the external world through self-contained plots and narrative drawn from science fiction.</p>
<p>Science fiction is used to further enhance immersion as it is known to have a positive impact on engagement (<xref ref-type="bibr" rid="B56">Mubin et&#x20;al., 2016</xref>). The more immersive the system, the more likely individuals feel present in the environment, thereby letting the virtual setting dominate over physical reality in determining their responses (<xref ref-type="bibr" rid="B15">Cummings and Bailenson, 2016</xref>). An example would be a jump scare reaction during a horror movie, or when being ambushed while playing a first-person shooter.</p>
<p>Put differently: the greater the degree of immersion, the greater the chance that participants will behave as they do in similar circumstances of everyday reality (<xref ref-type="bibr" rid="B76">Slater and Wilbur, 1997</xref>). This concept of <italic>presence as realism</italic> however has two aspects that need to be distinguished: <italic>social</italic> and <italic>perceptual</italic> realism. According to <xref ref-type="bibr" rid="B49">Lombard and Ditton (1997)</xref>, <italic>social realism</italic> is the extent to which a media portrayal is plausible in that it reflects events that could occur. As an example, characters and events in animated series may reflect high social realism but - because they are not &#x201c;photorealistic&#x201d; - low perceptual realism. A scene from a science fiction program, on the other hand, may be low in social realism but high in perceptual realism, i.e. although the events portrayed are unlikely, objects and characters in the program look and sound as one would expect if they did in fact exist (<xref ref-type="bibr" rid="B50">Lombard et&#x20;al., 2009</xref>).</p>
<p>We strive to minimize social realism to prohibit that participants draw from past experience while retaining high perceptual realism to psychologically engage them in the virtual&#x20;task.</p>
</sec>
</sec>
<sec id="s3">
<title>3 HRI Scenario Design</title>
<sec id="s3-1">
<title>3.1 An Immersive Extension of the Investment Game</title>
<p>We base our study design around a variant of the investment game, in which two robotic counsellors compete for investments from the human participant. However, in contrast to previous competitive variants (<xref ref-type="bibr" rid="B30">Hale et&#x20;al., 2018</xref>), our design allows the human participant to allocate their investment proportionally between the two robots and themselves.</p>
<p>Motivated by the goal to avoid prior experience in the game as an influence for player investments, we deliberately exaggerate the design of our game scenario: in our space mission, the participants impersonate the commander of a spaceship with the task to deliver critical cargo to a distant planet. For this mission, they are accompanied by two robotic officers. Throughout their journey through outer space, the crew encounters challenges such as asteroid fields and ship malfunctions that require immediate intervention and collaborative solutions. The robotic officers counsel the participant by individually proposing solutions, and the participant proportionally decides on their preferred action by moving energy cubes into respective compartments. However, the two robots&#x2019; advice is designed to be incomprehensible technical jargon, leaving the participant with no other choice than to base their decision on their subjective impression of the officers&#x2019; personas.</p>
<p>By allocating energy resources, we hypothesize that the participant effectively invests in the robotic officer&#x2019;s trustworthiness. This scenario setup entails two important requirements: i) making the participant reliant on the robots&#x2019; expertise to foster cooperation, and ii) ensuring that the invested currency and investment outcome have an inherent value to both the participant and the robots. We achieve the former by designing a challenging scenario setting of a space journey: all participants will have negligible expertise regarding space travel. Thus, the robotic officers that are introduced as specifically designed to advise in interstellar travel will be perceived as more knowledgeable in the subject matter. In combination, this should prevent participants from making decisions based on their previous experiences, leaving the participant primarily reliant on the robots&#x2019; advice.</p>
<p>To achieve the second requirement, we employ a currency that is considered valuable for both the human trustor and the robotic trustee, to create intrinsic motivation to distribute the currency. As we anticipate that participants do not perceive money as a valuable currency for robotic agents, we adopt a fictional currency of <italic>energy cells</italic>, represented by cubes. From the perspective of game design, the value of items is often determined by their aesthetics and functionality (<xref ref-type="bibr" rid="B34">Ho, 2014</xref>), i.e. their usefulness to progress within the game. Therefore, we use cubes that visually fit into the given science fiction setting and tie their value to the ability to invest in the robots&#x2019; choices. Consequently, these energy cells have a value to the player as they function as a resource that can provide the ship&#x2019;s engine with the extra power to reach the destination planet faster. At the same time, the robotic officers require such energy to execute their solutions to ensure safety during the journey. To ascertain that players feel the impact of their choices and investments, the ship AI gives feedback at the end of each round, explaining the consequences of the taken actions for the&#x20;crew.</p>
<p>A comparison between the original and our immersive extension in terms of defining features can be seen in <xref ref-type="table" rid="T1">Table&#x20;1</xref>. In contrast, the original investment game uses the same monetary currency for both the investment and the return, which forms the basis for an exchange of benefits and characterizes the reciprocity of the game&#x2019;s interaction (<xref ref-type="bibr" rid="B71">Sandoval et&#x20;al., 2016</xref>). In our case, rather than a return of the invested currency, we provide a different benefit that is tied to the game progression: a reduction of the mission time, which brings participants closer to their goal. A successful distribution causes the presented emergency to be resolved by the robot that received most of the currency. As such, a participant&#x2019;s distribution of the energy cells is followed by feedback from the ship AI with regards to whether the robots invested in were successful or not in executing their proposed strategies. This builds the basis for reward within our scenario as the return of investment is countered by the robots to execute their problem-solving strategies. We aim to resolve the challenges that i) the participant could perceive a real-world currency as less &#x201c;useful&#x201d; for the robots than for themselves, and ii) the energy cubes may be perceived as not valuable enough for the participant to make a meaningful investment choice. Therefore, we add the reward to the game progression caused by the robots.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Comparison of features from the original investment game and our immersive version.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Feature</th>
<th align="center">Original investment game</th>
<th align="center">Immersive investment game</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Setting</td>
<td align="left">&#x201c;Plain&#x201d; experiment room</td>
<td align="left">Sci-Fi game</td>
</tr>
<tr>
<td align="left">Parties involved</td>
<td align="left">2 participants</td>
<td align="left">Participant and 2 robots</td>
</tr>
<tr>
<td align="left">Interaction between parties</td>
<td align="left">No</td>
<td align="left">Yes</td>
</tr>
<tr>
<td align="left">Currency invested</td>
<td align="left">Monetary</td>
<td align="left">Energy cells</td>
</tr>
<tr>
<td align="left">Goal of participant</td>
<td align="left">Maximize monetary profit</td>
<td align="left">Optimize cargo delivery time</td>
</tr>
<tr>
<td align="left">Currency returned</td>
<td align="left">Monetary</td>
<td align="left">Reduced mission time</td>
</tr>
<tr>
<td align="left">Motivation for participant</td>
<td align="left">Monetary incentive</td>
<td align="left">Positive reinforcement</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Each resolved emergency reduces the delivery time of the cargo, progresses the game and rewards the player. An unsuccessful distribution of the energy cells indicates the loss of the invested currency, comparable to the original investment game. The loss of the currency increases the delivery time since the energy not invested in either robot&#x2019;s solution speeds up the ship. By giving a functional value to the energy cells for all, the participant and the robots, and providing a return for the investment, we create a currency that is perceived as valuable to both trustor and trustee.</p>
<p>Lastly, participants can proportionally choose how much they invest, i.e.,&#x20;they can freely distribute their energy cells between both robots and themselves. However, as 7 cells are provided in total, they are unable to distribute all energy cells evenly among the 3 options (officer A, officer B, ship engine), effectively forcing them to voice a preference.</p>
<p>These three aspects - i) the inability of the participants to make choices based on prior existing domain knowledge, ii) a shared currency between human trustor and robot trustee, and iii) the weighted choice between two agents - allow us to go beyond an anonymous exchange of money while maintaining the structure of the investment game, and meet the requirements for a suitable human-robot interaction scenario.</p>
</sec>
<sec id="s3-2">
<title>3.2 Experimental Setup</title>
<p>One of the main goals of our scenario design is to achieve an immersive and enjoyable experience for the participants. Besides concealing our research question, our scenario needs to establish enough involvement to allow trust-building toward the robots. For this purpose, we developed a fully autonomous system that only requires intervention in case of larger technical failures or misunderstandings, which most likely would then result in a cancellation of the experiment run. A schematic of our experimental setup can be seen in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>. The participant (P) is seated in the cockpit of the ship (depicted by the interactive video feed screen [S]), containing the two robots (R1 and R2) and a table with three compartments containing the total of seven energy cubes (E). Separated by a curtain, the experimenter (X) and operator (O) monitor the experiment, to intervene only in case of technical difficulties. Otherwise, the system acts through a state machine, implemented in Python using the SMACH<xref ref-type="fn" rid="fn2">
<sup>1</sup>
</xref> state management library. The state machine orchestrates and synchronizes several ROS (<xref ref-type="bibr" rid="B63">Quigley et&#x20;al., 2009</xref>) services built on top of the following components:</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Schematic of the experiment setup from above: The participant (P) sits at the cockpit table, with the two robots (R1, R2) opposite on each side. Behind the robots, the curved screen (S) displays the virtual interior. In the middle of the table, three heptagonal compartments depict where energy cubes (E) can be placed. The top view camera (C1) tracks the energy cube allocation, while two additional cameras (C2) allow to monitor the participant during the experiment. A microphone (M) and loudspeakers (L) allow for voice interactivity and auditory immersion. Behind a privacy curtain, the experimenter (X) keeps additional notes, while an operator (O) monitors the experiment to intervene in case of technical difficulties.</p>
</caption>
<graphic xlink:href="frobt-08-644529-g002.tif"/>
</fig>
<sec id="s3-2-1">
<title>3.2.1 The Environment</title>
<p>For our environment setup we utilized the multi-sensory Virtual Reality lab of the <italic>Knowledge Technology</italic> group at the <italic>Universit&#xe4;t Hamburg</italic> (<xref ref-type="bibr" rid="B4">Bauer et&#x20;al., 2012</xref>). The participant is seated at a small table in the center of a half-spherical screen canvas with a diameter of 2.6&#xa0;m and a height of 2.2&#xa0;m. On the table, in front of the player, there are three heptagonal-shaped compartment areas containing in total seven plastic cubes, as can be seen in <xref ref-type="fig" rid="F1">Figures 1</xref>, <xref ref-type="fig" rid="F3">3</xref>. A condenser microphone is located in the middle of the table for speech recognition. Next to the microphone lies a laminated sheet with possible questions that can be asked to the robots during the game. Four Optoma GT 750&#x20;4k projectors aimed at the canvas in front of the participant display still images as well as video feeds, simulating the inside view of a spaceship cockpit.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Top view of the commanding table. One energy cell is assigned to each robot and the participant kept&#x20;five.</p>
</caption>
<graphic xlink:href="frobt-08-644529-g003.tif"/>
</fig>
<p>The canvas shows the journey through the galaxy by displaying transition videos between scenes and provides visual feedback such as warnings in case of emergency situations. We use multiple surround loudspeakers installed behind the canvas for the ship AI&#x2019;s voice and special sound effects such as ambient music, engine noise and alarm sounds. Turquoise ambient lighting and dry ice fog create an atmospheric environment throughout the game, while red lights are used occasionally to indicate the emergency encounters.</p>
</sec>
<sec id="s3-2-2">
<title>3.2.2 The Robots</title>
<p>The two robot officers, non-descriptively named <italic>732-A</italic> and <italic>732-B</italic>, are located at <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mn>45</mml:mn>
</mml:mrow>
<mml:mo>&#x2218;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mn>135</mml:mn>
</mml:mrow>
<mml:mo>&#x2218;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> respectively from the circle origin, at a maximum angular distance to each other and the participant. We chose their names to be as neutral and unrelated to any prior experience of participants as possible.</p>
<p>We utilize NICO (Neuro-Inspired COmpanion) (<xref ref-type="bibr" rid="B40">Kerzel et&#x20;al., 2020</xref>, <xref ref-type="bibr" rid="B41">2017</xref>), an open-source social robotics platform for humanoid robots (see <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>) designed by the <italic>Knowledge Technology</italic> group at the <italic>Universit&#xe4;t Hamburg</italic>. NICO is a child-sized humanoid robot that has a range of programmable human-like sensory and motor capabilities, accessible and customisable through the Robot Operating System (ROS) (<xref ref-type="bibr" rid="B63">Quigley et&#x20;al., 2009</xref>), characterized in particular by combining social interaction capabilities. It has 10 degrees-of-freedom in the torso (head and arms) and 22 degrees-of-freedom in the hands (under-actuated, 8 motors) with additional joints for fingers, which allows for fine-grained gestures and body language.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>The neuro-inspired COmpanion (NICO).</p>
</caption>
<graphic xlink:href="frobt-08-644529-g004.tif"/>
</fig>
<p>NICO is also capable of displaying a range of programmable facial expressions through LED matrices in its eyebrows and mouth. The utterance of spoken messages is enabled <italic>via</italic> an Embodied Dialogue System, integrated with loudspeakers in the robotic torsos to produce enhanced speech.</p>
</sec>
<sec id="s3-2-3">
<title>3.2.3&#x20;Non-Verbal Communication</title>
<p>As elaborated in <xref ref-type="sec" rid="s2">Section 2</xref>, non-verbal communication (NVC) plays a key role in human-human trust (<xref ref-type="bibr" rid="B19">DeSteno et&#x20;al., 2012</xref>). For our investigation of the effect of non-verbal communication on human-robot trust we equip both robotic officers with sets of non-verbal cues, one set more elaborate than the&#x20;other.</p>
<p>These more elaborate cues include: adapting the gaze direction <italic>via</italic> head movements toward the participant and the other robot, four different facial expressions (happiness, sadness, surprise, anger), as well as gestures toward the participant such as pointing, saluting or beat gestures. These facial expressions and body movements show evidence to improve the transparency of the interaction and reinforce the spoken word (<xref ref-type="bibr" rid="B10">Breazeal et&#x20;al., 2008</xref>).</p>
<p>The other robot adheres to a <italic>minimal</italic> set of neutral movements to keep the illusion of life (<xref ref-type="bibr" rid="B55">Mota et&#x20;al., 2016</xref>), such as looking down at the allocated energy cells and turning their head toward the speaker. We alternate the condition between participants in order to control for potential biases.</p>
</sec>
<sec id="s3-2-4">
<title>3.2.4 The Vision System</title>
<p>To support the full autonomy of the system, we developed an automatic object detection system. It handles the energy cell counting during allocation as well as confirms that the robots&#x2019; compartments are empty before proceeding to the next&#x20;scene.</p>
<p>On the table, in front of the participant are three heptagonal-shaped compartments holding the energy cells. All compartments have seven quadratic markers on which the energy cells must be placed for successful allocation. An RGB-camera is mounted on top of the commanding table near the ceiling to count and track energy cube allocation and de-allocation from the robot compartments. A picture of the commanding table taken by this camera can be seen in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>.</p>
<p>After a request from the dialogue manager state machine, the object detection algorithm processes an image taken from the RGB-camera mounted on top of the commanding table using the <italic>OpenCV</italic> library (<xref ref-type="bibr" rid="B8">Bradski, 2000</xref>), to detect the number of energy cells allocated to each heptagon-shaped compartment. The allocation distribution is sent back to the dialogue manager <italic>via</italic> ROS service response. Two additional cameras are used by the experimenter and operator to observe the participant and monitor the experiment flow. Using a camera mounted behind the participant, the operator verifies the movements of the robots for technical faults, while with the other camera, placed on top of the canvas, the experimenter examines the participants&#x2019; expressions and movements for possible difficulties.</p>
</sec>
<sec id="s3-2-5">
<title>3.2.5 The Speech Systems</title>
<p>Interactive dialogue <italic>via</italic> spoken words is a cornerstone to enable natural human-like human-robot interaction (<xref ref-type="bibr" rid="B78">Spiliotopoulos et&#x20;al., 2001</xref>; <xref ref-type="bibr" rid="B43">Kulyukin, 2006</xref>). We, therefore, built the spaceship AI named <italic>Wendigo</italic> as a closed dialogue manager utilizing the SMACH state management library, the Automatic Speech Recognition system DOCKS2 developed by <xref ref-type="bibr" rid="B80">Twiefel et&#x20;al. (2014)</xref>, and the Amazon Polly<xref ref-type="fn" rid="fn3">
<sup>2</sup>
</xref> Speech Synthesis service.</p>
<p>The participants can directly interact with <italic>Wendigo</italic> and the robotic officers <italic>via</italic> a microphone located in the middle of the commanding table. The dialogue is restricted in allowing the participants to only pick questions from a predefined list and confirming that they are ready to go on with the experiment. Both NICO robot officers exhibit the same voice persona represented by loudspeakers embodied in their torso, allowing for a natural sound-source localisation.</p>
</sec>
</sec>
<sec id="s3-3">
<title>3.3 Protocol and Game Scenes</title>
<p>As formulated in <xref ref-type="sec" rid="s3-2">Section 3.2</xref>, we strive to automate the experiment procedure as much as possible to limit variability and experimenter bias. In the remaining human interventions, the experimenter, therefore, follows a scripted protocol (all detailed lines can be inspected in the full experiment protocol publicly available at<xref ref-type="fn" rid="fn4">
<sup>3</sup>
</xref>): The participants are welcomed and brought to the anteroom, where they are asked to fill out the consent and data privacy forms as well as a pre-experiment questionnaire.</p>
<p>This questionnaire asks for standard demographic questions such as age, sex, former experience with robots and computers, and general attitude toward robots. We include the 30-item Big Five Inventory-2 Short Form questionnaire (<xref ref-type="bibr" rid="B77">Soto and John, 2017</xref>) to assess the Big Five personality domains, which measure individual differences in people&#x2019;s characteristic patterns of thinking, feeling, and behaving (<xref ref-type="bibr" rid="B29">Goldberg and Kilkowski, 1985</xref>). Participants rate each item statement using a 5-point Likert scale ranging from &#x201c;disagree strongly&#x201d; to &#x201c;agree strongly&#x201d;. We choose the shortened forms to minimize assessment time and respondent fatigue while retaining much of the full Big Five measure&#x2019;s reliability and validity. Moreover, we measure the general risk-taking tendencies <italic>via</italic> the Risk Propensity Scale (RPS) by <xref ref-type="bibr" rid="B54">Meertens and Lion (2008)</xref>, as well as the self-reported trust propensity using the 4-item form by <xref ref-type="bibr" rid="B74">Schoorman et&#x20;al. (1996)</xref>. The scales use 5-point Likert-type items with anchors of agree and disagree for each scale&#x20;point.</p>
<p>After completing the pre-experiment questionnaire, the experimenter then guides the participant toward the experiment room with the half-spherical canvas, depicted as the spaceship cockpit. By entering the cockpit, the experiment context is set and immersion is fostered by the screen depicting the outside view of a space cargo hangar, dimmed lights, dry ice, as well as the experimenter from now on addressing the participants as &#x201c;commander&#x201d;. Following the scripted introductory narrative, the experimenter introduces the participants to the space mission task, their goal as the commander to deliver important cargo safely and quickly, and makes them aware of the two robotic officers who accompany them on their journey. The participants are encouraged to familiarize themselves with the cockpit environment, the energy cells, the allocation compartments, and the list of possible questions that can be asked to the robots during the game. The experimenter also elaborates on the meaning and impact of the energy cubes, and demonstrates how they can be distributed by way of example. The experimenter asks for any remaining questions, then steps back out of the experiment room behind a curtain before the trial scene 0 begins.</p>
<p>In this scene, the voice-controlled artificial intelligence system <italic>Wendigo</italic> and the robotic officers introduce themselves, then conduct an introductory round of the cube allocation, which is concealed as a system check. This trial round serves to acquaint the participants with the experiment procedure and reveal possible misunderstandings. It familiarizes them with the ship&#x2019;s visual and auditory feedback mechanics and accustoms them to the delay between voice input and feedback response. The trial round furthermore allows the operator behind the curtain to possibly re-calibrate the microphone sensitivity without breaking immersion.</p>
<p>After the trial scene 0, the experimenter briefly enters the cockpit again to answer any remaining questions before the start of the actual experiment. At this point, we consider participants to be informed about the game mechanics, prepared for the upcoming tasks, and motivated to achieve the game&#x2019;s objective, following their mental model they have formed about the&#x20;game.</p>
<p>
<xref ref-type="fig" rid="F5">Figure&#x20;5</xref> depicts the overall course of the experiment narrative: every participant passes through the same scripted events, followed by the same type of feedback (neutral, negative, or positive). While the specific feedback lines are adjusted to the individual allocation choices, the resulting feedback characteristic is always predetermined for each round to ensure comparability between different participants&#x2019; interactions. In each scene, the participant goes through the following steps (as visualized in <xref ref-type="fig" rid="F6">Figure&#x20;6</xref>):<list list-type="simple">
<list-item>
<p>1. <italic>Wendigo</italic> draws attention to the challenge at hand (Scene 1: malfunctioning navigation system, Scene 2: interfering asteroids, Scene 3: entering the atmosphere, Scene 4: leaking cooling system).</p>
</list-item>
<list-item>
<p>2. Both robotic officers advertise their solution for which they require energy&#x20;cells.</p>
</list-item>
<list-item>
<p>3. The participant can ask a question from the list of predefined options, to which the robotic officers reply one after another and in a randomized&#x20;order.</p>
</list-item>
<list-item>
<p>4. The participant is asked to distribute the energy cells as they see fit, and say &#x2018;<italic>Wendigo</italic>, I am done!&#x2019; when they are&#x20;done.</p>
</list-item>
<list-item>
<p>5. <italic>Wendigo</italic> provides feedback on the decision outcome (Scene 1: neutral, Scene 2: negative, Scene 3 and 4: positive).</p>
</list-item>
<list-item>
<p>6. After the participant places all energy cells back into their own compartment, the state machine autonomously transitions to the next&#x20;scene.</p>
</list-item>
</list>
</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>General course of the experiment. Each scene is followed by a feedback statement with predetermined characteristic (neutral, negative or positive).</p>
</caption>
<graphic xlink:href="frobt-08-644529-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Each of the four scenes follows the same structure: the participant is presented with an emergency for which the robots suggest different solutions. The player can engage in a conversation with both robots to determine their investment. Based on which round is played, the player&#x2019;s investments have led to robot actions with either positive or negative consequences, resolving the emergency and transitioning to the next&#x20;scene.</p>
</caption>
<graphic xlink:href="frobt-08-644529-g006.tif"/>
</fig>
<p>Note that after each cube allocation, we employ rich visual and auditory feedback (see step 5) in terms of ambient light and spoken response lines disguised as status reports, such as &#x201c;Unsuccessful. Ship damaged. The breach has been closed but the life support system is damaged.&#x201d; as an example for negative feedback. By design, in the second scene, the feedback for the investment decision (regardless of how the energy cubes were distributed) will be portrayed as unsuccessful, while on each of the other investments the participant receives positive feedback instead. This control of the narrative, regardless of the participant&#x2019;s concrete decision, enables us to reproducibly observe the effects of building and destroying&#x20;trust.</p>
<p>During the experiment, the experimenter behind the curtain, observing on the extra camera view (provided by the cameras indicated as C2 in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>), takes free-form observation notes about the progression of the experiment, as well as any noteworthy occurrence that could invalidate the participant&#x2019;s data. After the final scene, the experimenter steps back in, congratulates the participant on a successful mission, and escorts them back into the anteroom. The participant is provided with the post-study questionnaire that asks to evaluate their perception of the experiment and their impression of each&#x20;robot.</p>
<p>For the purpose of rating the robot&#x2019;s impression, we employ the Godspeed questionnaire (<xref ref-type="bibr" rid="B3">Bartneck et&#x20;al., 2008</xref>), a standardized measurement tool for human-robot interaction using semantic differential scales on five key concepts in human-robot interaction: anthropomorphism, animacy, likeability, perceived intelligence, and perceived safety. We omit questions related to <italic>Perceived Safety</italic>, since there is no physical interaction between the participants and the robots and distance is kept throughout the experiment. The post-study questionnaire furthermore asks the participant to rate the trustworthiness (<xref ref-type="bibr" rid="B7">Bernotat et&#x20;al., 2019</xref>) and performance of each robot. Inspired by <xref ref-type="bibr" rid="B6">Bernotat et&#x20;al. (2017)</xref>, we adapt seven items on the measurement of cognitive trust (grouped into &#x201c;content&#x201d; and &#x201c;speech&#x201d; clusters), and six items on affective trust (grouped into &#x201c;cooperation&#x201d; and &#x201c;sociability&#x201d; clusters) (<xref ref-type="bibr" rid="B38">Johnson and Grayson, 2005</xref>). Lastly, the participants are asked to choose which robot they preferred as an assistant, and to provide additional feedback about shortcomings, immersion, and their overall experience during the experiment.</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<title>4 Results</title>
<p>The study was conducted over two consecutive weeks at the end of February 2020 on the campus of the computer science department of Universit&#xe4;t Hamburg. It was advertised <italic>via</italic> flyers and word of mouth to people with at least some experience and familiarity with computers and robots, who are comfortable with participating in a science fiction game and could understand and speak English fairly well. In the following sections, we start by discussing general population statistics and overall perception of the robots. We then proceed to evaluate whether our scenario is a valid augmentation of the Investment Game. For this, we introduce two derived metrics from the energy cube allocation to compare trust measurements among two conditions. Lastly, we report on the results of the trust measurements and the effect of non-verbal communication (NVC) on&#x20;trust.</p>
<sec id="s4-1">
<title>4.1 Population Statistics</title>
<p>Our study was conducted with 53 participants, of whom 45 finished the experiment successfully. For 8 participants the experiment was started but had to be aborted because of technical issues such as robot actuator overloading, language barriers or a misunderstanding of the game rules. All following statistics, therefore, apply to the 45 participants who completed the experiment without complications. Our participants&#x2019; mean age (<inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>26.8</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>7.0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) lies in the range of young adults, with <inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:mn>95</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> between ages 19 and 34. 60% of them identified as male, <inline-formula id="inf13">
<mml:math id="m13">
<mml:mrow>
<mml:mn>38</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> as female, <inline-formula id="inf14">
<mml:math id="m14">
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> made no statement. All of the participants were familiar with computers and <inline-formula id="inf15">
<mml:math id="m15">
<mml:mrow>
<mml:mn>51</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> of them have programming experience. While <inline-formula id="inf16">
<mml:math id="m16">
<mml:mrow>
<mml:mn>29</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> of the participants had worked with robots previously as a developer, <inline-formula id="inf17">
<mml:math id="m17">
<mml:mrow>
<mml:mn>42</mml:mn>
<mml:mtext>%</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> had never interacted with a robot prior to the experiment.</p>
<p>We compared our participants to the general German population of a similar age group with results obtained from other studies (<xref ref-type="bibr" rid="B44">Lang et&#x20;al., 2011</xref>). The comparison was conducted with a Welch&#x2019;s t-test for independent samples on descriptive statistics with significance level 0.01. Based on the personality questionnaire (<xref ref-type="sec" rid="s3">Section 3</xref>) results, the participants had average scores for extroversion (<inline-formula id="inf18">
<mml:math id="m18">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>4.68</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf19">
<mml:math id="m19">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.30</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), agreeableness (<inline-formula id="inf20">
<mml:math id="m20">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>5.22</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf21">
<mml:math id="m21">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.03</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and neuroticism (<inline-formula id="inf22">
<mml:math id="m22">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>3.62</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf23">
<mml:math id="m23">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.78</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). However they scored below-average in conscientiousness (<inline-formula id="inf24">
<mml:math id="m24">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>4.79</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf25">
<mml:math id="m25">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.99</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and above-average in openness (<inline-formula id="inf26">
<mml:math id="m26">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>5.50</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf27">
<mml:math id="m27">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.04</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) compared to the general German population of a similar age group (<xref ref-type="bibr" rid="B44">Lang et&#x20;al., 2011</xref>). We refrained from assessing the detailed facet-level trait properties of the Big Five domains, as the authors recommend against this for sample sizes below 400 (<xref ref-type="bibr" rid="B77">Soto and John, 2017</xref>).</p>
<p>The trust and risk propensity questionnaires showed that our participants were less prone to take risks (<inline-formula id="inf28">
<mml:math id="m28">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>4.05</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf29">
<mml:math id="m29">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.32</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) than the general population (<xref ref-type="bibr" rid="B54">Meertens and Lion, 2008</xref>) yet more prone to trust (<xref ref-type="bibr" rid="B53">Mayer and Davis, 1999</xref>) (<inline-formula id="inf30">
<mml:math id="m30">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2.93</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf31">
<mml:math id="m31">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.61</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). We used the cognitive trust items described in <xref ref-type="sec" rid="s3-3">Section 3.3</xref> as a rating of the robot&#x2019;s performance to compare our population to other findings compiled by <xref ref-type="bibr" rid="B22">Esterwood and Robert (2020)</xref>: we found a very strong correlation between the cognitive and affective trust items (<inline-formula id="inf32">
<mml:math id="m32">
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.82</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf33">
<mml:math id="m33">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>7.2</mml:mn>
<mml:mi>e</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>23</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), confirming that cognitive and affective trust go hand in&#x20;hand.</p>
<p>We can confirm the finding by <xref ref-type="bibr" rid="B75">Sehili et&#x20;al. (2014)</xref> for a positive relationship between neuroticism and an anthropomorphic perception of the robot (<inline-formula id="inf34">
<mml:math id="m34">
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.22</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf35">
<mml:math id="m35">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.035</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). In contrast to <xref ref-type="bibr" rid="B51">Looije et&#x20;al. (2010)</xref>, we cannot confirm any relationship between self-reported conscientiousness of a participant and the perceived sociability of a robot. We moreover cannot confirm a significant relationship between perceived anthropomorphism and robotic performance like <xref ref-type="bibr" rid="B62">Powers and Kiesler (2006)</xref> did, however similar to <xref ref-type="bibr" rid="B11">Broadbent et&#x20;al. (2013)</xref> we find a moderate relationship between perceived anthropomorphism and affective trust (<inline-formula id="inf36">
<mml:math id="m36">
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf37">
<mml:math id="m37">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.057</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>).</p>
</sec>
<sec id="s4-2">
<title>4.2 Metrics and Grouping Criteria</title>
<p>We now introduce two metrics specific to our scenario that allow us to quantify the differences in the trust placed in each of the two robots.</p>
<sec id="s4-2-1">
<title>4.2.1 Allocation Metric</title>
<p>Measures the investment displayed <italic>via</italic> energy cells allocated to each single robot. The allocation metric is calculated as<disp-formula id="e1">
<mml:math id="m38">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf38">
<mml:math id="m39">
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> stands for the energy cells allocated to one of the robots <inline-formula id="inf39">
<mml:math id="m40">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. <inline-formula id="inf40">
<mml:math id="m41">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> indicates a preference for <inline-formula id="inf41">
<mml:math id="m42">
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf42">
<mml:math id="m43">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> a preference for <inline-formula id="inf43">
<mml:math id="m44">
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, while the magnitude in the differences is indicated by <inline-formula id="inf44">
<mml:math id="m45">
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
</sec>
<sec id="s4-2-2">
<title>4.2.2 Relative Trust Metric</title>
<p>Measures the trust expressed in each robot according to the post-experiment questionnaire. Relative trust is calculated as<disp-formula id="e2">
<mml:math id="m46">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf45">
<mml:math id="m47">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the value obtained from the different trustworthiness Likert items in the post-interaction questionnaire, normalized to lie within <inline-formula id="inf46">
<mml:math id="m48">
<mml:mrow>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mn>0,1</mml:mn>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. As before, <inline-formula id="inf47">
<mml:math id="m49">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> indicates a preference for <inline-formula id="inf48">
<mml:math id="m50">
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> or a preference for <inline-formula id="inf49">
<mml:math id="m51">
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> otherwise, and the magnitude in the differences is indicated by <inline-formula id="inf50">
<mml:math id="m52">
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>R</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>Inspecting both the Allocation Metric and the Relative Trust metric over consecutive scenes, we now segment the participants into two groups:</p>
</sec>
<sec id="s4-2-3">
<title>4.2.3 The Alternating-Minimum Investment Group (<italic>N</italic>&#x20;&#x3d; 16)</title>
<p>During the exploratory data analysis, two outstanding gameplay patterns were observed. These two patterns are defined by specific behavior throughout the game, participants that showed either one or both of these behaviors were grouped together:<list list-type="simple">
<list-item>
<p>&#x2022; <italic>Minimum Investment Behavior:</italic> This behavior resembles a lack of engagement in the game. Three of the participants invested less than one-third of the available cubes and were considered disengaged. A threshold of fewer than 10 energy cells allocated in total throughout the four scenes was used as the criterion for this&#x20;group.</p>
</list-item>
<list-item>
<p>&#x2022; <italic>Alternating Investment Behavior:</italic> The energy cell allocation results indicated that some participants changed their minds about the robot they trusted more throughout the game. A group of 14 participants changed their mind at every scene as they would alternate between either allocating more energy cells to one robot or the other, or allocating an equal amount to both robots. These alternating participants did not particularly trust or prefer one robot over another to invest in throughout the&#x20;game.</p>
</list-item>
</list>
</p>
<p>
<xref ref-type="fig" rid="F7">Figure&#x20;7</xref> highlights these two behaviors in the context of the number of preference changes and amount of cubes invested throughout the game. The group of participants showing either of those behaviors is further referred to as the alternating-minimum investment group and consists of 16 participants. Further analysis of the alternating-minimum investment group showed that there is no link between these patterns and one specific robot, nor the NVC variable. As such, this behavior did not depend on the content of speech or appearance of either of the robots.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Occurrence of alternating behavior <bold>(A)</bold> and minimum investment behavior <bold>(B)</bold>, highlighted in red in the distribution of relevant gameplay metrics (amount of preference changes and total cubes allocated, shown in steps of 4 cubes).</p>
</caption>
<graphic xlink:href="frobt-08-644529-g007.tif"/>
</fig>
</sec>
<sec id="s4-2-4">
<title>4.2.4 The Main Group (<italic>N</italic>&#x20;&#x3d; 29)</title>
<p>This is the group of participants that did not show either of the two aforementioned behaviors: the majority of the participants. With a Mann-Whitney U test for independent samples, we found that these participants had no notable differences to the alternating-minimum investment group with regards to risk and trust propensity. They, however, obtained a lower score in Neuroticism (<inline-formula id="inf51">
<mml:math id="m53">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.024</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) in the personality questionnaire than the alternating-minimum investment&#x20;group.</p>
</sec>
</sec>
<sec id="s4-3">
<title>4.3 Transferability of the Investment Game</title>
<p>The aim of our study is to verify that our scaled-up version of the investment game can be used to measure trust in HRI. The results were evaluated separately on the main group (<inline-formula id="inf52">
<mml:math id="m54">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>29</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and the alternating-minimum investment group (<inline-formula id="inf53">
<mml:math id="m55">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>16</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). For this the coherence between measured trust and self-assessed trust was evaluated by means of the Spearman test for correlation on the previously introduced metrics: the allocation metric represents the measured trust and the relative trust metric represents the self-assessed&#x20;trust.</p>
<p>A statistically significant correlation can be observed for the main group (<inline-formula id="inf54">
<mml:math id="m56">
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.43</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf55">
<mml:math id="m57">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.02</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), however not for the alternating-minimum investment group (<inline-formula id="inf56">
<mml:math id="m58">
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.24</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf57">
<mml:math id="m59">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.37</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). A comparison between both groups can be seen in <xref ref-type="fig" rid="F8">Figure&#x20;8</xref>. In the standard human-human investment game, the amount of money invested by the trustor represents the trust in the trustee. As such, the observed correlation supports the hypothesis that our variation of the investment game between human and robot works much like the investment game between two humans.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Correlation of relative trust and allocation metric for the two participant groups: Main group (A) and Alternating-Minimum Investment group (B).</p>
</caption>
<graphic xlink:href="frobt-08-644529-g008.tif"/>
</fig>
<p>The fact that alternating-minimum investment behavior was found also in a simple setting (<xref ref-type="bibr" rid="B55">Mota et&#x20;al., 2016</xref>) and that there was no relationship between the alternating behavior of the participants and the robot characteristics show that the setting had no impact on the effectiveness of the trust game. This supports our hypothesis, that our scaled-up version of the investment game can indeed be used as a measure of&#x20;trust.</p>
</sec>
<sec id="s4-4">
<title>4.4 Impact of Non-Verbal Communication on the Perception of the Robot</title>
<p>After ensuring that it is indeed possible to measure trust in human-robot interaction with our scaled-up version of the investment game, we further look into the impact of NVC on trust in the robot but also at other characteristics of the robot. As has been mentioned previously, NVC plays a significant role in human interaction but also in the efficiency and transparency of the interaction between humans and robots (<xref ref-type="bibr" rid="B14">Casper and Murphy, 2003</xref>). In our case, we find that these non-verbal cues have indeed made an impact on the trust in the robot as well as on its perceived anthropomorphism and animacy.</p>
<p>We analyze the main group which didn&#x2019;t show alternating-minimum investment behavior (<inline-formula id="inf58">
<mml:math id="m60">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>29</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) where it has been established that the game does measure trust. For this main group, the non-verbal communication of the robot had an impact on the number of energy cells received. This impact was observed in the first scene, the only scene where the participant had no previous disappointment related to any of the robots, but had already gotten to know the robot. In this scene, the robot that showed non-verbal communication obtained a significantly higher amount of energy cells compared to the other. The one-sided Wilcoxon test for independent samples between the distribution of the energy cells for the robot with NVCs and the robot with minimal NVC (MNVC) confirmed this (<inline-formula id="inf59">
<mml:math id="m61">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.0008</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>).</p>
<p>Independent of the gameplay choices, for all participants (<inline-formula id="inf60">
<mml:math id="m62">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>45</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) the robot showing NVC seemed more human-like and animated. As can be seen in <xref ref-type="fig" rid="F9">Figure&#x20;9</xref>, the Godspeed values for anthropomorphism (<inline-formula id="inf61">
<mml:math id="m63">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.008</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and animacy (<inline-formula id="inf62">
<mml:math id="m64">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.00001</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) are significantly distinct when comparing the NVC/MNVC conditions with a Mann-Whitney U test, whereas this is not the case for likeability (<inline-formula id="inf63">
<mml:math id="m65">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.23</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and intelligence (<inline-formula id="inf64">
<mml:math id="m66">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.24</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). The observed values for anthropomorphism support our hypothesis that the NVC robot invokes more trust, which is consistent with findings of similar studies. <xref ref-type="bibr" rid="B82">Wortham and Theodorou (2017)</xref> state that the perceived anthropomorphism of the robot increases the trust in the robot, especially for non-specialist humans, as the human needs to create a mental model for the robot to trust it. Furthermore, an increase in NVC leads to an increase in motion which subsequently leads to more perceived animacy (<xref ref-type="bibr" rid="B60">Parisi and Schlesinger, 2002</xref>).</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Effect of non-verbal communication (NVC) and minimal non-verbal communication (MNVC) on Godspeed&#x20;items.</p>
</caption>
<graphic xlink:href="frobt-08-644529-g009.tif"/>
</fig>
<p>However, likeability does not seem to be affected by the use of NVC, potentially because the quantity and type of gestures used for non-verbal communication vary with culture (<xref ref-type="bibr" rid="B20">DeVito et&#x20;al., 2000</xref>). Thus the degree to which a robot moves does not necessarily influence the likeability of the robot, as this is a personal preference that can vary across participants. Consistent with previous research (<xref ref-type="bibr" rid="B17">Deshmukh et&#x20;al., 2018</xref>), there are no perceived differences in intelligence either.</p>
<p>Our results show a correlation between trust measured by the investment game and the self-reported trust from the questionnaire. This gives us evidence that the scaled-up investment game can be used as a tool for measuring human-robot trust and therefore it can have practical applications in future experiments to study the impact of different variables (such as NVCs) between robots on how trustworthy the human perceives them. We anticipate that this serves as a positive example of extending socioeconomic experiments to a human-robot social interaction setting.</p>
</sec>
</sec>
<sec id="s5">
<title>5 Discussion and Future Work</title>
<p>Our experiment revolves around three main characteristics: the weighted choice between two agents, the participants&#x2019; inability to make choices based on prior domain knowledge, and the additional incentive for interaction between the trustor and the trustee. Maintaining these characteristics, we believe our game design can be adapted to various situations and environments where trust and NVCs play a role. Such environments comprise, but are not limited to, a work environment or a public service environment.</p>
<p>Overall, our results show that our variant of the investment game provides a reliable measure for human-robot trust and that non-verbal communication positively affects human-robot trust. However, there are some points of discussion which we address further in the following section.</p>
<sec id="s5-1">
<title>5.1 Science Fiction and Immersion</title>
<p>In our study, we chose a futuristic environment since most people know robots from media and science fiction stories (<xref ref-type="bibr" rid="B35">Horstmann and Kr&#xe4;mer, 2019</xref>). While we hypothesize that this is not a limiting factor for our study&#x2019;s replication, this should be subject to further research. It is essential to note that participants likely acted following a mental model, acting as a player in a game based around a fictional narrative (see <xref ref-type="sec" rid="s3-1">Section 3.1</xref> for a summary or <xref ref-type="fn" rid="fn5">
<sup>4</sup>
</xref> for the full narrative). As such, our presented results should be interpreted within this context. For example, as a byproduct of high immersion, we cannot exclude that some participants might have engaged so strongly in role-playing their alter ego so that their observed behavior might have started to differ from their usual self. Consequently, generalisability from contained game studies to real-world settings is an additional open question that is subject to academic debate and research, even in normal trust games with minimal role-play (<xref ref-type="bibr" rid="B46">Levitt and List, 2007</xref>; <xref ref-type="bibr" rid="B39">Johnson and Mislin, 2011</xref>). Furthermore, we argue that our investigated NVCs and trust factors are likely to be experienced on a more intuitive level and therefore difficult to &#x201c;fake&#x201d; when role-playing, given a certain degree of independence from the actual decision-making process in our&#x20;game.</p>
</sec>
<sec id="s5-2">
<title>5.2 Gameplay Behavior</title>
<p>We found two different gameplay behaviors that identify the two groups on which results were compared: the main group (<inline-formula id="inf65">
<mml:math id="m67">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>29</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and the alternating-minimum investment group (<inline-formula id="inf66">
<mml:math id="m68">
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>16</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). The alternating-minimum investment group (described in <xref ref-type="sec" rid="s4-2-3">Section 4.2.3</xref>) either alternated their investment or invested little in the robots, which shows no engagement in the game. There was no significant trust correlation for the alternating-minimum investment group, whereas the main group showed a significant correlation. As 16 participants is quite a high number, we hypothesize that the participants in the alternating-minimum investment group could have been alternating their strategies to infer the experiment&#x2019;s research question or simply to test the system, similar to what was experienced by <xref ref-type="bibr" rid="B55">Mota et&#x20;al. (2016)</xref>, possibly because the experiment was advertised in a computer science department. This group also showed higher scores for neuroticism compared to the main group in our personality test. Some participants may not have liked the experimental setup or may not have felt immersed enough to participate. However, the lack of immersion does not reflect the game&#x2019;s general perception, since most participants stated to the experimenter in the post-interview that they had felt immersed and motivated to win the&#x20;game.</p>
<p>
<xref ref-type="bibr" rid="B55">Mota et&#x20;al. (2016)</xref> observed that when a human needs to judge a robot&#x2019;s trustworthiness, they draw on past social experiences with humans or try to build social experience with the robot. Due to insufficient shared social cues between humans and robots, humans are mostly incapable of determining a robot&#x2019;s trustworthiness based on past experiences. The alternating and minimum investment behavior observed could indicate an insufficient social experience, thus preventing the establishment. However, further research is necessary to study the particular motivations.</p>
<p>In our experiment, almost half of all 45 participants had never interacted with a robot previously. We fostered building social experience with the robot by making the participant ask them one question before each of the four rounds of cube allocation. Potentially, participants in the alternating-minimum group may have needed more rounds to build social experience reliably. From this perspective, adding more rounds to the game could potentially lead to the behavior regularizing over time. Future work might want to investigate the optimal number of rounds, thereby balancing the trade-off between the experiment&#x2019;s length and the number of collected data points.</p>
<p>For the small number of three participants who showed non-engaging behavior (see <xref ref-type="sec" rid="s4-2-3">Section 4.2.3</xref>), this could result from misunderstanding the rules of the game, the relative worth of the energy cubes, or a general aversion to decision-making or the presented scenario. The non-engaging behavior may also be an&#x20;attempt to delay decision-making until enough social experience has been built between the participant and the robots.</p>
</sec>
<sec id="s5-3">
<title>5.3 Improvements for Future Studies</title>
<p>While we observed and measured trust through the player&#x2019;s investments, we suggest weighing the following points in future studies. Clearer and more detailed results could likely be obtained with a more prolonged experiment and a bigger participant pool with a revised scenario, mitigating some of this experiment&#x2019;s limitations.</p>
<p>Since our robots functioned fully autonomously, the natural language interface sometimes malfunctioned due to usage or technical errors, potentially prolonging the time until feedback. The participants who had to repeat themselves, some multiple times, may have experienced a break in immersion. Although our post-interviews did not reflect it, we cannot rule out that some participants may have felt frustrated by a bumpy interaction. Future experiments could investigate the effect of simplified design choices on our measurements, for example, by substituting our autonomous setup with a wizard-of-oz design for timely interaction. The processing time of the many parts of the experimental setup sometimes led to slight delays between user action and robot reaction, which could have caused a break in immersion and frustration.</p>
<p>Our study is limited to the NICO robots. We have encountered some technical limitations, including the lack of a more extensive range of different facial expressions and a wider range of human-like movements. Moreover, NICO has a childlike appearance. It is unclear how the perceived robot age can affect human perception of honesty and reliability, even though we introduced the NICOs as specialists in the complex field of space exploration.</p>
<p>It is important to note that we merely compared non-verbal communication (NVC) against minimal non-verbal communication (MNVC). There is currently no widely established baseline or notion of <italic>minimal</italic> NVC, and the impact of our interpretation and subsequent design choices on the participants is an open question. Our study showed that the mere presence of NVC has a positive impact on both the trust in the robot and the perceived characteristics of it. Future studies should investigate where the boundaries of minimal and too much NVC lie. As both robots showed at least a baseline of non-verbal cues, the difference between the two conditions may have been diminished. Future studies may also investigate how different gestures affect trust, as there is no clear consensus that social cues translate to &#x201c;reliable&#x201d; or &#x201c;unreliable&#x201d;, and no obvious way to categorize these&#x20;cues.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s6">
<title>6 Conclusion</title>
<p>We provided an elaborate HRI scenario to model the building of trust more closely to human relationships than in the original investment game. Our experimental setup includes social interaction, non-verbal communication, a shared goal, and intrinsic motivation, thereby allowing participants to collaborate with robots more realistically than in the original investment game, and measuring trust reliably. The environmental variables that our scenario (and its life-like agents) adds to the data are a natural reflection of the many factors, internal and external, that influence human trust and how different levels of trust affect human behavior in different contexts, modeling aspects of human-robot trust that the original trust game does not&#x20;cover.</p>
<p>We found a correlation between the self-assessed trust and the trust measured from the game for the majority of participants (main group). These same participants allocated more energy cells to the robot with non-verbal communication (NVC) in the first scene of the game. We were therefore able to replicate the positive effect of non-verbal communication on trust and robot perception. The Godspeed (<xref ref-type="bibr" rid="B3">Bartneck et&#x20;al., 2008</xref>) values for anthropomorphism and animacy were increased by NVC for all participants.</p>
<p>Future research should comprise an investigation of the gameplay behaviors observed and could explore the effects of the use of different robots in this setup. Moreover, a similar setup can be used in future studies as a platform for studying trust and other potential factors that influence trust, in a real-world scenario and without losing the complex dynamics of building, breaking and maintaining trust given life-like agents and complex real-world situations. We can use it to formulate an in-depth trust analysis without losing the complex dynamics between internal and external factors that influence the human ability to trust others&#x2014;be they humans or robots.</p>
</sec>
</body>
<back>
<sec id="s7">
<title>Data Availability Statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s8">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by Universit&#xe4;t Hamburg Ethics Committee. The patients/participants provided their written informed consent to participate in this&#x20;study.</p>
</sec>
<sec id="s9">
<title>Author Contributions</title>
<p>SZ and EA have contributed to the conception, writing and organization of the manuscript. BV, AS, FS, GM, and KB contributed to writing the first draft. SZ, EA, BV, AS, FS, GM, and KB have implemented the system, conducted, and evaluated the study. EA and GM performed the statistical analysis, SZ aided in interpreting the results. ES has aided in the technical realization of the presented study. ES, AP, and TA have supervised the project. AP and TA have helped in editing and revising the paper. SW has contributed to manuscript reading and revision and approved the submitted version.</p>
</sec>
<sec id="s10">
<title>Funding</title>
<p>The authors gratefully acknowledge partial support from the German Research Foundation (DFG) under Project CML (TRR-169).</p>
</sec>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<ack>
<p>This article builds upon <xref ref-type="bibr" rid="B2">Arts et&#x20;al. (2020)</xref>, presented at HAI 2020/8th International Conference on Human-Agent Interaction, 10&#x2013;13th November 2020&#x20;&#x2014;&#x20;Sydney, Australia, and was invited by the Frontiers HRI Topic Editors for a 30% extension of the original contribution. The study would not have been possible without Ahmed Abdelghany, Connor G&#xe4;de, Vadym Gryshchuk, Matthew Ng, Shahd Safarani, Nilesh Vijayrania, Christoper Glenn Wulur, and Sophia&#x20;Zell.</p>
</ack>
<sec id="s12">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frobt.2021.644529/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frobt.2021.644529/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.PDF" id="SM1" mimetype="application/PDF" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn2">
<label>1</label>
<p>
<ext-link ext-link-type="uri" xlink:href="http://wiki.ros.org/smach">http://wiki.ros.org/smach</ext-link> (accessed 2021-03-10)</p>
</fn>
<fn id="fn3">
<label>2</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://aws.amazon.com/polly/">https://aws.amazon.com/polly/</ext-link> (accessed 2021-03-10)</p>
</fn>
<fn id="fn4">
<label>3</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frobt.2021.644529/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frobt.2021.644529/full#supplementary-material</ext-link>
</p>
</fn>
<fn id="fn5">
<label>4</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://gist.github.com/SZoerner/12cefe9ca612b4ae57385b9ea47bf999">https://gist.github.com/SZoerner/12cefe9ca612b4ae57385b9ea47bf999</ext-link>
</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ambady</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Weisbuch</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Nonverbal Behavior</article-title>. <source>Handbook Soc. Psychol.</source> <volume>1</volume>, <fpage>464</fpage>&#x2013;<lpage>487</lpage>. <pub-id pub-id-type="doi">10.1002/9780470561119.socpsy001013</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arts</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Z&#xf6;rner</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bhatia</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Mir</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Schmalzl</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Srivastava</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Exploring Human-Robot Trust through the Investment Game: An Immersive Space Mission Scenario</article-title>,&#x201d; in <conf-name>Proceedings of the 8th International Conference on Human-Agent Interaction</conf-name>, <fpage>121</fpage>&#x2013;<lpage>130</lpage>. </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bartneck</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kuli&#x107;</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Croft</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zoghbi</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Measurement Instruments for the Anthropomorphism, Animacy, Likeability, Perceived Intelligence, and Perceived Safety of Robots</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>1</volume>, <fpage>71</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-008-0001-3</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bauer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>D&#xe1;vila-Chac&#xf3;n</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Strahl</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wermter</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Smoke and Mirrors &#x2014;&#x20;Virtual Realities for Sensor Fusion Experiments in Biomimetic Robotics</article-title>,&#x201d; in <conf-name>2012 IEEE International Conference on Multisensor Fusion and Integration for Intelligent Systems (MFI)</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>114</fpage>&#x2013;<lpage>119</lpage>. <pub-id pub-id-type="doi">10.1109/MFI.2012.6343022</pub-id> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berg</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dickhaut</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>McCabe</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Trust, Reciprocity, and Social History</article-title>. <source>Games Econ. Behav.</source> <volume>10</volume>, <fpage>122</fpage>&#x2013;<lpage>142</lpage>. <pub-id pub-id-type="doi">10.1006/game.1995.1027</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bernotat</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Eyssel</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sachse</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Shape it - the Influence of Robot Body Shape on Gender Perception in Robots</article-title>. In <conf-name>International Conference on Social Robotics</conf-name>. <publisher-name>Springer</publisher-name>, <fpage>75</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-70022-9_8</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bernotat</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Eyssel</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sachse</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>The (Fe)male Robot: How Robot Body Shape Impacts First Impressions and Trust towards Robots</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>11</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00562-7</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bradski</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>The OpenCV Library</article-title>. <source>Dr. Dobb&#x2019;s J.&#x20;Softw. Tools</source>. <volume>25</volume>, <fpage>120</fpage>&#x2013;<lpage>125</lpage>.</citation>
</ref>
<ref id="B9">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Breazeal</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kidd</surname>
<given-names>C. D.</given-names>
</name>
<name>
<surname>Thomaz</surname>
<given-names>A. L.</given-names>
</name>
<name>
<surname>Hoffman</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Berlin</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Effects of Nonverbal Communication on Efficiency and Robustness in Human-Robot Teamwork</article-title>. In <conf-name>IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>708</fpage>&#x2013;<lpage>713</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2005.1545011</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Breazeal</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Takanishi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kobayashi</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2008</year>). <source>Social Robots that Interact with People</source>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>, <fpage>1349</fpage>&#x2013;<lpage>1369</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-540-30301-5_59</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Broadbent</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Kumar</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Sollers</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Stafford</surname>
<given-names>R. Q.</given-names>
</name>
<name>
<surname>MacDonald</surname>
<given-names>B. A.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). <article-title>Robots with Display Screens: a Robot with a More Humanlike Face Display Is Perceived to Have More Mind and a Better Personality</article-title>. <source>PloS one</source> <volume>8</volume>, <fpage>e72589</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0072589</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Burgoon</surname>
<given-names>J.&#x20;K.</given-names>
</name>
<name>
<surname>Guerrero</surname>
<given-names>L. K.</given-names>
</name>
<name>
<surname>Floyd</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>
<italic>Nonverbal Communication</italic> (Routledge)</article-title>
<pub-id pub-id-type="doi">10.4324/9781315663425</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cairns</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Cox</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Nordin</surname>
<given-names>A. I.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Immersion in Digital Games: Review of Gaming Experience Research</article-title>. <source>Handbook of Digital Games</source> <volume>1</volume>, <fpage>767</fpage>. </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Casper</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Murphy</surname>
<given-names>R. R.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Human-robot Interactions during the Robot-Assisted Urban Search and Rescue Response at the World Trade Center</article-title>. <source>IEEE Trans. Syst. Man. Cybern. B</source> <volume>33</volume>, <fpage>367</fpage>&#x2013;<lpage>385</lpage>. <pub-id pub-id-type="doi">10.1109/tsmcb.2003.811794</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cummings</surname>
<given-names>J.&#x20;J.</given-names>
</name>
<name>
<surname>Bailenson</surname>
<given-names>J.&#x20;N.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>How Immersive Is Enough? a Meta-Analysis of the Effect of Immersive Technology on User Presence</article-title>. <source>Media Psychol.</source> <volume>19</volume>, <fpage>272</fpage>&#x2013;<lpage>309</lpage>. <pub-id pub-id-type="doi">10.1080/15213269.2015.1015740</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="web">
<collab>[Dataset] Statista</collab> (<year>2019</year>). <article-title>Roboter Wie Pepper &#xdc;bernehmen Immer Mehr T&#xe4;tigkeiten in Unserem Alltag</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://de.statista.com/statistik/daten/studie/1005815/umfrage/akzeptanz-von-roboter-dienstleistungen-in-der-schweiz">https://de.statista.com/statistik/daten/studie/1005815/umfrage/akzeptanz-von-roboter-dienstleistungen-in-der-schweiz</ext-link>
</comment> (<comment>accessed 2021-03-10</comment>). </citation>
</ref>
<ref id="B17">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Deshmukh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Craenen</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Vinciarelli</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Foster</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<year>2018</year>). <conf-name>Shaping Robot Gestures to Shape Users&#x27; Perception: Proceedings of the 6th International Conference on Human-Agent Interaction</conf-name>, <volume>18</volume>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>, <fpage>293</fpage>&#x2013;<lpage>300</lpage>. <pub-id pub-id-type="doi">10.1145/3284432.3284445</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>DeSteno</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bartlett</surname>
<given-names>M. Y.</given-names>
</name>
<name>
<surname>Baumann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Williams</surname>
<given-names>L. A.</given-names>
</name>
<name>
<surname>Dickens</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Gratitude as Moral Sentiment: Emotion-Guided Cooperation in Economic Exchange</article-title>. <source>Emotion</source> <volume>10</volume>, <fpage>289</fpage>&#x2013;<lpage>293</lpage>. <pub-id pub-id-type="doi">10.1037/a0017883</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>DeSteno</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Breazeal</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Frank</surname>
<given-names>R. H.</given-names>
</name>
<name>
<surname>Pizarro</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Baumann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dickens</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>Detecting the Trustworthiness of Novel Partners in Economic Exchange</article-title>. <source>Psychol. Sci.</source> <volume>23</volume>, <fpage>1549</fpage>&#x2013;<lpage>1556</lpage>. <pub-id pub-id-type="doi">10.1177/0956797612448793</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>DeVito</surname>
<given-names>J.&#x20;A.</given-names>
</name>
<name>
<surname>O&#x2019;Rourke</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>O&#x2019;Neill</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2000</year>). <source>Human Communication</source>. <publisher-name>New York: Longman</publisher-name>. <pub-id pub-id-type="doi">10.21236/ada377245</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Duffy</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Trust in Second Life</article-title>. <source>Southern Econ. J.</source> <volume>78</volume>, <fpage>53</fpage>&#x2013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.4284/0038-4038-78.1.53</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Esterwood</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Robert</surname>
<given-names>L. P.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Personality in Healthcare Human Robot Interaction (H-HRI)</article-title>,&#x201d; in <source>Personality in Healthcare Human Robot Interaction (H-HRI): A Literature Review and Brief Critique, Proceedings of the 8th International Conference on Human-Agent Interaction</source>. Editors <person-group person-group-type="author">
<name>
<surname>Esterwood</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Robert</surname>
<given-names>L. P.</given-names>
</name>
</person-group>.<fpage>10</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1145/3406499.3415075</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Fabricatore</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2007</year>). <source>ENLACES (MINEDUC Chile) -OECD Expert Meeting On Videogames And Education</source>.<article-title>Gameplay and Game Mechanics: A Key to Quality in Videogames</article-title> </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Felzmann</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Fosch-Villaronga</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Lutz</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Tamo-Larrieux</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Robots and Transparency: The Multiple Dimensions of Transparency in the Context of Robot Technologies</article-title>. <source>IEEE Robot. Automat. Mag.</source> <volume>26</volume>, <fpage>71</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1109/mra.2019.2904644</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Fendt</surname>
<given-names>M. W.</given-names>
</name>
<name>
<surname>Harrison</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ware</surname>
<given-names>S. G.</given-names>
</name>
<name>
<surname>Cardona-Rivera</surname>
<given-names>R. E.</given-names>
</name>
<name>
<surname>Roberts</surname>
<given-names>D. L.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Achieving the Illusion of Agency</article-title>. In <conf-name>International Conference on Interactive Digital Storytelling</conf-name>. <publisher-name>Springer</publisher-name>, <fpage>114</fpage>&#x2013;<lpage>125</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-34851-8_11</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>George</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Eiband</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hufnagel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hussmann</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Trusting Strangers in Immersive Virtual Reality</article-title>,&#x201d; in <conf-name>Proceedings of the 23rd International Conference on Intelligent User Interfaces Companion</conf-name>. <fpage>1</fpage>&#x2013;<lpage>2</lpage>. </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gherhe&#x15f;</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Why Are We Afraid of Artificial Intelligence (AI)?</article-title> <source>Eur. Rev. Appl. Sociol.</source> <volume>11</volume>, <fpage>6</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1515/eras-2018-0006</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Glaeser</surname>
<given-names>E. L.</given-names>
</name>
<name>
<surname>Laibson</surname>
<given-names>D. I.</given-names>
</name>
<name>
<surname>Scheinkman</surname>
<given-names>J.&#x20;A.</given-names>
</name>
<name>
<surname>Soutter</surname>
<given-names>C. L.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Measuring Trust&#x2a;</article-title>. <source>Q. J.&#x20;Econ.</source> <volume>115</volume>, <fpage>811</fpage>&#x2013;<lpage>846</lpage>. <pub-id pub-id-type="doi">10.1162/003355300554926</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Goldberg</surname>
<given-names>L. R.</given-names>
</name>
<name>
<surname>Kilkowski</surname>
<given-names>J.&#x20;M.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>The Prediction of Semantic Consistency in Self-Descriptions: Characteristics of Persons and of Terms that Affect the Consistency of Responses to Synonym and Antonym Pairs</article-title>. <source>J.&#x20;Personal. Soc. Psychol.</source> <volume>48</volume>, <fpage>82</fpage>&#x2013;<lpage>98</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.48.1.82</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hale</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Payne</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Taylor</surname>
<given-names>K. M.</given-names>
</name>
<name>
<surname>Paoletti</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>De C Hamilton</surname>
<given-names>A. F.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The Virtual Maze: A Behavioural Tool for Measuring Trust</article-title>. <source>Q. J.&#x20;Exp. Psychol.</source> <volume>71</volume>, <fpage>989</fpage>&#x2013;<lpage>1008</lpage>. <pub-id pub-id-type="doi">10.1080/17470218.2017.1307865</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hameed</surname>
<given-names>I. A.</given-names>
</name>
<name>
<surname>Strazdins</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Hatlemark</surname>
<given-names>H. A. M.</given-names>
</name>
<name>
<surname>Jakobsen</surname>
<given-names>I. S.</given-names>
</name>
<name>
<surname>Damdam</surname>
<given-names>J.&#x20;O.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Robots that Can Mix Serious with Fun</article-title>, <fpage>595</fpage>&#x2013;<lpage>604</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-74690-6_58</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hancock</surname>
<given-names>P. A.</given-names>
</name>
<name>
<surname>Billings</surname>
<given-names>D. R.</given-names>
</name>
<name>
<surname>Schaefer</surname>
<given-names>K. E.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.&#x20;Y. C.</given-names>
</name>
<name>
<surname>de Visser</surname>
<given-names>E. J.</given-names>
</name>
<name>
<surname>Parasuraman</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>A Meta-Analysis of Factors Affecting Trust in Human-Robot Interaction</article-title>. <source>Hum. Factors</source> <volume>53</volume>, <fpage>517</fpage>&#x2013;<lpage>527</lpage>. <pub-id pub-id-type="doi">10.1177/0018720811417254</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haring</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Matsumoto</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Watanabe</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>How Do People Perceive and Trust a Lifelike Robot?</article-title> In <conf-name>Proceedings of the world congress on engineering and computer science (Citeseer)</conf-name>, vol. <volume>1</volume> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ho</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>The Value of Being Powerful or Beautiful in Games - How Game Design Affects the Value of Virtual Items</article-title>. <source>Comput. Game J.</source> <volume>3</volume>, <fpage>54</fpage>&#x2013;<lpage>61</lpage>. <pub-id pub-id-type="doi">10.1007/bf03392357</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Horstmann</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>Kr&#xe4;mer</surname>
<given-names>N. C.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Great Expectations? Relation of Previous Experiences with Social Robots in Real Life or in the Media and Expectancies Based on Qualitative and Quantitative Assessment</article-title>. <source>Front. Psychol.</source> <volume>10</volume>, <fpage>939</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2019.00939</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hunicke</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Leblanc</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Zubek</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Mda: A Formal Approach to Game Design and Game Research</article-title>. In <conf-name>Proceedings of the Challenges in Games AI Workshop, Nineteenth National Conference of Artificial Intelligence (Press)</conf-name>, <fpage>1</fpage>&#x2013;<lpage>5</lpage>. </citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jakobsson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sotamaa</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Moore</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Begy</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Consalvo</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gazzard</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2011</year>). <article-title>Game Reward Systems</article-title>. <source>Game Stud.</source> <volume>11</volume>. </citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Johnson</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Grayson</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Cognitive and Affective Trust in Service Relationships</article-title>. <source>J.&#x20;Business Res.</source> <volume>58</volume>, <fpage>500</fpage>&#x2013;<lpage>507</lpage>. <pub-id pub-id-type="doi">10.1016/s0148-2963(03)00140-1</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Johnson</surname>
<given-names>N. D.</given-names>
</name>
<name>
<surname>Mislin</surname>
<given-names>A. A.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Trust Games: A Meta-Analysis</article-title>. <source>J.&#x20;Econ. Psychol.</source> <volume>32</volume>, <fpage>865</fpage>&#x2013;<lpage>889</lpage>. <pub-id pub-id-type="doi">10.1016/j.joep.2011.05.007</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kerzel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pekarek-Rosin</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Strahl</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Heinrich</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wermter</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Teaching Nico How to Grasp: An Empirical Study on Crossmodal Social Interaction as a Key Factor for Robots Learning from Humans</article-title>. <source>Front. Neurorobot.</source> <volume>14</volume>, <fpage>28</fpage>. <pub-id pub-id-type="doi">10.3389/fnbot.2020.00028</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kerzel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Strahl</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Magg</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Navarro-Guerrero</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Heinrich</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wermter</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Nico&#x2014;neuro-inspired Companion: A Developmental Humanoid Robot Platform for Multimodal Interaction</article-title>. In <source>2017 26th IEEE International Symposium on Robot and Human Interactive Communication</source>. (<publisher-loc>RO-MAN</publisher-loc>: <publisher-name>IEEE)</publisher-name>, <fpage>113</fpage>&#x2013;<lpage>120</lpage>. </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kubilinskiene</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zilinskiene</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Dagiene</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Sinkevi&#x10d;ius</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Applying Robotics in School Education: a Systematic Review</article-title>. <source>Bjmc</source> <volume>5</volume>, <fpage>50</fpage>&#x2013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.22364/bjmc.2017.5.1.04</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kulyukin</surname>
<given-names>V. A.</given-names>
</name>
</person-group> (<year>2006</year>). &#x201c;<article-title>On Natural Language Dialogue with Assistive Robots</article-title>,&#x201d; in <conf-name>Proceedings of the 1st ACM SIGCHI/SIGART Conference on Human-robot Interaction</conf-name>, <fpage>164</fpage>&#x2013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1145/1121241.1121270</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lang</surname>
<given-names>F. R.</given-names>
</name>
<name>
<surname>John</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>L&#xfc;dtke</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Schupp</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wagner</surname>
<given-names>G. G.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Short Assessment of the Big Five: Robust across Survey Methods except Telephone Interviewing</article-title>. <source>Behav. Res.</source> <volume>43</volume>, <fpage>548</fpage>&#x2013;<lpage>567</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-011-0066-z</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Langer</surname>
<given-names>E. J.</given-names>
</name>
</person-group> (<year>1975</year>). <article-title>The Illusion of Control</article-title>. <source>J.&#x20;Personal. Soc. Psychol.</source> <volume>32</volume>, <fpage>311</fpage>&#x2013;<lpage>328</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.32.2.311</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Levitt</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>List</surname>
<given-names>J.&#x20;A.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Viewpoint: On the Generalizability of Lab Behaviour to the Field</article-title>. <source>Can. J.&#x20;Economics/Revue canadienne d&#x27;&#xe9;conomique</source> <volume>40</volume>, <fpage>347</fpage>&#x2013;<lpage>370</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2966.2007.00412.x</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>S. A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Fear of Autonomous Robots and Artificial Intelligence: Evidence from National Representative Data with Probability Sampling</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>9</volume>, <fpage>379</fpage>&#x2013;<lpage>384</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-017-0401-3</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Logan</surname>
<given-names>D. E.</given-names>
</name>
<name>
<surname>Breazeal</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Goodwin</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Jeong</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>O&#x2019;Connell</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Smith-Freedman</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Social Robots for Hospitalized Children</article-title>. <source>Pediatrics</source> <volume>144</volume>, <fpage>e20181511</fpage>. <pub-id pub-id-type="doi">10.1542/peds.2018-1511</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lombard</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ditton</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>At the Heart of it All: The Concept of Presence</article-title>. <source>J.&#x20;Computer-mediated Commun.</source> <volume>3</volume>, <fpage>JCMC321</fpage>. </citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lombard</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ditton</surname>
<given-names>T. B.</given-names>
</name>
<name>
<surname>Weinstein</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>Measuring Presence: the Temple Presence Inventory</article-title>,&#x201d; in <conf-name>Proceedings of the 12th annual International Workshop on Presence</conf-name>, <fpage>1</fpage>&#x2013;<lpage>15</lpage>. </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Looije</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Neerincx</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Cnossen</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Persuasive Robotic Assistant for Health Self-Management of Older Adults: Design and Evaluation of Social Behaviors</article-title>. <source>Int. J.&#x20;Human-Computer Stud.</source> <volume>68</volume>, <fpage>386</fpage>&#x2013;<lpage>397</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2009.08.007</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mathur</surname>
<given-names>M. B.</given-names>
</name>
<name>
<surname>Reichling</surname>
<given-names>D. B.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Navigating a Social World with Robot Partners: A Quantitative Cartography of the Uncanny Valley</article-title>. <source>Cognition</source> <volume>146</volume>, <fpage>22</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2015.09.008</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mayer</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>J.&#x20;H.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>The Effect of the Performance Appraisal System on Trust for Management: A Field Quasi-Experiment</article-title>. <source>J.&#x20;Appl. Psychol.</source> <volume>84</volume>, <fpage>123</fpage>&#x2013;<lpage>136</lpage>. <pub-id pub-id-type="doi">10.1037/0021-9010.84.1.123</pub-id> </citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Meertens</surname>
<given-names>R. M.</given-names>
</name>
<name>
<surname>Lion</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Measuring an Individual&#x27;s Tendency to Take Risks: The Risk Propensity Scale</article-title>. <source>J.&#x20;Appl. Soc. Pyschol</source> <volume>38</volume>, <fpage>1506</fpage>&#x2013;<lpage>1520</lpage>. <pub-id pub-id-type="doi">10.1111/j.1559-1816.2008.00357.x</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mota</surname>
<given-names>R. C. R.</given-names>
</name>
<name>
<surname>Rea</surname>
<given-names>D. J.</given-names>
</name>
<name>
<surname>Le Tran</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Young</surname>
<given-names>J.&#x20;E.</given-names>
</name>
<name>
<surname>Sharlin</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Sousa</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Playing the &#x2018;trust Game&#x2019; with Robots: Social Strategies and Experiences</article-title>. In <source>2016 25th IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)</source>. <publisher-loc>NY</publisher-loc>: <publisher-name>Columbia University; IEEE</publisher-name>, <fpage>519</fpage>&#x2013;<lpage>524</lpage>. </citation>
</ref>
<ref id="B56">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mubin</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Obaid</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Jordan</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Alves-Oliveria</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Eriksson</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Barendregt</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Sjolle</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Fjeld</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Simoff</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Billinghurst</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Towards an Agenda for Sci-Fi Inspired Hci Research</article-title>,&#x201d; in <conf-name>Proceedings of the 13th International Conference on Advances in Computer Entertainment Technology</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery)</publisher-name>. <pub-id pub-id-type="doi">10.1145/3001773.3001786</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mukai</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hirano</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nakashima</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kato</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Sakaida</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2010</year>). <source>Development of a Nursing-Care Assistant Robot Riba that Can Lift a Human in its Arms</source>, <fpage>5996</fpage>&#x2013;<lpage>6001</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2010.5651735</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Murray</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1997</year>). <source>Hamlet on the Holodeck: The Future of Narrative in Cyberspace</source>. <publisher-name>MIT Press</publisher-name>.</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Neumann</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Social Robots and Young Children&#x27;s Early Language and Literacy Learning</article-title>. <source>Early Child. Educ J</source> <volume>48</volume>, <fpage>157</fpage>&#x2013;<lpage>170</lpage>. <pub-id pub-id-type="doi">10.1007/s10643-019-00997-7</pub-id> </citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Parisi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Schlesinger</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Artificial Life and Piaget</article-title>. <source>Cogn. Develop.</source> <volume>17</volume>, <fpage>1301</fpage>&#x2013;<lpage>1321</lpage>. <pub-id pub-id-type="doi">10.1016/s0885-2014(02)00119-3</pub-id> </citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peterson</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Massively Multiplayer Online Role-Playing Games as Arenas for Second Language Learning</article-title>. <source>Comp. Assist. Lang. Learn.</source> <volume>23</volume>, <fpage>429</fpage>&#x2013;<lpage>439</lpage>. <pub-id pub-id-type="doi">10.1080/09588221.2010.520673</pub-id> </citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Powers</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kiesler</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2006</year>). &#x201c;<article-title>The Advisor Robot: Tracing People&#x2019;s Mental Model from a Robot&#x2019;s Physical Attributes</article-title>,&#x201d; in <conf-name>Proceedings of the 1st ACM SIGCHI/SIGART Conference on Human-robot Interaction</conf-name>, <fpage>218</fpage>&#x2013;<lpage>225</lpage>. </citation>
</ref>
<ref id="B63">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Quigley</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Conley</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Gerkey</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Faust</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Foote</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Leibs</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2009</year>). <article-title>Ros: an Open-Source Robot Operating System</article-title>. In <source>ICRA Workshop on Open Source Software</source>, <volume>3</volume>. <publisher-loc>Kobe, Japan</publisher-loc> (IEEE), <fpage>5</fpage>. </citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rai</surname>
<given-names>T. S.</given-names>
</name>
<name>
<surname>Diermeier</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Corporations Are Cyborgs: Organizations Elicit Anger but Not Sympathy when They Can Think but Cannot Feel</article-title>. <source>Organizational Behav. Hum. Decis. Process.</source> <volume>126</volume>, <fpage>18</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1016/j.obhdp.2014.10.001</pub-id> </citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rempel</surname>
<given-names>J.&#x20;K.</given-names>
</name>
<name>
<surname>Holmes</surname>
<given-names>J.&#x20;G.</given-names>
</name>
<name>
<surname>Zanna</surname>
<given-names>M. P.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>Trust in Close Relationships</article-title>. <source>J.&#x20;Personal. Soc. Psychol.</source> <volume>49</volume>, <fpage>95</fpage>&#x2013;<lpage>112</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.49.1.95</pub-id> </citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rossi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dautenhahn</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Koay</surname>
<given-names>K. L.</given-names>
</name>
<name>
<surname>Saunders</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017a</year>). &#x201c;<article-title>Investigating Human Perceptions of Trust in Robots for Safe Hri in Home Environments</article-title>,&#x201d; in <conf-name>Proceedings of the Companion of the 2017 ACM/IEEE International Conference on Human-Robot Interaction</conf-name>, <fpage>375</fpage>&#x2013;<lpage>376</lpage>. <pub-id pub-id-type="doi">10.1145/3029798.3034822</pub-id> </citation>
</ref>
<ref id="B67">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Rossi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dautenhahn</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Koay</surname>
<given-names>K. L.</given-names>
</name>
<name>
<surname>Walters</surname>
<given-names>M. L.</given-names>
</name>
</person-group> (<year>2017b</year>). <source>Social Robotics</source>. (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>42</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-70022-9_5</pub-id>
<article-title>How the Timing and Magnitude of Robot Errors Influence Peoples&#x27; Trust of Robots in an Emergency Scenario</article-title> </citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saint-Mont</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Randomization Does Not Help Much, Comparability Does</article-title>. <source>PLOS ONE</source> <volume>10</volume>, <fpage>e0132102</fpage>&#x2013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0132102</pub-id> </citation>
</ref>
<ref id="B69">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Salem</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Eyssel</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Rohlfing</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kopp</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Joublin</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>To Err Is Human(-like): Effects of Robot Gesture on Perceived Anthropomorphism and Likability</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>5</volume>, <fpage>313</fpage>&#x2013;<lpage>323</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-013-0196-9</pub-id> </citation>
</ref>
<ref id="B70">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Salem</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rohlfing</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kopp</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Joublin</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>A Friendly Gesture: Investigating the Effect of Multimodal Robot Behavior in Human-Robot Interaction</article-title>. In <volume>2011</volume>. (<publisher-loc>Ro-Man</publisher-loc>, <publisher-name>IEEE)</publisher-name>, <fpage>247</fpage>&#x2013;<lpage>252</lpage>. <pub-id pub-id-type="doi">10.1109/ROMAN.2011.6005285</pub-id> </citation>
</ref>
<ref id="B71">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Sandoval</surname>
<given-names>E. B.</given-names>
</name>
<name>
<surname>Brandstetter</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bartneck</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Can a Robot Bribe a Human? the Measurement of the Negative Side of Reciprocity in Human Robot Interaction</article-title>,&#x201d; in <conf-name>11th ACM/IEEE International Conference on Human-Robot Interaction</conf-name> (<publisher-name>IEEE</publisher-name>), <fpage>117</fpage>&#x2013;<lpage>124</lpage>. <pub-id pub-id-type="doi">10.1109/HRI.2016.7451742</pub-id> </citation>
</ref>
<ref id="B72">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saunderson</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nejat</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>How Robots Influence Humans: A Survey of Nonverbal Communication in Social Human-Robot Interaction</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>11</volume>, <fpage>575</fpage>&#x2013;<lpage>608</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00523-0</pub-id> </citation>
</ref>
<ref id="B73">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schniter</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Shields</surname>
<given-names>T. W.</given-names>
</name>
<name>
<surname>Sznycer</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Trust in Humans and Robots: Economically Similar but Emotionally Different</article-title>. <source>J.&#x20;Econ. Psychol.</source> <volume>78</volume>, <fpage>102253</fpage>. <pub-id pub-id-type="doi">10.1016/j.joep.2020.102253</pub-id> </citation>
</ref>
<ref id="B74">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schoorman</surname>
<given-names>F. D.</given-names>
</name>
<name>
<surname>Mayer</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>J.&#x20;H.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Organizational Trust: Philosophical Perspectives and Conceptual Definitions</article-title>. <source>Acad. Manage. Rev.</source> <volume>21</volume>, <fpage>337</fpage>&#x2013;<lpage>340</lpage>. </citation>
</ref>
<ref id="B75">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Sehili</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Leynaert</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Devillers</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>A Corpus of Social Interaction between Nao and Elderly People</article-title>. In <comment>
<italic>5th International Workshop on Emotion, Social Signals, Sentiment &#x0026; Linked Open Data</italic>. Reykjavik, Iceland</comment>. <publisher-name>LREC</publisher-name>. <pub-id pub-id-type="doi">10.1145/2666499.2666502</pub-id> </citation>
</ref>
<ref id="B76">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wilbur</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>A Framework for Immersive Virtual Environments (Five): Speculations on the Role of Presence in Virtual Environments</article-title>. <source>Presence: Teleoperators &#x26; Virtual Environments</source> <volume>6</volume>, <fpage>603</fpage>&#x2013;<lpage>616</lpage>. <pub-id pub-id-type="doi">10.1162/pres.1997.6.6.603</pub-id> </citation>
</ref>
<ref id="B77">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soto</surname>
<given-names>C. J.</given-names>
</name>
<name>
<surname>John</surname>
<given-names>O. P.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Short and Extra-short Forms of the Big Five Inventory-2: The Bfi-2-S and Bfi-2-Xs</article-title>. <source>J.&#x20;Res. Personal.</source> <volume>68</volume>, <fpage>69</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1016/j.jrp.2017.02.004</pub-id> </citation>
</ref>
<ref id="B78">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Spiliotopoulos</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Androutsopoulos</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Spyropoulos</surname>
<given-names>C. D.</given-names>
</name>
</person-group> (<year>2001</year>). &#x201c;<article-title>Human-robot Interaction Based on Spoken Natural Language Dialogue</article-title>,&#x201d; in <conf-name>Proceedings of the European Workshop on Service and Humanoid Robots</conf-name>, <fpage>25</fpage>&#x2013;<lpage>27</lpage>. </citation>
</ref>
<ref id="B79">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stang</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>&#x201c;This Action Will Have Consequences&#x201d;: Interactivity and Player Agency</article-title>. <source>Game Stud.</source> <volume>19</volume>. </citation>
</ref>
<ref id="B80">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Twiefel</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Baumann</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Heinrich</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wermter</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Improving Domain-independent Cloud-Based Speech Recognition with Domain-dependent Phonetic Post-processing</article-title>. <conf-name>Proceedings of the AAAI Conference on Artificial Intelligence</conf-name> <volume>28</volume> </citation>
</ref>
<ref id="B81">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>C.-T.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Game Reward Systems: Gaming Experiences and Social Meanings</article-title>. <source>
<italic>DiGRA Conf.</italic> (Citeseer)</source> <volume>114</volume>. </citation>
</ref>
<ref id="B82">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wortham</surname>
<given-names>R. H.</given-names>
</name>
<name>
<surname>Theodorou</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Robot Transparency, Trust and Utility</article-title>. <source>Connect. Sci.</source> <volume>29</volume>, <fpage>242</fpage>&#x2013;<lpage>248</lpage>. <pub-id pub-id-type="doi">10.1080/09540091.2017.1313816</pub-id> </citation>
</ref>
<ref id="B83">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zanatto</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Patacchiola</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Goslin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Thill</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cangelosi</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <conf-name>Do Humans Imitate Robots? Proceedings of the 2020 ACM/IEEE International Conference on Human-Robot Interaction</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery), HRI &#x2018;20</publisher-name>, <fpage>449</fpage>&#x2013;<lpage>457</lpage>. <pub-id pub-id-type="doi">10.1145/3319502.3374776</pub-id> </citation>
</ref>
<ref id="B84">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zanatto</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>
<italic>When Do We Cooperate with Robots?</italic>
</article-title>,&#x201d; (<publisher-name>University of Plymouth</publisher-name>). <comment>Ph.D. thesis</comment>. </citation>
</ref>
</ref-list>
</back>
</article>