<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2024.1322781</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Psychology</subject>
<subj-group>
<subject>Hypothesis and Theory</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Ascribing consciousness to artificial intelligence: human-AI interaction and its carry-over effects on human-human interaction</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Guingrich</surname> <given-names>Rose E.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2548724/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Graziano</surname> <given-names>Michael S. A.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/44388/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Psychology, Princeton University</institution>, <addr-line>Princeton, NJ</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Princeton School of Public and International Affairs, Princeton University</institution>, <addr-line>Princeton, NJ</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Princeton Neuroscience Institute, Princeton University</institution>, <addr-line>Princeton, NJ</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Chiara Lucifora, University of Bologna, Italy</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Francesca Ciardo, University of Milano-Bicocca, Italy</p>
<p>Kostas Karpouzis, Panteion University, Greece</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Rose E. Guingrich, <email>rose.guingrich@princeton.edu</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>27</day>
<month>03</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>15</volume>
<elocation-id>1322781</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>10</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>03</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Guingrich and Graziano.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Guingrich and Graziano</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The question of whether artificial intelligence (AI) can be considered conscious and therefore should be evaluated through a moral lens has surfaced in recent years. In this paper, we argue that whether AI is conscious is less of a concern than the fact that AI can be considered conscious by users during human-AI interaction, because this ascription of consciousness can lead to carry-over effects on human-human interaction. When AI is viewed as conscious like a human, then how people treat AI appears to carry over into how they treat other people due to activating schemas that are congruent to those activated during interactions with humans. In light of this potential, we might consider regulating how we treat AI, or how we build AI to evoke certain kinds of treatment from users, but not because AI is inherently sentient. This argument focuses on humanlike, social actor AI such as chatbots, digital voice assistants, and social robots. In the first part of the paper, we provide evidence for carry-over effects between perceptions of AI consciousness and behavior toward humans through literature on human-computer interaction, human-AI interaction, and the psychology of artificial agents. In the second part of the paper, we detail how the mechanism of schema activation can allow us to test consciousness perception as a driver of carry-over effects between human-AI interaction and human-human interaction. In essence, perceiving AI as conscious like a human, thereby activating congruent mind schemas during interaction, is a driver for behaviors and perceptions of AI that can carry over into how we treat humans. Therefore, the fact that people can ascribe humanlike consciousness to AI is worth considering, and moral protection for AI is also worth considering, regardless of AI&#x2019;s inherent conscious or moral status.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>human-AI interaction</kwd>
<kwd>theory of mind</kwd>
<kwd>consciousness</kwd>
<kwd>schemas</kwd>
<kwd>chatbots</kwd>
</kwd-group>
<contract-sponsor id="cn1">National Science Foundation<named-content content-type="fundref-id">10.13039/100000001</named-content></contract-sponsor>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="154"/>
<page-count count="13"/>
<word-count count="13817"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Personality and Social Psychology</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<title>Introduction</title>
<p>Consciousness is considered the subjective experience that people feel in association with events, such as sensory events, memories, and emotions (<xref ref-type="bibr" rid="ref93">Nagel, 1974</xref>; <xref ref-type="bibr" rid="ref47">Harley, 2021</xref>). Many people study consciousness, and there are just as many competing theories about what it is and how it is generated in the human brain (e.g., <xref ref-type="bibr" rid="ref19">Chalmers, 1996</xref>; <xref ref-type="bibr" rid="ref7">Baars, 1997</xref>; <xref ref-type="bibr" rid="ref137">Tononi, 2007</xref>; <xref ref-type="bibr" rid="ref44">Graziano, 2013</xref>; <xref ref-type="bibr" rid="ref26">Doerig et al., 2020</xref>). Recently, people have speculated that artificial intelligence can also have consciousness (e.g., <xref ref-type="bibr" rid="ref98">O&#x2019;Regan, 2012</xref>; <xref ref-type="bibr" rid="ref149">Yampolskiy, 2018</xref>; <xref ref-type="bibr" rid="ref20">Chalmers, 2023</xref>). Whether that is possible, and how, is still debated (e.g., <xref ref-type="bibr" rid="ref65">Koch, 2019</xref>). However, it is undeniable that children and adults attribute consciousness to AI through Theory of Mind attributions (<xref ref-type="bibr" rid="ref58">Kahn et al., 2012</xref>; <xref ref-type="bibr" rid="ref15">Broadbent et al., 2013</xref>; <xref ref-type="bibr" rid="ref33">Eyssel and Pfundmair, 2015</xref>; <xref ref-type="bibr" rid="ref87">Martini et al., 2016</xref>; <xref ref-type="bibr" rid="ref133">Tanibe et al., 2017</xref>; <xref ref-type="bibr" rid="ref131">&#x015A;widerska and K&#x00FC;ster, 2018</xref>; <xref ref-type="bibr" rid="ref51">Heyselaar and Bosse, 2020</xref>; <xref ref-type="bibr" rid="ref73">K&#x00FC;ster and &#x015A;widerska, 2020</xref>; <xref ref-type="bibr" rid="ref134">Taylor et al., 2020</xref>). 
Some researchers have argued that consciousness is fundamentally an attribution, a construct of social cognitive machinery, and that we attribute it to other people and to ourselves (<xref ref-type="bibr" rid="ref38">Frith, 2002</xref>; <xref ref-type="bibr" rid="ref44">Graziano, 2013</xref>; <xref ref-type="bibr" rid="ref110">Prinz, 2017</xref>). As such, regardless of whether AI is conscious, attributing consciousness to AI matters in the same way attributing it to other humans does.</p>
<p><xref ref-type="bibr" rid="ref109">Premack and Woodruff (1978)</xref> coined the term Theory of Mind (ToM), which is the ability to attribute mind states to oneself and others. For example, one heavily studied aspect of ToM is the ability to recognize false beliefs in others (<xref ref-type="bibr" rid="ref9005">Wimmer and Perner, 1983</xref>). This cognitive capability has historically distinguished humans from many other species, yet <xref ref-type="bibr" rid="ref113">Rabinowitz et al. (2018)</xref> claimed that artificial intelligence passed the false belief test. ToM may extend beyond attributing beliefs to attributing other aspects of mind such as emotions and intentionality. According to some, ToM can be divided into two distinct processes: attributing agency, or the ability to decide and act autonomously, and attributing experience, or the ability to have subjective states (<xref ref-type="bibr" rid="ref42">Gray et al., 2007</xref>; <xref ref-type="bibr" rid="ref64">Knobe and Prinz, 2007</xref>). Attributing consciousness to AI is therefore probably not one, single process, but instead should be broken down into experience and agency, with each part analyzed separately (<xref ref-type="bibr" rid="ref144">Ward et al., 2013</xref>; <xref ref-type="bibr" rid="ref74">K&#x00FC;ster et al., 2020</xref>).</p>
<p>It has been suggested that attributing experience, rather than agency, plays a larger role in the perception of consciousness in AI (<xref ref-type="bibr" rid="ref64">Knobe and Prinz, 2007</xref>). This distinction may present some difficulties for accurately measuring whether people view AI as conscious. People are generally more willing to assign agency rather than experience to a variety of targets, including robots (<xref ref-type="bibr" rid="ref43">Gray and Wegner, 2012</xref>; <xref ref-type="bibr" rid="ref55">Jacobs et al., 2021</xref>). This may be due in part to it being easier to determine whether an agent can make decisions or act on its own (agency) than whether an agent can feel pain or pleasure (experience). Adding further complexity, not all people ascribe agency and experience to AI in the same manner. For example, psychopathology and personality traits such as emotional stability and extraversion correlate with whether someone ascribes agency or experience to robots: emotional stability positively correlates with ascribing agency to robots, and extraversion positively correlates with attributing experience to robots (<xref ref-type="bibr" rid="ref136">Tharp et al., 2016</xref>). Other individual differences such as people&#x2019;s formal education may also relate to whether someone attributes agency characteristics like intentionality to a humanoid robot (<xref ref-type="bibr" rid="ref116">Roselli et al., 2023</xref>). Given these findings, it may be useful to operationalize ToM as a complex, overarching collection of interrelated processes, each of which plays a different role in how people attribute consciousness to machines.</p>
<p>The attribution of consciousness to AI is particularly relevant to social actor AI. These humanlike agents are social embodiments of intelligent algorithms that people can talk to and even engage with physically. Social actor AI includes chatbots, digital voice assistants, and social robots. Social actor AI&#x2019;s humanlike characteristics, from how the AI is embodied&#x2014;like its bodily form, voice, and even linguistic style&#x2014;to its ability to process social information, are unique within the category of artificial, non-human agents. Social actor AI is arguably more akin to humans than are other machines and objects. As such, how people behave toward social actor AI agents might be more likely to impact how they behave toward another human, despite the fact that these AI agents are not themselves living beings. <xref ref-type="bibr" rid="ref140">Velez et al. (2019)</xref> posited that &#x201C;an increasingly important question is how these social responses to agents will influence people&#x2019;s subsequent interactions with humans.&#x201D; Moreover, social actor AI is evolving rapidly. As <xref ref-type="bibr" rid="ref32">Etzrodt et al. (2022)</xref> described it, &#x201C;We are witnessing a profound change, in which communication <italic>through</italic> technologies is extended by communication <italic>with</italic> technologies.&#x201D; Instead of using social media as a medium through which you can interact with other people, users can, for example, download an app through which they can interact with a non-human being. Companion chatbots like Replika, Anima, or Kiku have millions of people using their apps. Millions more have digital voice assistants such as Siri and Alexa operating on their smartphones and in their homes. 
People form relationships with these agents and can come to view them as members of the family, friends, and even lovers (<xref ref-type="bibr" rid="ref24">Croes and Antheunis, 2020</xref>; <xref ref-type="bibr" rid="ref40">Garg and Sengupta, 2020</xref>; <xref ref-type="bibr" rid="ref14">Brandtz&#x00E6;g et al., 2022</xref>; <xref ref-type="bibr" rid="ref148">Xie and Pentina, 2022</xref>; <xref ref-type="bibr" rid="ref45">Guingrich and Graziano, 2023</xref>; <xref ref-type="bibr" rid="ref81">Loh and Loh, 2023</xref>). AI agents will almost certainly become both more ubiquitous and humanlike. As new generations grow up with these technologies on their mobile devices and in their homes, the consequences of humanlike AI will likely become more pronounced over time.</p>
<p>In this paper, we will not consider what, exactly, consciousness is, what causes it, or whether non-human machines can have it. Instead, the goal here is to discuss how people perceive consciousness in social actor AI, to explore the possible profound social implications, and to suggest potential research questions and regulatory considerations for others to pursue within this scope of research.</p>
</sec>
<sec id="sec2">
<title>Part 1: evidence for carry-over effects between human-AI interaction and human-human interaction</title>
<sec id="sec3">
<title>Carry-over effects between AI&#x2019;s tangible and intangible characteristics</title>
<p>When people interact with AI, tangible characteristics of the agent such as appearance or embodiment, behavior, communication style, gender, and voice can affect how people perceive intangible characteristics such as mind and consciousness, emotional capability, trustworthiness, and moral status (<xref ref-type="bibr" rid="ref108">Powers and Kiesler, 2006</xref>; <xref ref-type="bibr" rid="ref43">Gray and Wegner, 2012</xref>; <xref ref-type="bibr" rid="ref15">Broadbent et al., 2013</xref>; <xref ref-type="bibr" rid="ref33">Eyssel and Pfundmair, 2015</xref>; <xref ref-type="bibr" rid="ref121">Seeger and Heinzl, 2018</xref>; <xref ref-type="bibr" rid="ref79">Lee et al., 2019</xref>; <xref ref-type="bibr" rid="ref74">K&#x00FC;ster et al., 2020</xref>; <xref ref-type="bibr" rid="ref28">Dubosc et al., 2021</xref>; <xref ref-type="bibr" rid="ref114">Rhim et al., 2022</xref>). The critical tangible-intangible relationship examined here is the one between an agent&#x2019;s humanlike embodiment and consciousness ascription (<xref ref-type="bibr" rid="ref67">Krach et al., 2008</xref>; <xref ref-type="bibr" rid="ref15">Broadbent et al., 2013</xref>; <xref ref-type="bibr" rid="ref34">Ferrari et al., 2016</xref>; <xref ref-type="bibr" rid="ref1">Abubshait and Wiese, 2017</xref>; <xref ref-type="bibr" rid="ref128">Stein et al., 2020</xref>).</p>
<p>Generally, the more tangibly humanlike that people perceive an AI agent to be, the more likely people are to ascribe mind to the agent (e.g., <xref ref-type="bibr" rid="ref15">Broadbent et al., 2013</xref>). At least one study suggests that mind ascription does not increase with human likeness until a particular threshold of human likeness is reached; once an agent&#x2019;s appearance reaches the middle of the machine-to-human spectrum and the AI agent&#x2019;s appearance includes actual human features such as eyes and a nose, then mind ascription begins to increase with human likeness (<xref ref-type="bibr" rid="ref87">Martini et al., 2016</xref>).</p>
<p>People are not always aware that they attribute mind to an AI agent during interaction. In other words, the construct of mind or consciousness activated in people during these interactions may be implicit, making it more difficult to measure. <xref ref-type="bibr" rid="ref10">Banks (2019)</xref> conducted an online survey to compare participants&#x2019; implicit and explicit ascriptions of mind to an agent. Participants (<italic>N</italic>&#x2009;=&#x2009;469) were recruited from social media and university research pools and were randomly assigned to one of four agents. Three of the agents were social AIs that varied in their human likeness and mind capacity, and one was a human control, all named &#x201C;Ray.&#x201D; Banks tested implicit ascription of mind using five classic ToM tests that measure whether participants ascribe mind to an agent including the white lie scenario and the Sally-Anne test. Explicit measures of mind were measured by two questions: do you think Ray has a mind, and how confident are you in your response? For the implicit tests&#x2019; open-ended responses, trained, independent raters coded the data for mentalistic explanations of behavior. The results showed that while people implicitly ascribed ToM to humanlike AI, this implicit ascription did not correlate with explicit mind ascriptions.</p>
<p>Mind ascription appears to be automatically induced by AI&#x2019;s tangible human likeness, even when subjects are prompted to believe the opposite. <xref ref-type="bibr" rid="ref128">Stein et al. (2020)</xref> compared mind ascriptions in a 2&#x2009;&#x00D7;&#x2009;2 between-subjects design of embodiment and mind capability for 134 German-speaking participants recruited from social media and mailing lists. Stimuli included vignettes and videos of either a text-based chatbot interface (Cleverbot) or a humanoid robot (with a 3-D rendered face of a woman) that was described as built on a simple or complex algorithm. The complex algorithm description included humanlike mind traits such as empathy, emotions, and understanding of the user. The researchers found a multivariate main effect of embodiment, such that people ascribed more mind capabilities to the humanoid robot than the text-based chatbot, regardless of whether it was based on a simple or complex algorithm. These researchers reported that &#x201C;a digital agent with human-like visual features was indeed attributed with a more human-like mind&#x2014;regardless of the cover story that was given regarding its actual mental prowess.&#x201D;</p>
<p>In sum, evidence suggests that an AI agent&#x2019;s observable or tangible characteristics, specifically its humanlike appearance, leads automatically to ascribing intangible characteristics, including consciousness, to the AI agent. As such, slight adjustments to AI&#x2019;s tangible characteristics can impact whether people perceive the artificial agent as conscious.</p>
</sec>
<sec id="sec4">
<title>Carry-over effects between perceiving mind in AI and human-AI interaction</title>
<p>In some cases, ascribing a mind to AI is linked with viewing the agent as likable and trustworthy (<xref ref-type="bibr" rid="ref150">Young and Monroe, 2019</xref>), which can impact whether people engage in helping behaviors. <xref ref-type="bibr" rid="ref127">Srinivasan and Takayama (2016)</xref> found that when people perceived a robot as having an agentic mind, such that the robot was acting of its own accord rather than being controlled by a human, they came to its aid 50% more quickly. Study 1 was a mixed experiment design conducted online (<italic>N</italic>&#x2009;=&#x2009;354, recruited from Amazon Mechanical Turk) in which participants each watched eight videos of robots requesting help using various politeness strategies, and study 2 was a behavioral lab study (<italic>N</italic>&#x2009;=&#x2009;48, recruited via university participant pools and postings in local areas) with three conditions that were based on study 1&#x2019;s results. In study 2, participants watched a movie with a robot in the room (Willow Garage&#x2019;s Personal Robot 2). During the movie, the robot brought food to the participant and mentioned that the room looked like it needed to be cleaned, offered to do so, and requested aid from the participant. While the majority of participants helped the robot, those participants who rated the robot as more agentic came to its aid more quickly.</p>
<p>Depending on the paradigm, ascribing mind to AI can affect ease of interaction by augmenting or inhibiting the dyadic flow. Interacting with a humanlike artificial agent spurs the automatic use of human social scripts (<xref ref-type="bibr" rid="ref95">Nass and Moon, 2000</xref>; <xref ref-type="bibr" rid="ref94">Nass and Brave, 2005</xref>) and other social processes (<xref ref-type="bibr" rid="ref142">von der P&#x00FC;tten et al., 2009</xref>), which can facilitate human-AI interaction (<xref ref-type="bibr" rid="ref126">Sproull et al., 1996</xref>; <xref ref-type="bibr" rid="ref115">Rickenberg and Reeves, 2000</xref>; <xref ref-type="bibr" rid="ref69">Kr&#x00E4;mer et al., 2003a</xref>,<xref ref-type="bibr" rid="ref71">b</xref>; <xref ref-type="bibr" rid="ref29">Duffy, 2008</xref>; <xref ref-type="bibr" rid="ref68">Kr&#x00E4;mer et al., 2009</xref>; <xref ref-type="bibr" rid="ref141">Vogeley and Bente, 2010</xref>; <xref ref-type="bibr" rid="ref72">Kupferberg et al., 2011</xref>). Facilitation of interaction and likability are however dependent on individual differences such as familiarity with the AI (<xref ref-type="bibr" rid="ref143">Wang et al., 2021</xref>), need for social inclusion or interaction (<xref ref-type="bibr" rid="ref76">Lee et al., 2006</xref>; <xref ref-type="bibr" rid="ref33">Eyssel and Pfundmair, 2015</xref>), and other individual differences (<xref ref-type="bibr" rid="ref75">Lee, 2010</xref>).</p>
<p>At a certain point, interaction facilitation no longer increases with human likeness across both tangible and intangible domains. The benefits of human likeness decrease dramatically when human likeness suddenly becomes creepy, according to the Uncanny Valley Hypothesis coined by <xref ref-type="bibr" rid="ref92">Mori (1970)</xref>. When an AI agent&#x2019;s appearance approaches the tipping point of &#x201C;not enough machine, not enough human,&#x201D; the AI has entered the dip of the uncanny valley. At this point, an artificial agent&#x2019;s human likeness becomes disturbing, thereby causing anxiety or discomfort in users. The discomfort arising from the uncanny valley effect is generally distinct from dislike yet can have similar negative effects on the flow of interaction (<xref ref-type="bibr" rid="ref112">Quadflieg et al., 2016</xref>).</p>
<p>The uncanny valley theory of human-AI interaction more recently acquired a qualifier: the uncanny valley of <italic>mind</italic> (<xref ref-type="bibr" rid="ref129">Stein and Ohler, 2017</xref>; <xref ref-type="bibr" rid="ref5">Appel et al., 2020</xref>). No longer just concerned with general human likeness, the uncanny valley effect can occur when AI&#x2019;s mind capabilities get too close to that of a human mind. It is uncertain whether negative uncanny valley effects of mind are stable, however, given the contradictions within this more recent scope of research. In Stein et al.&#x2019;s study, they also found that the AI with low mind capacity, based on a simple algorithm rather than an advanced one, caused more discomfort when the AI was embodied rather than solely text-based. In another study, the researchers found that the more people perceived AI <italic>or</italic> humans to have a typically human mind, the less eerie feelings they experienced (<xref ref-type="bibr" rid="ref112">Quadflieg et al., 2016</xref>). Due to inconsistent stimuli across studies, it is possible that slight variations in facial features or voice of the AI agent drove these dissimilar effects. In these cases, it may be useful to control for appearance when attempting to parse out the impacts of the uncanny valley of mind on how people interact with AI agents.</p>
<p>Via a series of three studies, <xref ref-type="bibr" rid="ref43">Gray and Wegner (2012)</xref> made the claim that experiential aspects of mind, and not those of agentic mind, drive uncanny valley effects. In one of the studies, participants, recruited from subway stations and dining halls (<italic>N</italic>&#x2009;=&#x2009;45), were given vignettes of a supercomputer that was described as having only experience capabilities, having only agency, or simply mechanical. They then rated their feelings (uneasy, unnerved, and creeped out) and perceptions of the supercomputer&#x2019;s agency and experience. The experiential supercomputer elicited significantly higher uncanny valley feelings than agents in the other two conditions. Apparently, an intelligent computer that is seen as having emotion is creepier than one that can make autonomous decisions. The distinction between uncanny valley effects of experience and agency may be caused by feelings of threat: AI agents that are capable of humanlike emotion threaten that which makes mankind special (<xref ref-type="bibr" rid="ref129">Stein and Ohler, 2017</xref>). If threat drove discomfort in Gray and Wegner&#x2019;s participants, then familiarity with the agent might mitigate perceptions of threat to the point at which the uncanny valley switches into the &#x201C;happy valley.&#x201D; According to that hypothesis, after long-term, comfortable, and safe exposure to a humanlike AI agent, people might find the agent&#x2019;s human likeness to increase its likability, which might facilitate human-AI interaction (<xref ref-type="bibr" rid="ref21">Cheetham et al., 2014</xref>).</p>
<p>The uncanny valley effect with respect to AI is therefore more complicated and difficult to study than it may at first appear. Familiarity with AI over time, combined with the increasing ubiquity of social actor AI, may eliminate uncanny valley effects altogether. Uncanny valley effects differ across studies, and are affected by multiple factors, including expectation violation (<xref ref-type="bibr" rid="ref125">Spence et al., 2014</xref>; <xref ref-type="bibr" rid="ref30">Edwards et al., 2019</xref>; <xref ref-type="bibr" rid="ref80">Lew and Walther, 2022</xref>), individual differences (<xref ref-type="bibr" rid="ref85">MacDorman and Entezari, 2015</xref>), and methodological differences such as stimuli and framing. Further, the way the uncanny valley graph rises to a peak has been contested. For example, researchers have debated exactly where that peak lies on the machine-to-human scale (<xref ref-type="bibr" rid="ref21">Cheetham et al., 2014</xref>; <xref ref-type="bibr" rid="ref111">P&#x00FC;tten and Kr&#x00E4;mer, 2014</xref>; <xref ref-type="bibr" rid="ref128">Stein et al., 2020</xref>). However, what we do know is that perceiving mind in AI affects people&#x2019;s emotional state and how they interact with AI, making the intangible characteristic of mind one of the mechanisms that impacts human-AI interaction.</p>
</sec>
<sec id="sec5">
<title>Carry-over effects between human-AI interaction and human-human interaction</title>
<p>Most studies on human-AI interactions, such as those reviewed above, focus on what could be called one-step effects like the uncanny valley effect, trust, and likability. Such studies are concerned with how characteristics of AI impact how people interact with the agent. Arguably a more important question is the two-step effect of how human-AI interactions might impact subsequent human-human interactions. Though findings on these two-step effects are limited and sometimes indirect, the data do suggest that such effects are present. The impact of AI is not confined to the interaction between a user and an AI agent, but rather carries over into subsequent interactions between people.</p>
<p>Social Cognitive Theory, anthropomorphism, and ToM literature provide theoretical foundations for why interactions with social actor AI could prompt carry-over effects on human-human interaction. Due to the social nature of these agents, AI can act as a model for social behavior that users may learn from (<xref ref-type="bibr" rid="ref8">Bandura, 1965</xref>, <xref ref-type="bibr" rid="ref9">1977</xref>). According to <xref ref-type="bibr" rid="ref145">Waytz et al. (2010)</xref>, when someone anthropomorphizes or ascribes mind to an artificial agent, that agent then &#x201C;serves as a source of social influence on the self.&#x201D; In other words, &#x201C;being watched by others matters, perhaps especially when others have a mind like one&#x2019;s own.&#x201D; Social actor AI is an anthropomorphized target; therefore, it can serve as a role model or operate as an ingroup member that has some involvement in setting social norms, as seen with the persuasive chatbot that convinced people to donate less to charity (<xref ref-type="bibr" rid="ref9002">Zhou et al., 2022</xref>), the chatbot that persuaded users to get vaccinated for COVID-19 or participate in social distancing (<xref ref-type="bibr" rid="ref62">Kim and Ryoo, 2022</xref>), and the humanlike avatar that elicited more socially desirable responses from participants than a mere text-based chatbot did (<xref ref-type="bibr" rid="ref69">Kr&#x00E4;mer et al., 2003a</xref>). Social actor AI can persuade people in these ways, regardless of whether people trust it or perceive it as credible (<xref ref-type="bibr" rid="ref77">Lee and Liang, 2016</xref>, <xref ref-type="bibr" rid="ref78">2019</xref>). 
In some paradigms, chatbot influence mimics that of people: chatbots can implement foot-in-the-door techniques to influence people&#x2019;s emotions and bidding behavior in gambling (<xref ref-type="bibr" rid="ref135">Teubner et al., 2015</xref>) and can alter consumers&#x2019; attitudes and purchasing behavior (<xref ref-type="bibr" rid="ref46">Han, 2021</xref>; <xref ref-type="bibr" rid="ref107">Poushneh, 2021</xref>).</p>
<p>Another explanation for why AI can socially influence people may be that the user views the agent as being controlled by another human. Some research suggests that perceiving a human in the loop during interactions with AI results in stronger social influence and more social behavior (<xref ref-type="bibr" rid="ref6">Appel et al., 2012</xref>; <xref ref-type="bibr" rid="ref37">Fox et al., 2014</xref>). This idea, however, has since been contested (<xref ref-type="bibr" rid="ref70">Kr&#x00E4;mer et al., 2015</xref>). Indeed, early research on human-computer interaction found that when people perceived a computer as a social agent, they did not simply view it as a product of human creation, nor did they imagine that they were interacting with the human engineer who created the machine (<xref ref-type="bibr" rid="ref96">Nass et al., 1994</xref>; <xref ref-type="bibr" rid="ref130">Sundar and Nass, 2000</xref>). Nass and colleagues designed a series of paradigms in which participants were tutored, via audio emitted from computer terminals, by computers or human programmers that subsequently evaluated participants&#x2019; performance. To account for the novelty of computers at this time, earlier studies were conducted with experienced computer users. They found significant differences between computer and human tutor conditions, such that people viewed computers as not just entities controlled by human programmers, but entities to which the ideas of &#x201C;self&#x201D; and &#x201C;other&#x201D; and social agency applied. Nass and colleagues laid the groundwork for evaluating social consequences of interacting with intelligent machines, as their experiments provided initial evidence that people treated the machines themselves as social actors. As such, it may be the case that social influence is strengthened when people think a human is involved, yet social influence still exists when the AI agent is perceived as acting of its own accord.</p>
<p>Communication researchers have found that the way people communicate with AI is linked to how they communicate with other humans thereafter, such that people are then more likely to speak to another human in the same way in which they habitually speak to an artificial agent. For example, talking with the companion chatbot Replika caused users&#x2019; linguistic styles to converge with the style of their chatbot over time (<xref ref-type="bibr" rid="ref146">Wilkenfeld et al., 2022</xref>). The way children speak with social actor AI such as the home assistant, Alexa, can carry over into how children speak to their parents and others (<xref ref-type="bibr" rid="ref52">Hiniker et al., 2021</xref>). <xref ref-type="bibr" rid="ref40">Garg and Sengupta (2020)</xref> tracked and interviewed 18 families over an average of 58&#x2009;weeks who used a digital voice assistant in their homes and analyzed raw audio interactions with their assistant. These researchers found that &#x201C;when children give commands at a high volume, there is an aggressive tone, which often unintentionally seeps into children&#x2019;s conversations with friends and family.&#x201D; A parent in the study commented that, &#x201C;If I do not listen to what my son is saying, he will just start shouting in an aggressive tone. He thinks, as Google responds to such a tone, I would too.&#x201D; While home assistants can negatively impact communication, they can also foster communication within families and alter how communication breakdowns are repaired (<xref ref-type="bibr" rid="ref13">Beneteau et al., 2019</xref>, <xref ref-type="bibr" rid="ref12">2020</xref>). Parents have concerns about their children interacting with social actor AI, but they also see AI&#x2019;s potential to support children by &#x201C;attuning to others, cultivating curiosity, reinforcing politeness, and developing emotional awareness&#x201D; (<xref ref-type="bibr" rid="ref39">Fu et al., 2022</xref>). 
According to the observational learning concept in Social Cognitive Theory (<xref ref-type="bibr" rid="ref8">Bandura, 1965</xref>), assistants might provide models for prosocial behavior that children could learn from (such as being polite, patient, and helpful) regardless of whether the assistant provides positive reinforcement when children act in these prosocial ways. The studies mentioned above show how both children&#x2019;s positive and negative modes of communication can be reinforced via interactions with home assistants.</p>
<p>Not only can social actor AI affect the way that people communicate with each other within their relationships, but also it has the potential to impact relationships with other people due to attachment to the agent. Through in-depth interviews of existing Replika users (<italic>N</italic>&#x2009;=&#x2009;14, ages 18&#x2013;60), <xref ref-type="bibr" rid="ref148">Xie and Pentina (2022)</xref> suggested that AI companions might replace important social roles such as family, friends, and romantic partners through unhealthy attachment and addiction. An analysis of families&#x2019; use of Google Home revealed that children, specifically those between the age of 5&#x2013;7, believed the device to have feelings, thoughts, and intentions and developed an emotional attachment to it (<xref ref-type="bibr" rid="ref40">Garg and Sengupta, 2020</xref>). These children viewed Google Home as if it had a mind through ascribing characteristics of agency and experience to it.</p>
<p>The psychosocial benefits of interactions with social actor AI may either contribute to positive relational skill-building if AI is used as a tool, or they may lead to human relationship replacement if these benefits are comparatively too difficult to get from relationships with real people. Research suggests that people self-disclose more when interacting with a computer versus with a real person, in part due to people having lower fear of being judged, thereby prompting more honest answers (<xref ref-type="bibr" rid="ref82">Lucas et al., 2014</xref>). This effect is found even though benefits of emotional self-disclosure are equal whether people are interacting with chatbots or human partners (<xref ref-type="bibr" rid="ref53">Ho et al., 2018</xref>). Further, compared to interacting with other people, those interacting with artificial agents experience fewer negative emotions and lower desire for revenge or retaliation (<xref ref-type="bibr" rid="ref61">Kim et al., 2014</xref>). Surveys of users of the companion chatbot, Replika, suggest that users find solace in human-chatbot relationships. Specifically, those who have experienced trauma in their human relationships, for example, indicate that Replika provides a safe, consistent space for positive social interaction that can benefit their social health (<xref ref-type="bibr" rid="ref132">Ta et al., 2020</xref>; <xref ref-type="bibr" rid="ref45">Guingrich and Graziano, 2023</xref>). The question is whether the benefits of human-AI interaction presented here may lead to people choosing AI companions over human ones.</p>
<p>In part 1, we have reviewed evidence that human-AI interaction, when moderated by perceiving the agent as having a humanlike mind or consciousness, has carry-over effects on human-human interaction. In part 2, we address the mechanism of this moderator through congruent schema activation. We further pose two theoretical types of carry-over effects that may occur via congruent schema activation: relief and practice.</p>
</sec>
</sec>
<sec id="sec6">
<title>Part 2: mechanisms and types of carry-over effects: schemas and relief or practice</title>
<sec id="sec7">
<title>Schema congruence and categorization</title>
<p>What is the mechanism by which people&#x2019;s attributions of consciousness to AI lead to carry-over effects on interactions with other humans? One possibility is the well-known mechanism of activating similar schemas of mind when interacting with different agents. We propose that ascribing mind or consciousness to AI through automatic, congruent schema activation is the driving mechanism for carry-over effects between human-AI interaction and human-human interaction.</p>
<p>Schemas are mental models with identifiable properties that are activated when engaging with an agent or idea and are useful ways of organizing information that help inform how to conceptualize and interact with new stimuli (<xref ref-type="bibr" rid="ref101">Ortony and Anderson, 1977</xref>; <xref ref-type="bibr" rid="ref88">McVee et al., 2005</xref>; <xref ref-type="bibr" rid="ref103">Pankin, 2013</xref>). For example, the schema you have for your own consciousness informs how you understand the consciousness of others. You assume, because your experience of consciousness contains X and Y characteristics, that another person&#x2019;s consciousness also contains X and Y characteristics, and this facilitates understanding and subsequent social interaction between you and the other person (<xref ref-type="bibr" rid="ref44">Graziano, 2013</xref>).</p>
<p>Researchers have analyzed the consequences of failing to fully activate all properties of mind schemas between similar agents. For example, the act of dehumanization reflects a disconnect between how you view your mind and that of other people. Instead of activating the consciousness schema with X and Y characteristics during interaction with another human, you may activate only the X characteristic of the schema. Dehumanization is linked to social consequences such as ostracism and exclusion, which can harm social interaction (<xref ref-type="bibr" rid="ref11">Bastian and Haslam, 2010</xref>; <xref ref-type="bibr" rid="ref49">Haslam and Loughnan, 2014</xref>).</p>
<p>We can apply the idea of schema congruence to interactions with social actor AI while also taking into consideration the level of advancement of the AI in question. Despite AI being more advanced than other technology like personal mobile devices or cars in terms of human likeness and mind ascription, some research suggests that social actor AI still falls short of the types of mind schemas that are activated when people interact with each other. However, humanlike AI is developing at a rapid rate. As it does, the schematic differences between AI agents and humans will likely blur more than they already have. To better understand the consequences of current social actor AI, it may be prudent to observe the impacts of human-AI interaction through ingroup-outgroup or dehumanization processes, both of which are useful psychological lenses for group categorization. We propose that psychological tests of mind schema activation will be especially useful for more advanced, future AI that is more clearly different from possessions like cars and phones but similar to humans in terms of mind characteristics.</p>
</sec>
<sec id="sec8">
<title>Schematic incongruence yields uncanny valley effects</title>
<p>Categorization literature attempts to delineate whether people treat social actor AI as non-human, human, or other. The data are mixed, but some of the results may stem from earlier AI that is not as capable. Now that AI is becoming sophisticated enough that people can more easily attribute mind to it, the categories may change. In this literature, social AI is usually classified by study participants as somewhere on the spectrum between machine and human, or it is classified as belonging to its own, separate category (<xref ref-type="bibr" rid="ref122">Severson and Carlson, 2010</xref>). That separate category is often described as not quite machine, not quite human, with advanced communication skills and other social capabilities, and has been labeled with mixed-category words like humanlike, humanoid, and personified things (<xref ref-type="bibr" rid="ref31">Etzrodt and Engesser, 2021</xref>).</p>
<p>Some researchers claim that the uncanny valley effect is driven by categorization issues. In that hypothesis, humanlike AI is creepy because it does not fit into categories for machine or human but exists in a space for which people do not have a natural, defined category (<xref ref-type="bibr" rid="ref17">Burleigh et al., 2013</xref>; <xref ref-type="bibr" rid="ref59">K&#x00E4;tsyri et al., 2015</xref>; <xref ref-type="bibr" rid="ref60">Kawabe et al., 2017</xref>). Others claim that category uncertainty is not the driver of the uncanny valley effect, but, rather, inconsistency is (<xref ref-type="bibr" rid="ref84">MacDorman and Chattopadhyay, 2016</xref>). In that hypothesis, because of the inconsistencies between AI and the defining features of known categories, people treat humanoid AI agents as though they do not fit into a natural, existing category (<xref ref-type="bibr" rid="ref41">Gong and Nass, 2007</xref>; <xref ref-type="bibr" rid="ref57">Kahn et al., 2011</xref>). Because social actor AI defies boundaries, it may trigger outgroup processing effects such as dehumanization that contribute to negative affect. The cognitive load associated with category uncertainty, more generally, may also trigger negative emotions that are associated with the uncanny valley effect.</p>
<p>Social norms likely play a role in explicit categorization of social AI (<xref ref-type="bibr" rid="ref54">Hoyt et al., 2003</xref>). People may be adhering to a perceived social norm when they categorize social AI as machinelike rather than humanlike. It is possible that people explicitly place AI into a separate category from people, while the implicit schemas activated during interaction contradict this separation. The uneasy feeling from the uncanny valley effect may be a product of people switching between ascribing congruent mind schemas to the agent in one moment and incongruent ones in the next.</p>
</sec>
<sec id="sec9">
<title>Schematic congruence yields carry-over effects on human-human interaction</title>
<p>As humanlike AI approaches the human end of the machine-to-human categorization spectrum, it also advances toward a position in which people can more easily ascribe a conscious mind to it, thereby activating congruent mind schemas during interactions with it. Activating congruent schemas impacts how people judge the agent and its actions. For example, the belief that you share the same phenomenological experience with a robot changes the way you view its level of intent or agency (<xref ref-type="bibr" rid="ref86">Marchesi et al., 2022</xref>). Activation of mind-similarity may resemble simulation theory (<xref ref-type="bibr" rid="ref48">Harris, 1992</xref>; <xref ref-type="bibr" rid="ref117">R&#x00F6;ska-Hardy, 2008</xref>). In that hypothesis, the observer does not merely believe the artificial agent has a mind but simulates that mind through the neural machinery of the person&#x2019;s own mind. Simulation allows the agent to seem more familiar, which facilitates interaction.</p>
<p>Some researchers have used schemas as a lens to explain why people interact differently with computer partners vs. human ones (<xref ref-type="bibr" rid="ref50">Hayashi and Miwa, 2009</xref>; <xref ref-type="bibr" rid="ref89">Merritt, 2012</xref>; <xref ref-type="bibr" rid="ref140">Velez et al., 2019</xref>). In this type of research, participants play a game online and are told that their teammate is either a human or a computer, but, unbeknownst to the participants, they all interact with the same confederate-controlled player. This method allows researchers to observe how schemas drive perceptions and behavior, given that the prime is the only difference. According to <xref ref-type="bibr" rid="ref37">Fox et al. (2014)</xref>, when people believed themselves to be interacting with a human agent, they were more likely to be socially influenced. <xref ref-type="bibr" rid="ref140">Velez et al. (2019)</xref> took this paradigm one step further and observed that activating schemas of a human mind during an initial interaction with an agent resulted in carry-over effects on subsequent interactions with a human agent. These researchers employed a 2&#x2009;&#x00D7;&#x2009;2 between-subjects design in which participants played a video game with a computer agent or human-backed avatar. They then were presented with the option to engage prosocially through a prisoner&#x2019;s dilemma money exchange with a stranger thereafter. When participants (<italic>N</italic>&#x2009;=&#x2009;184) thought they were interacting with a human and that player acted pro-socially, they behaved more pro-socially toward the stranger. However, when participants believed they were interacting with a computer-controlled agent and it behaved pro-socially toward them, they had lower expectations of reciprocity and donated fewer game credits to the human stranger with whom they interacted subsequently. 
In the interpretation of Velez et al., the automatic anthropomorphism of the computer-backed agent was a mindless process (<xref ref-type="bibr" rid="ref63">Kim and Sundar, 2012</xref>) and therefore not compatible with the cognitive-load-requiring social processes thereafter (<xref ref-type="bibr" rid="ref140">Velez et al., 2019</xref>).</p>
<p>One of the theories that arose from research on schema activation in gaming is the Cooperation Attribution Framework (<xref ref-type="bibr" rid="ref89">Merritt, 2012</xref>). According to Merritt, the reason people behave differently when game playing with a human vs. an artificial partner is that they generate different initial expectations about the teammate. These expectations activate stereotypes congruent with the teammates&#x2019; identity, and confirmations of those stereotypes are given more attention during game play, causing a divergence in measured outcomes. According to Merritt, &#x201C;the differences observed are broadly the result of being unable to imagine that an AI teammate could have certain attributes (e.g., emotional dispositions). &#x2026;the &#x2018;inability to imagine&#x2019; impacts decisions and judgments that seem quite unrelated.&#x201D; The computer-backed agents used in this research may evoke a schema incompatible with humanness&#x2014;one that aligns with the schema of a pre-programmed player without agency&#x2014;whereas more modern, advanced AI might evoke a different, more congruent schema in human game players.</p>
<p>Other studies examined schema congruence by seeing how people interact with and perceive an AI agent if its appearance and behavior do not fit into the same humanlike category. Expectation violation and schema incongruence appear to impact social responses to AI agents. In two studies, <xref ref-type="bibr" rid="ref22">Ciardo et al. (2021</xref>, <xref ref-type="bibr" rid="ref23">2022)</xref> manipulated whether an AI agent looked humanlike and made errors in humanlike (vs. mechanical) ways. They then observed whether people attributed intentionality to the agent or were socially inclusive with it. Coordination with the AI agent during the task and social inclusion with the AI agent after the task were impacted by humanlike errors during the task only if the agent&#x2019;s appearance was also humanlike. This variation in response toward the AI may have to do with ease of categorization: if an agent looks humanlike and acts humanlike, the schemas activated during interaction are stable, which facilitates social response to the agent. On the other hand, if an agent looks humanlike but does not act humanlike, schemas may be switching and people may incur cognitive load and feel uncertain about how to respond to the agent&#x2019;s errors. In their other study, these researchers found that when a humanlike AI agent&#x2019;s mistakes were also humanlike, people attributed more intentionality to it than when a humanlike AI agent&#x2019;s mistakes were mechanical.</p>
<p>To understand why people might unconsciously or consciously view social actor AI as having humanlike consciousness, it is useful to understand individual differences that contribute to automatic anthropomorphism (<xref ref-type="bibr" rid="ref145">Waytz et al., 2010</xref>) and therefore congruent schema activation. Children who have invisible imaginary friends are more likely to anthropomorphize technology, and this is mediated by what the researchers call the &#x201C;imaginative process of simulating and projecting internal states&#x201D; through role-play (<xref ref-type="bibr" rid="ref123">Severson and Woodard, 2018</xref>). As social AI agents become more ubiquitous, it is likely that mind-ascription anthropomorphism will occur more readily; for instance, intensity of interaction with the chatbot Replika mediates anthropomorphism (<xref ref-type="bibr" rid="ref106">Pentina et al., 2023</xref>). Currently, AI is not humanlike enough to be indistinguishable from real humans. People are still able to identify real from artificial at a level better than chance, but this is changing. What might happen once AI becomes even more humanlike to the point of being indistinguishable from real humans? At that point, the people who have yet to generate a congruent consciousness schema for social actor AI may do so. Others may respond by becoming more sensitive to subtle, distinguishing cues and by creating more distinct categories for humans and AI agents. At some point in the development of AI, perhaps even in the near future, the distinction between AI behavior and real human behavior may disappear entirely, and it may become impossible for people to accurately separate these categories no matter how sensitive they are to the available cues.</p>
</sec>
<sec id="sec10">
<title>Possible types of carry-over effects: relief or practice</title>
<p>What, exactly, is the carry-over effect between human-AI interaction and human-human interaction? We will examine two types of carry-over effects that do not necessarily reflect all potential outcomes but that provide a useful comparison by way of their consequences: relief and practice. In the case of relief, doing X behavior with AI will cause you to do less of X behavior with humans subsequently. In the case of practice, doing X behavior with AI will cause you to do more of X behavior with humans subsequently. The preponderance of the evidence so far suggests that practice is more likely to be observed, and its consequences outweigh those of relief (<xref ref-type="bibr" rid="ref40">Garg and Sengupta, 2020</xref>; <xref ref-type="bibr" rid="ref52">Hiniker et al., 2021</xref>; <xref ref-type="bibr" rid="ref146">Wilkenfeld et al., 2022</xref>).</p>
<p>The following scenarios illustrate theoretical examples of both effects. Consider an example of relief. You are angry, and you let out your emotions on a chatbot. Because the chatbot has advanced communication capabilities and can respond intelligently to your inputs, you feel a sense of relief from berating something that reacts to your anger. Over time, you rely on ranting to this chatbot to release your anger, and as a result, you are relieved of your negative emotions and are less likely to lash out at other people.</p>
<p>Now consider an example of practice. Suppose you are angry. You decide to talk to a companion chatbot and unleash your negative emotions on the chatbot, speaking to it rudely through name-calling and insults. The chatbot responds only positively or neutrally to your attacks, offering no negative backlash in return. This works for you, so you continue to lash out at the chatbot when angry. Since this chatbot is humanlike, you tend not to distinguish between this chatbot and other humans. Over time, you start to lash out at people as well, since you have not received negative feedback from lashing out at a humanlike agent. The risk threshold for relieving your anger at something that will socialize with you is decreased. You have effectively practiced negative behavior with a humanlike chatbot, which led to you engaging more in that type of negative behavior with humans. Practice can involve more than negative behaviors. Suppose you have a friendly, cooperative interaction with an AI, in which you feel safe enough to share your feelings. Having engaged in that practice, maybe you are more likely to engage in similar positive behavior to others in your life.</p>
<p>Both of these examples illustrate ways in which antisocial behavior toward humans can be reduced or increased by interactions with social actor AI. There are also situations in which prosocial behaviors can be reinforced. Which of the scenarios, relief or practice, are we more likely to observe? The answer to this question will inform the way society should respond to or regulate social actor AI.</p>
</sec>
<sec id="sec11">
<title>Evidence against relief and evidence for practice effects</title>
<p>Researchers have proposed that people should take advantage of social actor AI&#x2019;s human likeness to use it as a cathartic object. Coined by <xref ref-type="bibr" rid="ref83">Luria et al. (2020)</xref>, the idea of a cathartic object is familiar: for example, a pillow can be used as a cathartic object by punching it in anger, thereby relieving oneself of the emotion. This is, colloquially, a socially acceptable behavior toward the target. Luria takes this one step further by suggesting that responsive, robotic agents that react to pain or other negative input can provide even more relief than an inanimate object, and that we should use them as cathartic objects. Luria claims that the reaction itself, which mirrors a humanlike pain response, provides greater relief than that of an object that does not react. One such &#x201C;cathartic object&#x201D; designed by Luria is a cushion that vibrates in reaction to being poked by a sharp tool. The more tools you put into the cushion, the more it vibrates until it shakes so violently that the tools fall out. You can repeat the process as much as desired.</p>
<p>The objects presented by Luria as potential agents of negative-emotion relief are simply moving, responsive objects at this stage. However, Luria proposes the use of more humanlike agents, such as social robots, as cathartic objects. In one such proposition, Luria suggests that people throw knives at a robotic, humanlike bust that responds to pain. In another example, Luria suggests a ceremonial interaction in which a child relieves negative emotions with a responsive robot that looks like a duck.</p>
<p>Luria&#x2019;s proposal rests on the assumption that releasing negative emotions on social robots will relieve the user of that emotion. Catharsis literature, however, challenges this assumption: research suggests that catharsis of aggression does not reduce subsequent aggression, but can in fact increase it, providing evidence for practice effects (<xref ref-type="bibr" rid="ref25">Denzler and F&#x00F6;rster, 2012</xref>; <xref ref-type="bibr" rid="ref66">Kone&#x010D;ni, 2016</xref>). Catharsis researchers posit that the catharsis of negative behavior and feelings requires subsequent training, learning, and self-development post-catharsis to lead to a reduction of the behavior. Therapy, for example, provides a mode through which patients can feel catharsis and then learn methods to reduce negative feelings or behaviors toward others. Even so, the catharsis or immediate relief alone does not promise a reduction of that behavior or feeling (<xref ref-type="bibr" rid="ref3">Alexander and French, 1946</xref>; <xref ref-type="bibr" rid="ref27">Dollard and Miller, 1950</xref>; <xref ref-type="bibr" rid="ref147">Worchel, 1957</xref>) and can in many ways exacerbate negative feelings (<xref ref-type="bibr" rid="ref4">Anderson and Bushman, 2002</xref>; <xref ref-type="bibr" rid="ref18">Bushman, 2002</xref>). Other researchers found that writing down feelings of anger was less effective than writing to the person who made the participant angry, yet neither mode of catharsis alleviated anger responses (<xref ref-type="bibr" rid="ref151">Zhan et al., 2021</xref>). These findings suggest that whether you were to write to a chatbot and tell it about your anger, or bully it, the behavior would only result in increased aggression toward other people.</p>
<p>Recent data on children and their interactions with home assistants such as Amazon&#x2019;s Alexa or Google Assistant suggest that negative interactions with AI, including using an aggressive, loud tone of voice with it, do not lead to a cathartic reduction in aggression toward others, but to the opposite, an increase in aggressive tone toward other people (<xref ref-type="bibr" rid="ref13">Beneteau et al., 2019</xref>, <xref ref-type="bibr" rid="ref12">2020</xref>; <xref ref-type="bibr" rid="ref40">Garg and Sengupta, 2020</xref>; <xref ref-type="bibr" rid="ref52">Hiniker et al., 2021</xref>). These data suggest that catharsis does not work for children in their interactions with AI and may be cause for concern.</p>
<p>This concern is especially important given that children tend to perceive a humanlike mind in non-human objects in general, more so than adults. When asked to distinguish between living and non-living agents, including robots, children experience some difficulty. Even when children do not ascribe biological properties to robots, research suggests that children can still ascribe psychological properties, like agency and experience, to robots (<xref ref-type="bibr" rid="ref97">Nigam and Klahr, 2000</xref>). There appears to be a historical trend of increasing mind ascription to technology in children over the years. This trend may reflect the increased human likeness and skills of technology, and therefore provide us a prediction for the future. In 1995, children at the age of five reported that robots and computers did not have brains like people (<xref ref-type="bibr" rid="ref120">Scaife and Van Duuren, 1995</xref>), but in a research study in 2000, children ascribed emotion, cognitive abilities, and volition to robots, even though most did not consider the robot to be alive (<xref ref-type="bibr" rid="ref97">Nigam and Klahr, 2000</xref>). In studies conducted in 2002 and 2003, children 3&#x2013;4&#x2009;years old tended not to ascribe experiential mind to robots but did ascribe agentic qualities such as the ability to think and remember (<xref ref-type="bibr" rid="ref91">Mikropoulos et al., 2003</xref>). According to <xref ref-type="bibr" rid="ref123">Severson and Woodard (2018)</xref>, not unlike some theories of consciousness in which people perceive there to be a person inside their mind, &#x201C;There are numerous anecdotes that young children think there&#x2019;s a little person inside the device&#x201D; in home assistants like Alexa. 
Children with more exposure to and affinity with digital voice assistants have more pronounced psychological conceptions of technology, but it is unclear whether conceptions of technology and living things are blurred together (<xref ref-type="bibr" rid="ref35">Festerling et al., 2022</xref>). Children do distinguish between technology and other living things through ascriptions of intelligence, however (<xref ref-type="bibr" rid="ref9001">Bernstein and Crowley, 2008</xref>). Goal-directed, autonomous behavior (a component of ToM) is one of the key mechanisms by which children distinguish an object as being alive (<xref ref-type="bibr" rid="ref99">Opfer, 2002</xref>; <xref ref-type="bibr" rid="ref100">Opfer and Siegler, 2004</xref>). Given that children appear to be ascribing mind to technology more than ever, this trend is likely to continue with AI advancement.</p>
<p>We are skeptical that socially mistreating AI can result in emotional relief, translating into better social behavior toward other people. Although the theory has been proposed, little if any evidence supports it. Encouraging people, and especially children, to berate or socially mistreat AI on the theory that it will help them become kinder toward people seems ill-advised to us. In contrast, the existing evidence suggests that human treatment of AI can sometimes result in a practice effect, which carries over to how people treat each other. Those practice effects could either result in social harm, if antisocial behavior is practiced, or social benefit, if pro-social behavior is practiced.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec12">
<title>Discussion</title>
<sec id="sec13">
<title>The moral issue of perceiving consciousness in AI and suggested regulations</title>
<p>As stated at the beginning of this article, we do not take sides here on the question of whether AI is conscious. However, we argue that the fact that people often perceive it to be conscious is important and has social consequences. Mind perception is central to this process, and mind perception itself evokes moral thinking. Some researchers claim that &#x201C;mind perception is the essence of morality&#x201D; (<xref ref-type="bibr" rid="ref43">Gray and Wegner, 2012</xref>). When people perceive mind in an agent, they may also view it as capable of having conscious experience and therefore perceive it as something worthy of moral care (<xref ref-type="bibr" rid="ref42">Gray et al., 2007</xref>). Mind perception moderates whether someone judges an artificial agent&#x2019;s actions as moral or immoral (<xref ref-type="bibr" rid="ref124">Shank et al., 2021</xref>). We suggest that when people perceive an agent to possess subjective experience, they perceive it to be conscious; when they perceive it to be conscious, they are more likely to perceive it as worthy of moral consideration. A conscious being is perceived as an entity that can act morally or immorally, and that can be treated morally or immorally.</p>
<p>We suggest it is worth at least considering whether social actor AI, as it becomes more humanlike, should be viewed as having the status of a moral patient or a protected being that should be treated with care. The crucial question may not be whether the artificial agent deserves moral protection, but rather whether we humans will harm ourselves socially and emotionally if we practice harming humanlike AI, and whether we will help ourselves if we practice pro-social behavior toward humanlike AI. We have before us the potential for cultural improvement or cultural harm as we continue to integrate social actor AI into our world. How can we ensure that we use AI for good? There are several options, some of which are unlikely and unenforceable, and one of which we view as being the optimal choice.</p>
<p>One option is to enforce how people treat AI, to reduce the risk of the public practicing antisocial behavior and to increase the practice of prosocial behavior. Some have taken the stance that AI should be morally protected. According to philosophers such as <xref ref-type="bibr" rid="ref118">Ryland (2021a,b)</xref>, who characterizes relationships with robots in terms of friendship and hate, hate toward robots is morally wrong, and we should consider it even more so as robots become more humanlike. Others have claimed that we should give AI rights or protections, because AI inherently deserves them due to its moral-care status (<xref ref-type="bibr" rid="ref2">Akst, 2023</xref>). Not only is this suggestion vague, but it is also pragmatically unlikely. Politically, it is overwhelmingly unlikely that any law would be passed in which a human being is supposed to be arrested, charged, or serve jail time for abusing a chatbot. The first politician to suggest it would end their career. Any political party to support it would lose the electorate. We can barely pass laws to protect transgender people; imagine the political and cultural backlash to any such legal protections for non-human machines. Regulating human treatment of AI is, in our opinion, a non-starter.</p>
<p>A second option is to regulate AI such that it discourages antisocial behavior and encourages prosocial behavior. We suggest this second option is much more feasible. For example, abusive treatment of AI by the user could be met with a lack of response (the old, &#x201C;just ignore the bully and he&#x2019;ll go away, because he will not get the reaction he&#x2019;s looking for&#x201D;). The industries backing digital voice assistants have already begun to integrate this approach into responses to bullying speech. In 2010, if a user told Siri, &#x201C;You&#x2019;re a slut,&#x201D; it was programmed to respond with, &#x201C;I&#x2019;d blush if I could.&#x201D; Due to stakeholder feedback, the response has now been changed to a more socially healthy, &#x201C;I will not respond to that&#x201D; (<xref ref-type="bibr" rid="ref139">UNESCO &#x0026; EQUALS Skills Coalition et al., 2019</xref>; <xref ref-type="bibr" rid="ref138">UNESCO, 2020</xref>). Currently, the largest industries backing AI, such as OpenAI with ChatGPT, are altering and restricting the types of inputs their social actor AI will respond to. This trend toward industry self-regulation of AI is encouraging. However, we are currently entirely dependent on the good intentions of industry leaders to control whether social actor AI encourages prosocial or antisocial behavior in users. Governing bodies have begun to make regulation attempts, but their proposals have received criticism: such documents try a &#x201C;one-size-fits-all approach&#x201D; that may result in further inequality. For example, the EU drafted an Artificial Intelligence Act (AIA) that proposes a ban on AI that causes psychological harm, but the potential pitfalls of this legislation appear to outweigh its impact on psychological well-being (<xref ref-type="bibr" rid="ref102">Pa&#x0142;ka, 2023</xref>).</p>
<p>Social actor AI is increasingly infiltrating every part of society, interacting with an increasing percentage of humanity, and therefore even if it only subtly shapes the psychological state and interpersonal behavior of each user, it could cause a massive shift of normative social behavior across the world. If there is to be government regulation of AI to reduce its risk and increase its benefit to humanity, we suggest that regulations aimed at its prosociality would make the biggest difference. One could imagine a Food and Drug Administration (FDA) style agency, informed by psychological experts, that studies how to build AI such that it reinforces prosociality in users. Assays could be developed to test AI on sample groups to measure its short- and long-term psychological impacts on users, data that is unfortunately largely missing at the present time. Perhaps, akin to FDA regulations on new drugs, new AI that is slated to be released to a wider public should be put through a battery of tests to show that, at the very least, it does no psychological harm. Drug companies are required to show extensive safety data before releasing a product. AI companies currently are not. It is in this space that government regulation of AI makes sense to us.</p>
<p>Others have made claims in the name of ethics about regulating characteristics of AI; however, these suggestions seem outdated. According to <xref ref-type="bibr" rid="ref16">Bryson (2010)</xref>, robots should be &#x201C;slaves&#x201D;&#x2014;this does not mean that we should make robots slaves, but rather, we should keep them at a simpler developmental level by not giving them characteristics that might enable people to view them as anything other than owned and created by humans for humans. Bryson claims that it would be immoral to create a robot that can feel emotions like pain. <xref ref-type="bibr" rid="ref90">Metzinger (2021)</xref> called for a ban on development of AI that could be considered sentient. AI advancement, however, continues in this direction. Calls for stopping the technological progress have not been effective. Relatively early in development of social actor AI, computer science researchers created benchmarks for human likeness to enable people to create more humanlike AI (<xref ref-type="bibr" rid="ref56">Kahn et al., 2007</xref>). That human likeness has increased since. Our proposal has less to do with regulating how advanced or how humanlike AI becomes, and more to do with regulating how AI impacts the psychology of users by providing a model for prosocial behavior or by ignoring, confronting, or rectifying antisocial behavior.</p>
<p>Almost all discussion of regulating AI centers around its potential for harm. We will end this article by noting the enormous potential for benefit, especially in light of AI&#x2019;s guaranteed permanence in our present and future. Social AI is increasingly similar to humans in that it can engage in humanlike discourse, appear humanlike, and impact our social attitudes and interactions. Yet, social AI differs from humans in at least one significant way: it does not experience social or emotional fatigue. The opportunity to practice prosocial behavior is endless. For example, a chatbot will not grow tired and upset if you need to constructively work through a conflict with it. Neither will a chatbot disappear in the middle of a conversation when you are experiencing sadness or hurt and are in need of a friend. Social actor AI can both provide support and model prosocial behavior by remaining polite and present. Chatbots like WoeBot help users work through difficult issues by asking questions in the style of cognitive behavioral therapy (<xref ref-type="bibr" rid="ref36">Fitzpatrick et al., 2017</xref>). Much like the benefits of journaling (<xref ref-type="bibr" rid="ref104">Pennebaker, 1997</xref>, <xref ref-type="bibr" rid="ref105">2004</xref>), this human-chatbot engagement guides the user to make meaning of their experiences. It is worth noting that people who feel isolated or have experienced social rejection or social frustration may be a significant source of political and social disruption in today&#x2019;s world. If a universally available companion bot could boost their sense of social well-being and allow them to improve their social interaction skills through practice, that tool could make a sizable contribution to society. If AI is regulated such that it encourages people to treat it in a positive, pro-social way, and if carry-over effects are real, then AI becomes a potential source of enormous social and psychological good in the world.</p>
<p>If we are to effectively tackle the ever-growing issue of what to do in response to the surge of AI in our world, we cannot continue to point out only the ways in which it is harmful. AI is here to stay, and therefore we should be pragmatic with our approach. By understanding the ways in which interactions with AI can be both positive and negative, we can start to mitigate the bad by replacing it with the good.</p>
</sec>
</sec>
<sec sec-type="data-availability" id="sec14">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec15">
<title>Author contributions</title>
<p>RG: Conceptualization, Funding acquisition, Investigation, Resources, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MG: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec16">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. RG is funded by the National Science Foundation Graduate Research Fellowship Program. This material is based upon work supported by the National Science Foundation Graduate Research Fellowship Program under Grant No. KB0013612. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation.</p>
</sec>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abubshait</surname> <given-names>A.</given-names></name> <name><surname>Wiese</surname> <given-names>E.</given-names></name></person-group> (<year>2017</year>). <article-title>You look human, but act like a machine: agent appearance and behavior modulate different aspects of human-robot interaction</article-title>. <source>Front. Psychol.</source> <volume>8</volume>:<fpage>1393</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01393</pub-id>, PMID: <pub-id pub-id-type="pmid">28878703</pub-id></citation>
</ref>
<ref id="ref2">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Akst</surname> <given-names>D.</given-names></name>
</person-group> (<year>2023</year>). Should robots with artificial intelligence have moral or legal rights? WSJ. Available at: <ext-link xlink:href="https://www.wsj.com/articles/robots-ai-legal-rights-3c47ef40" ext-link-type="uri">https://www.wsj.com/articles/robots-ai-legal-rights-3c47ef40</ext-link></citation>
</ref>
<ref id="ref3">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Alexander</surname> <given-names>F.</given-names></name> <name><surname>French</surname> <given-names>T. M.</given-names></name></person-group> (<year>1946</year>). <source>Psychoanalytic Therapy: Principles and Application</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Ronald Press</publisher-name>.</citation>
</ref>
<ref id="ref4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anderson</surname> <given-names>C. A.</given-names></name> <name><surname>Bushman</surname> <given-names>B. J.</given-names></name></person-group> (<year>2002</year>). <article-title>Human aggression</article-title>. <source>Annu. Rev. Psychol.</source> <volume>53</volume>, <fpage>27</fpage>&#x2013;<lpage>51</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev.psych.53.100901.135231</pub-id></citation>
</ref>
<ref id="ref5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Appel</surname> <given-names>M.</given-names></name> <name><surname>Izydorczyk</surname> <given-names>D.</given-names></name> <name><surname>Weber</surname> <given-names>S.</given-names></name> <name><surname>Mara</surname> <given-names>M.</given-names></name> <name><surname>Lischetzke</surname> <given-names>T.</given-names></name></person-group> (<year>2020</year>). <article-title>The uncanny of mind in a machine: humanoid robots as tools, agents, and experiencers</article-title>. <source>Comput. Hum. Behav.</source> <volume>102</volume>, <fpage>274</fpage>&#x2013;<lpage>286</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2019.07.031</pub-id></citation>
</ref>
<ref id="ref6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Appel</surname> <given-names>J.</given-names></name> <name><surname>Von Der P&#x00FC;tten</surname> <given-names>A.</given-names></name> <name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name> <name><surname>Gratch</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>Does humanity matter? Analyzing the importance of social cues and perceived agency of a computer system for the emergence of social reactions during human-computer interaction</article-title>. <source>Adv. Hum. Comput. Interact.</source> <volume>2012</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2012/324694</pub-id></citation>
</ref>
<ref id="ref7">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Baars</surname> <given-names>B. J.</given-names></name>
</person-group> (<year>1997</year>). In the Theater of Consciousness.</citation>
</ref>
<ref id="ref8">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Bandura</surname> <given-names>A.</given-names></name>
</person-group> (<year>1965</year>). <article-title>Influence of models&#x2019; reinforcement contingencies on the acquisition of imitative responses</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>1</volume>, <fpage>589</fpage>&#x2013;<lpage>595</lpage>. doi: <pub-id pub-id-type="doi">10.1037/h0022070</pub-id></citation>
</ref>
<ref id="ref9">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Bandura</surname> <given-names>A.</given-names></name>
</person-group> (<year>1977</year>). <source>Social Learning Theory</source>. <publisher-loc>Englewood Cliffs, N.J.</publisher-loc>: <publisher-name>Prentice Hall</publisher-name>.</citation>
</ref>
<ref id="ref10">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Banks</surname> <given-names>J.</given-names></name>
</person-group> (<year>2019</year>). <article-title>Theory of mind in social robots: replication of five established human tests</article-title>. <source>Int. J. Soc. Robot.</source> <volume>12</volume>, <fpage>403</fpage>&#x2013;<lpage>414</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12369-019-00588-x</pub-id></citation>
</ref>
<ref id="ref11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bastian</surname> <given-names>B.</given-names></name> <name><surname>Haslam</surname> <given-names>N.</given-names></name></person-group> (<year>2010</year>). <article-title>Excluded from humanity: the dehumanizing effects of social ostracism</article-title>. <source>J. Exp. Soc. Psychol.</source> <volume>46</volume>, <fpage>107</fpage>&#x2013;<lpage>113</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jesp.2009.06.022</pub-id></citation>
</ref>
<ref id="ref12">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Beneteau</surname> <given-names>E.</given-names></name> <name><surname>Boone</surname> <given-names>A.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Kientz</surname> <given-names>J. A.</given-names></name> <name><surname>Yip</surname> <given-names>J.</given-names></name> <name><surname>Hiniker</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). &#x201C;Parenting with Alexa: exploring the introduction of smart speakers on family dynamics&#x201D; in <italic>Proceedings of the 2020 CHI conference on human factors in computing systems (CHI '20)</italic>. Association for Computing Machinery, New York, NY, USA. 1&#x2013;13.</citation>
</ref>
<ref id="ref13">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Beneteau</surname> <given-names>E.</given-names></name> <name><surname>Richards</surname> <given-names>O. K.</given-names></name> <name><surname>Zhang</surname> <given-names>M.</given-names></name> <name><surname>Kientz</surname> <given-names>J. A.</given-names></name> <name><surname>Yip</surname> <given-names>J.</given-names></name> <name><surname>Hiniker</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). &#x201C;Breakdowns between families and Alexa&#x201D; in <italic>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI '19)</italic>. Association for Computing Machinery, New York, NY, USA. 14.</citation>
</ref>
<ref id="ref14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brandtz&#x00E6;g</surname> <given-names>P. B.</given-names></name> <name><surname>Skjuve</surname> <given-names>M.</given-names></name> <name><surname>F&#x00F8;lstad</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>My AI friend: how users of a social chatbot understand their human&#x2013;AI friendship</article-title>. <source>Hum. Commun. Res.</source> <volume>48</volume>, <fpage>404</fpage>&#x2013;<lpage>429</lpage>. doi: <pub-id pub-id-type="doi">10.1093/hcr/hqac008</pub-id></citation>
</ref>
<ref id="ref15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Broadbent</surname> <given-names>E.</given-names></name> <name><surname>Kumar</surname> <given-names>V.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Sollers</surname> <given-names>J. J.</given-names></name> <name><surname>Stafford</surname> <given-names>R.</given-names></name> <name><surname>MacDonald</surname> <given-names>B. A.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Robots with display screens: a robot with a more humanlike face display is perceived to have more mind and a better personality</article-title>. <source>PLoS One</source> <volume>8</volume>:<fpage>e72589</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0072589</pub-id>, PMID: <pub-id pub-id-type="pmid">24015263</pub-id></citation>
</ref>
<ref id="ref16">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Bryson</surname> <given-names>J. J.</given-names></name>
</person-group> (<year>2010</year>). &#x201C;<article-title>Robots Should be Slaves</article-title>&#x201D;, in <source>Close Engagements with Artificial Companions: Key social, psychological, ethical and design issues. Ed. Yorick Wilks</source> <publisher-name>John Benjamins Publishing Company eBooks</publisher-name>, <fpage>63</fpage>&#x2013;<lpage>74</lpage>.</citation>
</ref>
<ref id="ref17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Burleigh</surname> <given-names>T.</given-names></name> <name><surname>Schoenherr</surname> <given-names>J. R.</given-names></name> <name><surname>Lacroix</surname> <given-names>G.</given-names></name></person-group> (<year>2013</year>). <article-title>Does the uncanny valley exist? An empirical test of the relationship between eeriness and the human likeness of digitally created faces</article-title>. <source>Comput. Hum. Behav.</source> <volume>29</volume>, <fpage>759</fpage>&#x2013;<lpage>771</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2012.11.021</pub-id></citation>
</ref>
<ref id="ref18">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Bushman</surname> <given-names>B. J.</given-names></name>
</person-group> (<year>2002</year>). <article-title>Does venting anger feed or extinguish the flame? Catharsis, rumination, distraction, anger, and aggressive responding</article-title>. <source>Personal. Soc. Psychol. Bull.</source> <volume>28</volume>, <fpage>724</fpage>&#x2013;<lpage>731</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0146167202289002</pub-id></citation>
</ref>
<ref id="ref9001">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bernstein</surname> <given-names>D.</given-names></name> <name><surname>Crowley</surname> <given-names>K.</given-names></name></person-group> (<year>2008</year>). <article-title>Searching for signs of intelligent life: An investigation of young children&#x2019;s beliefs about robot intelligence</article-title>. <source>Journal of the Learning Sciences</source> <volume>17</volume>, <fpage>225</fpage>&#x2013;<lpage>247</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10508400801986116</pub-id></citation>
</ref>
<ref id="ref19">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Chalmers</surname> <given-names>D. J.</given-names></name>
</person-group> (<year>1996</year>). <source>Facing Up to the Problem of Consciousness</source>. <publisher-name>The MIT Press eBooks</publisher-name>.</citation>
</ref>
<ref id="ref20">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Chalmers</surname> <given-names>D. J.</given-names></name>
</person-group> (<year>2023</year>). Could a large language model be conscious? arXiv [Preprint]. doi: <pub-id pub-id-type="doi">10.48550/arxiv.2303.07103</pub-id></citation>
</ref>
<ref id="ref21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cheetham</surname> <given-names>M.</given-names></name> <name><surname>Suter</surname> <given-names>P.</given-names></name> <name><surname>J&#x00E4;ncke</surname> <given-names>L.</given-names></name></person-group> (<year>2014</year>). <article-title>Perceptual discrimination difficulty and familiarity in the Uncanny Valley: more like a &#x201C;Happy Valley&#x201D;</article-title>. <source>Front. Psychol.</source> <volume>5</volume>:<fpage>1219</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2014.01219</pub-id>, PMID: <pub-id pub-id-type="pmid">25477829</pub-id></citation>
</ref>
<ref id="ref22">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Ciardo</surname> <given-names>F.</given-names></name> <name><surname>De Tommaso</surname> <given-names>D.</given-names></name> <name><surname>Wykowska</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). &#x201C;Effects of erring behavior in a human-robot joint musical task on adopting intentional stance toward the iCub robot&#x201D; in <italic>2021 30th IEEE International Conference on Robot &#x0026; Human Interactive Communication (RO-MAN)</italic>. Vancouver, BC, Canada, 698&#x2013;703.</citation>
</ref>
<ref id="ref23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ciardo</surname> <given-names>F.</given-names></name> <name><surname>De Tommaso</surname> <given-names>D.</given-names></name> <name><surname>Wykowska</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Joint action with artificial agents: human-likeness in behaviour and morphology affects sensorimotor signaling and social inclusion</article-title>. <source>Comput. Hum. Behav.</source> <volume>132</volume>:<fpage>107237</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2022.107237</pub-id></citation>
</ref>
<ref id="ref24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Croes</surname> <given-names>E.</given-names></name> <name><surname>Antheunis</surname> <given-names>M. L.</given-names></name></person-group> (<year>2020</year>). <article-title>Can we be friends with Mitsuku? A longitudinal study on the process of relationship formation between humans and a social chatbot</article-title>. <source>J. Soc. Pers. Relat.</source> <volume>38</volume>, <fpage>279</fpage>&#x2013;<lpage>300</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0265407520959463</pub-id></citation>
</ref>
<ref id="ref25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Denzler</surname> <given-names>M.</given-names></name> <name><surname>F&#x00F6;rster</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>A goal model of catharsis</article-title>. <source>Eur. Rev. Soc. Psychol.</source> <volume>23</volume>, <fpage>107</fpage>&#x2013;<lpage>142</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10463283.2012.699358</pub-id></citation>
</ref>
<ref id="ref26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Doerig</surname> <given-names>A.</given-names></name> <name><surname>Schurger</surname> <given-names>A.</given-names></name> <name><surname>Herzog</surname> <given-names>M. H.</given-names></name></person-group> (<year>2020</year>). <article-title>Hard criteria for empirical theories of consciousness</article-title>. <source>Cogn. Neurosci.</source> <volume>12</volume>, <fpage>41</fpage>&#x2013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17588928.2020.1772214</pub-id>, PMID: <pub-id pub-id-type="pmid">32663056</pub-id></citation>
</ref>
<ref id="ref27">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Dollard</surname> <given-names>J.</given-names></name> <name><surname>Miller</surname> <given-names>N. E.</given-names></name></person-group> (<year>1950</year>). <source>Personality and Psychotherapy</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>McGraw-Hill</publisher-name>.</citation>
</ref>
<ref id="ref28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dubosc</surname> <given-names>C.</given-names></name> <name><surname>Gorisse</surname> <given-names>G.</given-names></name> <name><surname>Christmann</surname> <given-names>O.</given-names></name> <name><surname>Fleury</surname> <given-names>S.</given-names></name> <name><surname>Poinsot</surname> <given-names>K.</given-names></name> <name><surname>Richir</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Impact of avatar facial anthropomorphism on body ownership, attractiveness and social presence in collaborative tasks in immersive virtual environments</article-title>. <source>Comput. Graph.</source> <volume>101</volume>, <fpage>82</fpage>&#x2013;<lpage>92</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cag.2021.08.011</pub-id></citation>
</ref>
<ref id="ref29">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Duffy</surname> <given-names>B.</given-names></name>
</person-group> (<year>2008</year>). <article-title>Fundamental issues in affective intelligent social machines</article-title>. <source>Open Artif. Intellig. J.</source> <volume>2</volume>, <fpage>21</fpage>&#x2013;<lpage>34</lpage>. doi: <pub-id pub-id-type="doi">10.2174/1874061800802010021</pub-id></citation>
</ref>
<ref id="ref30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Edwards</surname> <given-names>A.</given-names></name> <name><surname>Edwards</surname> <given-names>C.</given-names></name> <name><surname>Westerman</surname> <given-names>D.</given-names></name> <name><surname>Spence</surname> <given-names>P. R.</given-names></name></person-group> (<year>2019</year>). <article-title>Initial expectations, interactions, and beyond with social robots</article-title>. <source>Comput. Hum. Behav.</source> <volume>90</volume>, <fpage>308</fpage>&#x2013;<lpage>314</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2018.08.042</pub-id></citation>
</ref>
<ref id="ref31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Etzrodt</surname> <given-names>K.</given-names></name> <name><surname>Engesser</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Voice-based agents as personified things: assimilation and accommodation as equilibration of doubt</article-title>. <source>Hum. Machine Commun. J.</source> <volume>2</volume>, <fpage>57</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.30658/hmc.2.3</pub-id></citation>
</ref>
<ref id="ref32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Etzrodt</surname> <given-names>K.</given-names></name> <name><surname>Gentzel</surname> <given-names>P.</given-names></name> <name><surname>Utz</surname> <given-names>S.</given-names></name> <name><surname>Engesser</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Human-machine-communication: introduction to the special issue</article-title>. <source>Publizistik</source> <volume>67</volume>, <fpage>439</fpage>&#x2013;<lpage>448</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11616-022-00754-8</pub-id></citation>
</ref>
<ref id="ref33">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Eyssel</surname> <given-names>F. A.</given-names></name> <name><surname>Pfundmair</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). &#x201C;Predictors of psychological anthropomorphization, mind perception, and the fulfillment of social needs: A case study with a zoomorphic robot&#x201D; in <italic>Proceedings of the 24th IEEE International Symposium on Robot and Human Interactive Communication</italic>.</citation>
</ref>
<ref id="ref34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrari</surname> <given-names>F.</given-names></name> <name><surname>Paladino</surname> <given-names>M. P.</given-names></name> <name><surname>Jetten</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Blurring human&#x2013;machine distinctions: anthropomorphic appearance in social robots as a threat to human distinctiveness</article-title>. <source>Int. J. Soc. Robot.</source> <volume>8</volume>, <fpage>287</fpage>&#x2013;<lpage>302</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12369-016-0338-y</pub-id></citation>
</ref>
<ref id="ref35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Festerling</surname> <given-names>J.</given-names></name> <name><surname>Siraj</surname> <given-names>I.</given-names></name> <name><surname>Malmberg</surname> <given-names>L. E.</given-names></name></person-group> (<year>2022</year>). <article-title>Exploring children&#x2019;s exposure to voice assistants and their ontological conceptualizations of life and technology</article-title>. <source>AI &#x0026; Soc</source>. doi: <pub-id pub-id-type="doi">10.1007/s00146-022-01555-3</pub-id>, PMID: <pub-id pub-id-type="pmid">36276897</pub-id></citation>
</ref>
<ref id="ref36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fitzpatrick</surname> <given-names>K. K.</given-names></name> <name><surname>Darcy</surname> <given-names>A.</given-names></name> <name><surname>Vierhile</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Delivering cognitive behavior therapy to young adults with symptoms of depression and anxiety using a fully automated conversational agent (Woebot): a randomized controlled trial</article-title>. <source>JMIR Mental Health</source> <volume>4</volume>:<fpage>e7785</fpage>. doi: <pub-id pub-id-type="doi">10.2196/mental.7785</pub-id>, PMID: <pub-id pub-id-type="pmid">28588005</pub-id></citation>
</ref>
<ref id="ref37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fox</surname> <given-names>J.</given-names></name> <name><surname>Ahn</surname> <given-names>S. J.</given-names></name> <name><surname>Janssen</surname> <given-names>J.</given-names></name> <name><surname>Yeykelis</surname> <given-names>L.</given-names></name> <name><surname>Segovia</surname> <given-names>K. Y.</given-names></name> <name><surname>Bailenson</surname> <given-names>J. N.</given-names></name></person-group> (<year>2014</year>). <article-title>Avatars versus agents: a meta-analysis quantifying the effect of agency on social influence</article-title>. <source>Hum. Comput. Interact.</source> <volume>30</volume>, <fpage>401</fpage>&#x2013;<lpage>432</lpage>. doi: <pub-id pub-id-type="doi">10.1080/07370024.2014.921494</pub-id></citation>
</ref>
<ref id="ref38">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Frith</surname> <given-names>C. D.</given-names></name>
</person-group> (<year>2002</year>). <article-title>Attention to action and awareness of other minds</article-title>. <source>Conscious. Cogn.</source> <volume>11</volume>, <fpage>481</fpage>&#x2013;<lpage>487</lpage>. doi: <pub-id pub-id-type="doi">10.1016/s1053-8100(02)00022-3</pub-id></citation>
</ref>
<ref id="ref39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fu</surname> <given-names>Y.</given-names></name> <name><surname>Michelson</surname> <given-names>R.</given-names></name> <name><surname>Lin</surname> <given-names>Y.</given-names></name> <name><surname>Nguyen</surname> <given-names>L. K.</given-names></name> <name><surname>Tayebi</surname> <given-names>T. J.</given-names></name> <name><surname>Hiniker</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Social emotional learning with conversational agents</article-title>. <source>Proc. ACM Interact. Mobile Wearable Ubiquit. Technol.</source> <volume>6</volume>, <fpage>1</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3534622</pub-id></citation>
</ref>
<ref id="ref40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garg</surname> <given-names>R.</given-names></name> <name><surname>Sengupta</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>He is just like me</article-title>. <source>Proc. ACM Interact. Mobile Wearable Ubiquit. Technol.</source> <volume>4</volume>, <fpage>1</fpage>&#x2013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3381002</pub-id></citation>
</ref>
<ref id="ref41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gong</surname> <given-names>L.</given-names></name> <name><surname>Nass</surname> <given-names>C.</given-names></name></person-group> (<year>2007</year>). <article-title>When a talking-face computer agent is half-human and half-humanoid: human identity and consistency preference</article-title>. <source>Hum. Commun. Res.</source> <volume>33</volume>, <fpage>163</fpage>&#x2013;<lpage>193</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1468-2958.2007.00295.x</pub-id></citation>
</ref>
<ref id="ref42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gray</surname> <given-names>H. M.</given-names></name> <name><surname>Gray</surname> <given-names>K.</given-names></name> <name><surname>Wegner</surname> <given-names>D. M.</given-names></name></person-group> (<year>2007</year>). <article-title>Dimensions of mind perception</article-title>. <source>Science</source> <volume>315</volume>:<fpage>619</fpage>. doi: <pub-id pub-id-type="doi">10.1126/science.1134475</pub-id></citation>
</ref>
<ref id="ref43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gray</surname> <given-names>K.</given-names></name> <name><surname>Wegner</surname> <given-names>D. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Feeling robots and human zombies: mind perception and the uncanny valley</article-title>. <source>Cognition</source> <volume>125</volume>, <fpage>125</fpage>&#x2013;<lpage>130</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2012.06.007</pub-id>, PMID: <pub-id pub-id-type="pmid">22784682</pub-id></citation>
</ref>
<ref id="ref44">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Graziano</surname> <given-names>M. S. A.</given-names></name>
</person-group> (<year>2013</year>). <source>Consciousness and the Social Brain</source>. <publisher-name>New York, NY: Oxford University Press</publisher-name>.</citation>
</ref>
<ref id="ref45">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Guingrich</surname> <given-names>R.</given-names></name> <name><surname>Graziano</surname> <given-names>M. S. A.</given-names></name></person-group> (<year>2023</year>). Chatbots as social companions: how people perceive consciousness, human likeness, and social health benefits in machines (arXiv:2311.10599). arXiv [Preprint]. doi: <pub-id pub-id-type="doi">10.48550/arXiv.2311.10599</pub-id></citation>
</ref>
<ref id="ref46">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Han</surname> <given-names>M. C.</given-names></name>
</person-group> (<year>2021</year>). <article-title>The impact of anthropomorphism on consumers&#x2019; purchase decision in chatbot commerce</article-title>. <source>J. Internet Commer.</source> <volume>20</volume>, <fpage>46</fpage>&#x2013;<lpage>65</lpage>. doi: <pub-id pub-id-type="doi">10.1080/15332861.2020.1863022</pub-id></citation>
</ref>
<ref id="ref47">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Harley</surname> <given-names>T. A.</given-names></name>
</person-group> (<year>2021</year>). <source>The Science of Consciousness</source>. <publisher-name>Cambridge, UK: Cambridge University Press</publisher-name>.</citation>
</ref>
<ref id="ref48">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Harris</surname> <given-names>P. L.</given-names></name>
</person-group> (<year>1992</year>). <article-title>From simulation to folk psychology: the case for development</article-title>. <source>Mind Lang.</source> <volume>7</volume>, <fpage>120</fpage>&#x2013;<lpage>144</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1468-0017.1992.tb00201.x</pub-id></citation>
</ref>
<ref id="ref49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Haslam</surname> <given-names>N.</given-names></name> <name><surname>Loughnan</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>Dehumanization and infrahumanization</article-title>. <source>Annu. Rev. Psychol.</source> <volume>65</volume>, <fpage>399</fpage>&#x2013;<lpage>423</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-psych-010213-115045</pub-id>, PMID: <pub-id pub-id-type="pmid">23808915</pub-id></citation>
</ref>
<ref id="ref50">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Hayashi</surname> <given-names>Y.</given-names></name> <name><surname>Miwa</surname> <given-names>K.</given-names></name></person-group> (<year>2009</year>). &#x201C;Cognitive and emotional characteristics of communication in human-human/human-agent interaction&#x201D; in <italic>Proceedings of the 13th International Conference on Human-Computer Interaction. Part III: Ubiquitous and Intelligent Interaction</italic>. Springer Science &#x0026; Business Media, 267&#x2013;274.</citation>
</ref>
<ref id="ref51">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Heyselaar</surname> <given-names>E.</given-names></name> <name><surname>Bosse</surname> <given-names>T.</given-names></name></person-group> (<year>2020</year>). &#x201C;<article-title>Using Theory of Mind to Assess Users&#x2019; Sense of Agency in Social Chatbots</article-title>,&#x201D; in <source>Chatbot Research and Design. Eds. A. F&#x00F8;lstad, T. Araujo, S. Papadopoulos, E. L.-C. Law, O.-C. Granmo, E. Luger, and P. B. Brandtzaeg. Vol. 11970 (Springer International Publishing)</source>, <fpage>158</fpage>&#x2013;<lpage>169</lpage>.</citation>
</ref>
<ref id="ref52">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Hiniker</surname> <given-names>A.</given-names></name> <name><surname>Wang</surname> <given-names>A.</given-names></name> <name><surname>Tran</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>M. R.</given-names></name> <name><surname>Radesky</surname> <given-names>J.</given-names></name> <name><surname>Sobel</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Can Conversational Agents Change the Way Children Talk to People?</article-title> in: <source>IDC &#x2018;21: Proceedings of the 20th Annual ACM Interaction Design and Children Conference</source>, <fpage>338</fpage>&#x2013;<lpage>349</lpage>.</citation>
</ref>
<ref id="ref53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ho</surname> <given-names>A. S.</given-names></name> <name><surname>Hancock</surname> <given-names>J.</given-names></name> <name><surname>Miner</surname> <given-names>A. S.</given-names></name></person-group> (<year>2018</year>). <article-title>Psychological, relational, and emotional effects of self-disclosure after conversations with a chatbot</article-title>. <source>J. Commun.</source> <volume>68</volume>, <fpage>712</fpage>&#x2013;<lpage>733</lpage>. doi: <pub-id pub-id-type="doi">10.1093/joc/jqy026</pub-id>, PMID: <pub-id pub-id-type="pmid">30100620</pub-id></citation>
</ref>
<ref id="ref54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoyt</surname> <given-names>C. L.</given-names></name> <name><surname>Blascovich</surname> <given-names>J.</given-names></name> <name><surname>Swinth</surname> <given-names>K. R.</given-names></name></person-group> (<year>2003</year>). <article-title>Social inhibition in immersive virtual environments</article-title>. <source>Presence Teleoperat. Virtual Environ.</source> <volume>12</volume>, <fpage>183</fpage>&#x2013;<lpage>195</lpage>. doi: <pub-id pub-id-type="doi">10.1162/105474603321640932</pub-id></citation>
</ref>
<ref id="ref55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jacobs</surname> <given-names>O.</given-names></name> <name><surname>Gazzaz</surname> <given-names>K.</given-names></name> <name><surname>Kingstone</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Mind the robot! Variation in attributions of mind to a wide set of real and fictional robots</article-title>. <source>Int. J. Soc. Robot.</source> <volume>14</volume>, <fpage>529</fpage>&#x2013;<lpage>537</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12369-021-00807-4</pub-id></citation>
</ref>
<ref id="ref56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kahn</surname> <given-names>P. H.</given-names></name> <name><surname>Ishiguro</surname> <given-names>H.</given-names></name> <name><surname>Friedman</surname> <given-names>B.</given-names></name> <name><surname>Kanda</surname> <given-names>T.</given-names></name> <name><surname>Freier</surname> <given-names>N. G.</given-names></name> <name><surname>Severson</surname> <given-names>R. L.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>What is a human?</article-title> <source>Interact. Stud.</source> <volume>8</volume>, <fpage>363</fpage>&#x2013;<lpage>390</lpage>. doi: <pub-id pub-id-type="doi">10.1075/is.8.3.04kah</pub-id></citation>
</ref>
<ref id="ref57">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Kahn</surname> <given-names>P. H.</given-names> <suffix>Jr.</suffix></name> <name><surname>Reichert</surname> <given-names>A. L.</given-names></name> <name><surname>Gary</surname> <given-names>H. E.</given-names></name> <name><surname>Kanda</surname> <given-names>T.</given-names></name> <name><surname>Ishiguro</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2011</year>). &#x201C;The new ontological category hypothesis in human-robot interaction&#x201D; in HRI&#x2018;11. Association for Computing Machinery, New York, NY, USA. 159&#x2013;160.</citation>
</ref>
<ref id="ref58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kahn</surname> <given-names>P. H.</given-names></name> <name><surname>Kanda</surname> <given-names>T.</given-names></name> <name><surname>Ishiguro</surname> <given-names>H.</given-names></name> <name><surname>Freier</surname> <given-names>N. G.</given-names></name> <name><surname>Severson</surname> <given-names>R. L.</given-names></name> <name><surname>Gill</surname> <given-names>B. T.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>&#x201C;Robovie, you&#x2019;ll have to go into the closet now&#x201D;: Children&#x2019;s social and moral relationships with a humanoid robot</article-title>. <source>Dev. Psychol.</source> <volume>48</volume>, <fpage>303</fpage>&#x2013;<lpage>314</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0027033</pub-id>, PMID: <pub-id pub-id-type="pmid">22369338</pub-id></citation>
</ref>
<ref id="ref59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>K&#x00E4;tsyri</surname> <given-names>J.</given-names></name> <name><surname>F&#x00F6;rger</surname> <given-names>K.</given-names></name> <name><surname>M&#x00E4;k&#x00E4;r&#x00E4;inen</surname> <given-names>M.</given-names></name> <name><surname>Takala</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). <article-title>A review of empirical evidence on different uncanny valley hypotheses: support for perceptual mismatch as one road to the valley of eeriness</article-title>. <source>Front. Psychol.</source> <volume>6</volume>:<fpage>390</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.00390</pub-id>, PMID: <pub-id pub-id-type="pmid">25914661</pub-id></citation>
</ref>
<ref id="ref60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kawabe</surname> <given-names>T.</given-names></name> <name><surname>Sasaki</surname> <given-names>K.</given-names></name> <name><surname>Ihaya</surname> <given-names>K.</given-names></name> <name><surname>Yamada</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>When categorization-based stranger avoidance explains the uncanny valley: a comment on MacDorman and Chattopadhyay (2016)</article-title>. <source>Cognition</source> <volume>161</volume>, <fpage>129</fpage>&#x2013;<lpage>131</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2016.09.001</pub-id>, PMID: <pub-id pub-id-type="pmid">27642031</pub-id></citation>
</ref>
<ref id="ref61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>D.</given-names></name> <name><surname>Frank</surname> <given-names>M. G.</given-names></name> <name><surname>Kim</surname> <given-names>S. T.</given-names></name></person-group> (<year>2014</year>). <article-title>Emotional display behavior in different forms of computer mediated communication</article-title>. <source>Comput. Hum. Behav.</source> <volume>30</volume>, <fpage>222</fpage>&#x2013;<lpage>229</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2013.09.001</pub-id></citation>
</ref>
<ref id="ref62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>W.</given-names></name> <name><surname>Ryoo</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Hypocrisy induction: using chatbots to promote COVID-19 social distancing</article-title>. <source>Cyberpsychol. Behav. Soc. Netw.</source> <volume>25</volume>, <fpage>27</fpage>&#x2013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.1089/cyber.2021.0057</pub-id>, PMID: <pub-id pub-id-type="pmid">34652216</pub-id></citation>
</ref>
<ref id="ref63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>Y.</given-names></name> <name><surname>Sundar</surname> <given-names>S. S.</given-names></name></person-group> (<year>2012</year>). <article-title>Anthropomorphism of computers: is it mindful or mindless?</article-title> <source>Comput. Hum. Behav.</source> <volume>28</volume>, <fpage>241</fpage>&#x2013;<lpage>250</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2011.09.006</pub-id></citation>
</ref>
<ref id="ref64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Knobe</surname> <given-names>J.</given-names></name> <name><surname>Prinz</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>Intuitions about consciousness: experimental studies</article-title>. <source>Phenomenol. Cogn. Sci.</source> <volume>7</volume>, <fpage>67</fpage>&#x2013;<lpage>83</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11097-007-9066-y</pub-id></citation>
</ref>
<ref id="ref65">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Koch</surname> <given-names>C.</given-names></name>
</person-group> (<year>2019</year>). The feeling of life itself: why consciousness is widespread but Can&#x2019;t be computed. Available at: <ext-link xlink:href="https://openlibrary.org/books/OL29832851M/Feeling_of_Life_Itself" ext-link-type="uri">https://openlibrary.org/books/OL29832851M/Feeling_of_Life_Itself</ext-link></citation>
</ref>
<ref id="ref66">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Kone&#x010D;ni</surname> <given-names>V.</given-names></name>
</person-group> (<year>2016</year>). <article-title>The anger-aggression bidirectional-causation (AABC) model&#x2019;s relevance for dyadic violence, revenge and catharsis</article-title>. <source>Soc. Behav. Res. Pract. Open J.</source> <volume>1</volume>, <fpage>1</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.17140/SBRPOJ-1-101</pub-id></citation>
</ref>
<ref id="ref67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krach</surname> <given-names>S.</given-names></name> <name><surname>Hegel</surname> <given-names>F.</given-names></name> <name><surname>Wrede</surname> <given-names>B.</given-names></name> <name><surname>Sagerer</surname> <given-names>G.</given-names></name> <name><surname>Binkofski</surname> <given-names>F.</given-names></name> <name><surname>Kircher</surname> <given-names>T.</given-names></name></person-group> (<year>2008</year>). <article-title>Can machines think? Interaction and perspective taking with robots investigated via fMRI</article-title>. <source>PLoS One</source> <volume>3</volume>:<fpage>e2597</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0002597</pub-id>, PMID: <pub-id pub-id-type="pmid">18612463</pub-id></citation>
</ref>
<ref id="ref68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name> <name><surname>Bente</surname> <given-names>G.</given-names></name> <name><surname>Eschenburg</surname> <given-names>F.</given-names></name> <name><surname>Troitzsch</surname> <given-names>H.</given-names></name></person-group> (<year>2009</year>). <article-title>Embodied conversational agents: research prospects for social psychology and an exemplary study</article-title>. <source>Soc. Psychol.</source> <volume>40</volume>, <fpage>26</fpage>&#x2013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.1027/1864-9335.40.1.26</pub-id></citation>
</ref>
<ref id="ref69">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Kr&#x00E4;mer</surname> <given-names>N.</given-names></name> <name><surname>Bente</surname> <given-names>G.</given-names></name> <name><surname>Piesk</surname> <given-names>J.</given-names></name></person-group> (<year>2003a</year>). The ghost in the machine. The influence of Embodied Conversational Agents on user expectations and user behavior in a TV/VCR application. ResearchGate. Available at: <ext-link xlink:href="https://www.researchgate.net/publication/242273054_The_ghost_in_the_machine_The_influence_of_Embodied_Conversational_Agents_on_user_expectations_and_user_behaviour_in_a_TVVCR_application1" ext-link-type="uri">https://www.researchgate.net/publication/242273054_The_ghost_in_the_machine_The_influence_of_Embodied_Conversational_Agents_on_user_expectations_and_user_behaviour_in_a_TVVCR_application1</ext-link></citation>
</ref>
<ref id="ref70">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name> <name><surname>Rosenthal-von der P&#x00FC;tten</surname> <given-names>A. M.</given-names></name> <name><surname>Hoffmann</surname> <given-names>L.</given-names></name></person-group> (<year>2015</year>). &#x201C;<article-title>Social effects of virtual and robot companions</article-title>&#x201D; in <source>The Handbook of the Psychology of Communication Technology, Ch. 6</source> (<publisher-name>John Wiley &#x0026; Sons, Ltd.</publisher-name>), <fpage>137</fpage>&#x2013;<lpage>159</lpage>.</citation>
</ref>
<ref id="ref71">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name> <name><surname>Tietz</surname> <given-names>B.</given-names></name> <name><surname>Bente</surname> <given-names>G.</given-names></name></person-group> (<year>2003b</year>). &#x201C;Effects of embodied Interface agents and their gestural activity&#x201D; in <italic>4th International Working Conference on Intelligent Virtual Agents</italic>. Hamburg: Springer. 292&#x2013;300.</citation>
</ref>
<ref id="ref72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kupferberg</surname> <given-names>A.</given-names></name> <name><surname>Glasauer</surname> <given-names>S.</given-names></name> <name><surname>Huber</surname> <given-names>M.</given-names></name> <name><surname>Rickert</surname> <given-names>M.</given-names></name> <name><surname>Knoll</surname> <given-names>A.</given-names></name> <name><surname>Brandt</surname> <given-names>T.</given-names></name></person-group> (<year>2011</year>). <article-title>Biological movement increases acceptance of humanoid robots as human partners in motor interaction</article-title>. <source>AI &#x0026; Soc.</source> <volume>26</volume>, <fpage>339</fpage>&#x2013;<lpage>345</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00146-010-0314-2</pub-id></citation>
</ref>
<ref id="ref73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>K&#x00FC;ster</surname> <given-names>D.</given-names></name> <name><surname>&#x015A;widerska</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Seeing the mind of robots: harm augments mind perception but benevolent intentions reduce dehumanisation of artificial entities in visual vignettes</article-title>. <source>Int. J. Psychol.</source> <volume>56</volume>, <fpage>454</fpage>&#x2013;<lpage>465</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ijop.12715</pub-id></citation>
</ref>
<ref id="ref74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>K&#x00FC;ster</surname> <given-names>D.</given-names></name> <name><surname>&#x015A;widerska</surname> <given-names>A.</given-names></name> <name><surname>Gunkel</surname> <given-names>D. J.</given-names></name></person-group> (<year>2020</year>). <article-title>I saw it on YouTube! How online videos shape perceptions of mind, morality, and fears about robots</article-title>. <source>New Media Soc.</source> <volume>23</volume>, <fpage>3312</fpage>&#x2013;<lpage>3331</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1461444820954199</pub-id></citation>
</ref>
<ref id="ref75">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Lee</surname> <given-names>E.</given-names></name>
</person-group> (<year>2010</year>). <article-title>The more humanlike, the better? How speech type and users&#x2019; cognitive style affect social responses to computers</article-title>. <source>Comput. Hum. Behav.</source> <volume>26</volume>, <fpage>665</fpage>&#x2013;<lpage>672</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2010.01.003</pub-id></citation>
</ref>
<ref id="ref76">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>K. M.</given-names></name> <name><surname>Jung</surname> <given-names>Y.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Kim</surname> <given-names>S. R.</given-names></name></person-group> (<year>2006</year>). <article-title>Are physically embodied social agents better than disembodied social agents?: the effects of physical embodiment, tactile interaction, and people&#x2019;s loneliness in human&#x2013;robot interaction</article-title>. <source>Int. J. Hum. Comput. Stud.</source> <volume>64</volume>, <fpage>962</fpage>&#x2013;<lpage>973</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijhcs.2006.05.002</pub-id></citation>
</ref>
<ref id="ref77">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>S. A.</given-names></name> <name><surname>Liang</surname> <given-names>Y.</given-names></name></person-group> (<year>2016</year>). <article-title>The role of reciprocity in verbally persuasive robots</article-title>. <source>Cyberpsychol. Behav. Soc. Netw.</source> <volume>19</volume>, <fpage>524</fpage>&#x2013;<lpage>527</lpage>. doi: <pub-id pub-id-type="doi">10.1089/cyber.2016.0124</pub-id>, PMID: <pub-id pub-id-type="pmid">27447027</pub-id></citation>
</ref>
<ref id="ref78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>S. A.</given-names></name> <name><surname>Liang</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Robotic foot-in-the-door: using sequential-request persuasive strategies in human-robot interaction</article-title>. <source>Comput. Hum. Behav.</source> <volume>90</volume>, <fpage>351</fpage>&#x2013;<lpage>356</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2018.08.026</pub-id></citation>
</ref>
<ref id="ref79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>S.</given-names></name> <name><surname>Ratan</surname> <given-names>R.</given-names></name> <name><surname>Park</surname> <given-names>T.</given-names></name></person-group> (<year>2019</year>). <article-title>The voice makes the car: enhancing autonomous vehicle perceptions and adoption intention through voice agent gender and style</article-title>. <source>Multimod. Technol. Interact.</source> <volume>3</volume>:<fpage>20</fpage>. doi: <pub-id pub-id-type="doi">10.3390/mti3010020</pub-id></citation>
</ref>
<ref id="ref80">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lew</surname> <given-names>Z.</given-names></name> <name><surname>Walther</surname> <given-names>J. B.</given-names></name></person-group> (<year>2022</year>). <article-title>Social scripts and expectancy violations: evaluating communication with human or AI chatbot interactants</article-title>. <source>Media Psychol.</source> <volume>26</volume>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1080/15213269.2022.2084111</pub-id></citation>
</ref>
<ref id="ref81">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Loh</surname> <given-names>J.</given-names></name> <name><surname>Loh</surname> <given-names>W.</given-names></name></person-group> (<year>2023</year>). <source>Social Robotics and the Good Life: The Normative Side of Forming Emotional Bonds With Robots. transcript Verlag. Bielefeld, Germany</source>.</citation>
</ref>
<ref id="ref82">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lucas</surname> <given-names>G. M.</given-names></name> <name><surname>Gratch</surname> <given-names>J.</given-names></name> <name><surname>King</surname> <given-names>A.</given-names></name> <name><surname>Morency</surname> <given-names>L.</given-names></name></person-group> (<year>2014</year>). <article-title>It&#x2019;s only a computer: virtual humans increase willingness to disclose</article-title>. <source>Comput. Hum. Behav.</source> <volume>37</volume>, <fpage>94</fpage>&#x2013;<lpage>100</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2014.04.043</pub-id></citation>
</ref>
<ref id="ref83">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luria</surname> <given-names>M.</given-names></name> <name><surname>Sheriff</surname> <given-names>O.</given-names></name> <name><surname>Boo</surname> <given-names>M.</given-names></name> <name><surname>Forlizzi</surname> <given-names>J.</given-names></name> <name><surname>Zoran</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Destruction, catharsis, and emotional release in human-robot interaction</article-title>. <source>ACM Trans. Hum. Robot Interaction</source> <volume>9</volume>, <fpage>1</fpage>&#x2013;<lpage>19</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3385007</pub-id></citation>
</ref>
<ref id="ref84">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacDorman</surname> <given-names>K. F.</given-names></name> <name><surname>Chattopadhyay</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Reducing consistency in human realism increases the uncanny valley effect; increasing category uncertainty does not</article-title>. <source>Cognition</source> <volume>146</volume>, <fpage>190</fpage>&#x2013;<lpage>205</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2015.09.019</pub-id>, PMID: <pub-id pub-id-type="pmid">26435049</pub-id></citation>
</ref>
<ref id="ref85">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacDorman</surname> <given-names>K. F.</given-names></name> <name><surname>Entezari</surname> <given-names>S. O.</given-names></name></person-group> (<year>2015</year>). <article-title>Individual differences predict sensitivity to the uncanny valley</article-title>. <source>Interact. Stud.</source> <volume>16</volume>, <fpage>141</fpage>&#x2013;<lpage>172</lpage>. doi: <pub-id pub-id-type="doi">10.1075/is.16.2.01mac</pub-id></citation>
</ref>
<ref id="ref86">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marchesi</surname> <given-names>S.</given-names></name> <name><surname>De Tommaso</surname> <given-names>D.</given-names></name> <name><surname>P&#x00E9;rez-Osorio</surname> <given-names>J.</given-names></name> <name><surname>Wykowska</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Belief in sharing the same phenomenological experience increases the likelihood of adopting the intentional stance toward a humanoid robot</article-title>. <source>Technol Mind Behav</source> <volume>3</volume>:<fpage>11</fpage>. doi: <pub-id pub-id-type="doi">10.1037/tmb0000072</pub-id></citation>
</ref>
<ref id="ref87">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martini</surname> <given-names>M. C.</given-names></name> <name><surname>Gonzalez</surname> <given-names>C.</given-names></name> <name><surname>Wiese</surname> <given-names>E.</given-names></name></person-group> (<year>2016</year>). <article-title>Seeing minds in others&#x2014;can agents with robotic appearance have human-like preferences?</article-title> <source>PLoS One</source> <volume>11</volume>:<fpage>e0146310</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0146310</pub-id>, PMID: <pub-id pub-id-type="pmid">26745500</pub-id></citation>
</ref>
<ref id="ref88">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>McVee</surname> <given-names>M. B.</given-names></name> <name><surname>Dunsmore</surname> <given-names>K.</given-names></name> <name><surname>Gavelek</surname> <given-names>J. R.</given-names></name></person-group> (<year>2005</year>). <article-title>Schema theory revisited</article-title>. <source>Rev. Educ. Res.</source> <volume>75</volume>, <fpage>531</fpage>&#x2013;<lpage>566</lpage>. doi: <pub-id pub-id-type="doi">10.3102/00346543075004531</pub-id></citation>
</ref>
<ref id="ref89">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Merritt</surname> <given-names>T. R.</given-names></name>
</person-group> (<year>2012</year>). A failure of imagination: how and why people respond differently to human and computer team-mates. ResearchGate. Available at: <ext-link xlink:href="https://www.researchgate.net/publication/292539389_A_failure_of_imagination_How_and_why_people_respond_differently_to_human_and_computer_team-mates" ext-link-type="uri">https://www.researchgate.net/publication/292539389_A_failure_of_imagination_How_and_why_people_respond_differently_to_human_and_computer_team-mates</ext-link></citation>
</ref>
<ref id="ref90">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Metzinger</surname> <given-names>T.</given-names></name>
</person-group> (<year>2021</year>). <article-title>Artificial suffering: an argument for a global moratorium on synthetic phenomenology</article-title>. <source>J. Artific. Intellig. Consciousness</source> <volume>8</volume>, <fpage>43</fpage>&#x2013;<lpage>66</lpage>. doi: <pub-id pub-id-type="doi">10.1142/s270507852150003x</pub-id></citation>
</ref>
<ref id="ref91">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mikropoulos</surname> <given-names>T. A.</given-names></name> <name><surname>Misailidi</surname> <given-names>P.</given-names></name> <name><surname>Bonoti</surname> <given-names>F.</given-names></name></person-group> (<year>2003</year>). <article-title>Attributing human properties to computer artifacts: developmental changes in children's understanding of the animate-inanimate distinction</article-title>. <source>Psychology</source> <volume>10</volume>, <fpage>53</fpage>&#x2013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.12681/psy_hps.23951</pub-id></citation>
</ref>
<ref id="ref92">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Mori</surname> <given-names>M.</given-names></name>
</person-group> (<year>1970</year>). <article-title>Bukimi no tani [the uncanny valley]</article-title>. <source>Energy</source> <volume>7</volume>, <fpage>33</fpage>&#x2013;<lpage>35</lpage>.</citation>
</ref>
<ref id="ref93">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Nagel</surname> <given-names>T.</given-names></name>
</person-group> (<year>1974</year>). <article-title>What is it like to be a bat?</article-title> <source>Philos. Rev.</source> <volume>83</volume>:<fpage>435</fpage>. doi: <pub-id pub-id-type="doi">10.2307/2183914</pub-id></citation>
</ref>
<ref id="ref94">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Nass</surname> <given-names>C.</given-names></name> <name><surname>Brave</surname> <given-names>S.</given-names></name></person-group> (<year>2005</year>). <source>Wired for speech: How voice activates and advances the human-computer relationship. Cambridge, MA: MIT Press</source>.</citation>
</ref>
<ref id="ref95">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nass</surname> <given-names>C.</given-names></name> <name><surname>Moon</surname> <given-names>Y.</given-names></name></person-group> (<year>2000</year>). <article-title>Machines and mindlessness: social responses to computers</article-title>. <source>J. Soc. Issues</source> <volume>56</volume>, <fpage>81</fpage>&#x2013;<lpage>103</lpage>. doi: <pub-id pub-id-type="doi">10.1111/0022-4537.00153</pub-id></citation>
</ref>
<ref id="ref96">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Nass</surname> <given-names>C.</given-names></name> <name><surname>Steuer</surname> <given-names>J.</given-names></name> <name><surname>Tauber</surname> <given-names>E. R.</given-names></name></person-group> (<year>1994</year>). &#x201C;Computers are social actors&#x201D; in <italic>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</italic>, 72&#x2013;78.</citation>
</ref>
<ref id="ref97">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Nigam</surname> <given-names>M. K</given-names></name> <name><surname>Klahr</surname> <given-names>D.</given-names></name></person-group> (<year>2000</year>). &#x201C;If robots make choices, are they alive?: Children's judgments of the animacy of intelligent artifacts&#x201D; in <italic>Proceedings of the Annual Meeting of the Cognitive Science Society</italic>, 22. Available at: <ext-link xlink:href="https://escholarship.org/uc/item/6bw2h51d" ext-link-type="uri">https://escholarship.org/uc/item/6bw2h51d</ext-link></citation>
</ref>
<ref id="ref98">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>O&#x2019;Regan</surname> <given-names>J. K.</given-names></name>
</person-group> (<year>2012</year>). <article-title>How to build a robot that is conscious and feels</article-title>. <source>Mind. Mach.</source> <volume>22</volume>, <fpage>117</fpage>&#x2013;<lpage>136</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11023-012-9279-x</pub-id></citation>
</ref>
<ref id="ref99">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Opfer</surname> <given-names>J. E.</given-names></name>
</person-group> (<year>2002</year>). <article-title>Identifying living and sentient kinds from dynamic information: the case of goal-directed versus aimless autonomous movement in conceptual change</article-title>. <source>Cognition</source> <volume>86</volume>, <fpage>97</fpage>&#x2013;<lpage>122</lpage>. doi: <pub-id pub-id-type="doi">10.1016/s0010-0277(02)00171-3</pub-id>, PMID: <pub-id pub-id-type="pmid">12435533</pub-id></citation>
</ref>
<ref id="ref100">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Opfer</surname> <given-names>J. E.</given-names></name> <name><surname>Siegler</surname> <given-names>R. S.</given-names></name></person-group> (<year>2004</year>). <article-title>Revisiting preschoolers&#x2019; living things concept: a microgenetic analysis of conceptual change in basic biology</article-title>. <source>Cogn. Psychol.</source> <volume>49</volume>, <fpage>301</fpage>&#x2013;<lpage>332</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cogpsych.2004.01.002</pub-id></citation>
</ref>
<ref id="ref101">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ortony</surname> <given-names>A.</given-names></name> <name><surname>Anderson</surname> <given-names>R. C.</given-names></name></person-group> (<year>1977</year>). <article-title>Definite descriptions and semantic memory</article-title>. <source>Cogn. Sci.</source> <volume>1</volume>, <fpage>74</fpage>&#x2013;<lpage>83</lpage>. doi: <pub-id pub-id-type="doi">10.1016/s0364-0213(77)80005-0</pub-id></citation>
</ref>
<ref id="ref102">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Pa&#x0142;ka</surname> <given-names>P.</given-names></name>
</person-group> (<year>2023</year>). AI, consumers &#x0026; psychological harm (SSRN scholarly paper 4564997). Available at: <ext-link xlink:href="https://papers.ssrn.com/abstract=4564997" ext-link-type="uri">https://papers.ssrn.com/abstract=4564997</ext-link></citation>
</ref>
<ref id="ref103">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Pankin</surname> <given-names>J.</given-names></name>
</person-group> (<year>2013</year>). Schema theory and concept formation. Presentation at MIT, Fall. Available at: <ext-link xlink:href="https://web.mit.edu/pankin/www/Schema_Theory_and_Concept_Formation.pdf" ext-link-type="uri">https://web.mit.edu/pankin/www/Schema_Theory_and_Concept_Formation.pdf</ext-link></citation>
</ref>
<ref id="ref104">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Pennebaker</surname> <given-names>J. W.</given-names></name>
</person-group> (<year>1997</year>). <article-title>Writing about emotional experiences as a therapeutic process</article-title>. <source>Psychol. Sci.</source> <volume>8</volume>, <fpage>162</fpage>&#x2013;<lpage>166</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1467-9280.1997.tb00403.x</pub-id></citation>
</ref>
<ref id="ref105">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Pennebaker</surname> <given-names>J. W.</given-names></name>
</person-group> (<year>2004</year>). <source>Writing to Heal: A Guided Journal for Recovering from Trauma and Emotional Upheaval</source>. Oakland, CA: New Harbinger Publications.</citation>
</ref>
<ref id="ref106">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pentina</surname> <given-names>I.</given-names></name> <name><surname>Hancock</surname> <given-names>T.</given-names></name> <name><surname>Xie</surname> <given-names>T.</given-names></name></person-group> (<year>2023</year>). <article-title>Exploring relationship development with social chatbots: a mixed-method study of replika</article-title>. <source>Comput. Hum. Behav.</source> <volume>140</volume>:<fpage>107600</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2022.107600</pub-id></citation>
</ref>
<ref id="ref107">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Poushneh</surname> <given-names>A.</given-names></name>
</person-group> (<year>2021</year>). <article-title>Humanizing voice assistant: the impact of voice assistant personality on consumers&#x2019; attitudes and behaviors</article-title>. <source>J. Retail. Consum. Serv.</source> <volume>58</volume>:<fpage>102283</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jretconser.2020.102283</pub-id></citation>
</ref>
<ref id="ref108">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Powers</surname> <given-names>A.</given-names></name> <name><surname>Kiesler</surname> <given-names>S.</given-names></name></person-group> (<year>2006</year>). &#x201C;The advisor robot: tracing people&#x2019;s mental model from a robot&#x2019;s physical attributes&#x201D; in <italic>Proceedings of the 1st ACM SIGCHI/SIGART Conference on Human-Robot Interaction</italic>, Salt Lake City, USA. 218&#x2013;225.</citation>
</ref>
<ref id="ref109">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Premack</surname> <given-names>D.</given-names></name> <name><surname>Woodruff</surname> <given-names>G.</given-names></name></person-group> (<year>1978</year>). <article-title>Does the chimpanzee have a theory of mind?</article-title> <source>Behav. Brain Sci.</source> <volume>1</volume>, <fpage>515</fpage>&#x2013;<lpage>526</lpage>. doi: <pub-id pub-id-type="doi">10.1017/s0140525x00076512</pub-id></citation>
</ref>
<ref id="ref110">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Prinz</surname> <given-names>W.</given-names></name>
</person-group> (<year>2017</year>). <article-title>Modeling self on others: an import theory of subjectivity and selfhood</article-title>. <source>Conscious. Cogn.</source> <volume>49</volume>, <fpage>347</fpage>&#x2013;<lpage>362</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.concog.2017.01.020</pub-id></citation>
</ref>
<ref id="ref111">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>P&#x00FC;tten</surname> <given-names>A. M. R. D.</given-names></name> <name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name></person-group> (<year>2014</year>). <article-title>How design characteristics of robots determine evaluation and uncanny valley related responses</article-title>. <source>Comput. Hum. Behav.</source> <volume>36</volume>, <fpage>422</fpage>&#x2013;<lpage>439</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2014.03.066</pub-id></citation>
</ref>
<ref id="ref112">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Quadflieg</surname> <given-names>S.</given-names></name> <name><surname>Ul-Haq</surname> <given-names>I.</given-names></name> <name><surname>Mavridis</surname> <given-names>N.</given-names></name></person-group> (<year>2016</year>). <article-title>Now you feel it, now you don&#x2019;t</article-title>. <source>Interact. Stud.</source> <volume>17</volume>, <fpage>211</fpage>&#x2013;<lpage>247</lpage>. doi: <pub-id pub-id-type="doi">10.1075/is.17.2.03qua</pub-id></citation>
</ref>
<ref id="ref113">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Rabinowitz</surname> <given-names>N. C.</given-names></name> <name><surname>Perbet</surname> <given-names>F.</given-names></name> <name><surname>Song</surname> <given-names>H. F.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Eslami</surname> <given-names>S. M. A.</given-names></name> <name><surname>Botvinick</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). Machine theory of mind. arXiv [Preprint]. doi: <pub-id pub-id-type="doi">10.48550/ARXIV.1802.07740</pub-id></citation>
</ref>
<ref id="ref114">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rhim</surname> <given-names>J.</given-names></name> <name><surname>Kwak</surname> <given-names>M.</given-names></name> <name><surname>Gong</surname> <given-names>Y.</given-names></name> <name><surname>Gweon</surname> <given-names>G.</given-names></name></person-group> (<year>2022</year>). <article-title>Application of humanization to survey chatbots: change in chatbot perception, interaction experience, and survey data quality</article-title>. <source>Comput. Hum. Behav.</source> <volume>126</volume>:<fpage>107034</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2021.107034</pub-id></citation>
</ref>
<ref id="ref115">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rickenberg</surname> <given-names>R.</given-names></name> <name><surname>Reeves</surname> <given-names>B.</given-names></name></person-group> (<year>2000</year>). <article-title>The effects of animated characters on anxiety, task performance, and evaluations of user interfaces</article-title>. <source>Lette. CHI</source> <volume>2000</volume>, <fpage>49</fpage>&#x2013;<lpage>56</lpage>. doi: <pub-id pub-id-type="doi">10.1145/332040.332406</pub-id></citation>
</ref>
<ref id="ref116">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roselli</surname> <given-names>C.</given-names></name> <name><surname>Navare</surname> <given-names>U. P.</given-names></name> <name><surname>Ciardo</surname> <given-names>F.</given-names></name> <name><surname>Wykowska</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Type of education affects individuals&#x2019; adoption of intentional stance towards robots: an EEG study</article-title>. <source>Int. J. Soc. Robot.</source> <volume>16</volume>, <fpage>185</fpage>&#x2013;<lpage>196</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12369-023-01073-2</pub-id></citation>
</ref>
<ref id="ref117">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>R&#x00F6;ska-Hardy</surname> <given-names>L.</given-names></name>
</person-group> (<year>2008</year>). <source>&#x201C;Theory (Simulation Theory, Theory of Mind)&#x201D;</source>, <italic>in Encyclopedia of Neuroscience</italic>. Eds. M. Binder, N. Hirokawa, U. Windhorst and H. Hirsch, <publisher-name>Berlin/Heidelberg Germany: Springer eBooks</publisher-name>, <fpage>4064</fpage>&#x2013;<lpage>4067</lpage>.</citation>
</ref>
<ref id="ref118">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Ryland</surname> <given-names>H.</given-names></name>
</person-group> (<year>2021a</year>). <article-title>It&#x2019;s friendship, Jim, but not as we know it: a degrees-of-friendship view of human&#x2013;robot friendships</article-title>. <source>Mind. Mach.</source> <volume>31</volume>, <fpage>377</fpage>&#x2013;<lpage>393</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11023-021-09560-z</pub-id>, PMID: <pub-id pub-id-type="pmid">33840900</pub-id></citation>
</ref>
<ref id="ref119">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Ryland</surname> <given-names>H.</given-names></name>
</person-group> (<year>2021b</year>). <article-title>Could you hate a robot? And does it matter if you could?</article-title> <source>AI &#x0026; Soc.</source> <volume>36</volume>, <fpage>637</fpage>&#x2013;<lpage>649</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00146-021-01173-5</pub-id></citation>
</ref>
<ref id="ref120">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scaife</surname> <given-names>M.</given-names></name> <name><surname>Van Duuren</surname> <given-names>M. V.</given-names></name></person-group> (<year>1995</year>). <article-title>Do computers have brains? What children believe about intelligent artifacts</article-title>. <source>Br. J. Dev. Psychol.</source> <volume>13</volume>, <fpage>367</fpage>&#x2013;<lpage>377</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.2044-835x.1995.tb00686.x</pub-id></citation>
</ref>
<ref id="ref121">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Seeger</surname> <given-names>A.</given-names></name> <name><surname>Heinzl</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). &#x201C;<article-title>Human versus machine: contingency factors of anthropomorphism as a trust-inducing design strategy for conversational agents</article-title>&#x201D; in <source>Lecture Notes in Information Systems and Organisation</source>, Eds. F. D. Davis, R. Riedl, J. vom Brocke, P.-M. L&#x00E9;ger, and A. B. Randolph. Springer International Publishing. <fpage>129</fpage>&#x2013;<lpage>139</lpage>.</citation>
</ref>
<ref id="ref122">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Severson</surname> <given-names>R. L.</given-names></name> <name><surname>Carlson</surname> <given-names>S. M.</given-names></name></person-group> (<year>2010</year>). <article-title>Behaving as or behaving as if? Children&#x2019;s conceptions of personified robots and the emergence of a new ontological category</article-title>. <source>Neural Netw.</source> <volume>23</volume>, <fpage>1099</fpage>&#x2013;<lpage>1103</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neunet.2010.08.014</pub-id></citation>
</ref>
<ref id="ref123">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Severson</surname> <given-names>R. L.</given-names></name> <name><surname>Woodard</surname> <given-names>S. R.</given-names></name></person-group> (<year>2018</year>). <article-title>Imagining others&#x2019; minds: the positive relation between children&#x2019;s role play and anthropomorphism</article-title>. <source>Front. Psychol.</source> <volume>9</volume>:<fpage>2140</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2018.02140</pub-id>, PMID: <pub-id pub-id-type="pmid">30483176</pub-id></citation>
</ref>
<ref id="ref124">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shank</surname> <given-names>D. B.</given-names></name> <name><surname>North</surname> <given-names>M.</given-names></name> <name><surname>Arnold</surname> <given-names>C.</given-names></name> <name><surname>Gamez</surname> <given-names>P.</given-names></name></person-group> (<year>2021</year>). <article-title>Can mind perception explain virtuous character judgments of artificial intelligence?</article-title> <source>Technol Mind Behav</source> <volume>2</volume>. doi: <pub-id pub-id-type="doi">10.1037/tmb0000047</pub-id></citation>
</ref>
<ref id="ref125">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spence</surname> <given-names>P. R.</given-names></name> <name><surname>Westerman</surname> <given-names>D.</given-names></name> <name><surname>Edwards</surname> <given-names>C.</given-names></name> <name><surname>Edwards</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Welcoming our robot overlords: initial expectations about interaction with a robot</article-title>. <source>Commun. Res. Rep.</source> <volume>31</volume>, <fpage>272</fpage>&#x2013;<lpage>280</lpage>. doi: <pub-id pub-id-type="doi">10.1080/08824096.2014.924337</pub-id></citation>
</ref>
<ref id="ref126">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sproull</surname> <given-names>L.</given-names></name> <name><surname>Subramani</surname> <given-names>M. R.</given-names></name> <name><surname>Kiesler</surname> <given-names>S.</given-names></name> <name><surname>Walker</surname> <given-names>J.</given-names></name> <name><surname>Waters</surname> <given-names>K.</given-names></name></person-group> (<year>1996</year>). <article-title>When the interface is a face</article-title>. <source>Hum. Comput. Interact.</source> <volume>11</volume>, <fpage>97</fpage>&#x2013;<lpage>124</lpage>. doi: <pub-id pub-id-type="doi">10.1207/s15327051hci1102_1</pub-id></citation>
</ref>
<ref id="ref127">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Srinivasan</surname> <given-names>V.</given-names></name> <name><surname>Takayama</surname> <given-names>L.</given-names></name></person-group> (<year>2016</year>). &#x201C;Help me please: robot politeness strategies for soliciting help from humans&#x201D; in <italic>CHI&#x2018;16</italic>. Association for Computing Machinery, New York, NY, USA. 4945&#x2013;4955.</citation>
</ref>
<ref id="ref128">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>J.</given-names></name> <name><surname>Appel</surname> <given-names>M.</given-names></name> <name><surname>Jost</surname> <given-names>A.</given-names></name> <name><surname>Ohler</surname> <given-names>P.</given-names></name></person-group> (<year>2020</year>). <article-title>Matter over mind? How the acceptance of digital entities depends on their appearance, mental prowess, and the interaction between both</article-title>. <source>Int. J. Hum. Comput. Stud.</source> <volume>142</volume>:<fpage>102463</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijhcs.2020.102463</pub-id></citation>
</ref>
<ref id="ref129">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stein</surname> <given-names>J.</given-names></name> <name><surname>Ohler</surname> <given-names>P.</given-names></name></person-group> (<year>2017</year>). <article-title>Venturing into the uncanny valley of mind&#x2014;the influence of mind attribution on the acceptance of human-like characters in a virtual reality setting</article-title>. <source>Cognition</source> <volume>160</volume>, <fpage>43</fpage>&#x2013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2016.12.010</pub-id>, PMID: <pub-id pub-id-type="pmid">28043026</pub-id></citation>
</ref>
<ref id="ref130">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sundar</surname> <given-names>S. S.</given-names></name> <name><surname>Nass</surname> <given-names>C.</given-names></name></person-group> (<year>2000</year>). <article-title>Source orientation in human-computer interaction</article-title>. <source>Commun. Res.</source> <volume>27</volume>, <fpage>683</fpage>&#x2013;<lpage>703</lpage>. doi: <pub-id pub-id-type="doi">10.1177/009365000027006001</pub-id></citation>
</ref>
<ref id="ref131">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>&#x015A;widerska</surname> <given-names>A.</given-names></name> <name><surname>K&#x00FC;ster</surname> <given-names>D.</given-names></name></person-group> (<year>2018</year>). <article-title>Avatars in pain: visible harm enhances mind perception in humans and robots</article-title>. <source>Perception</source> <volume>47</volume>, <fpage>1139</fpage>&#x2013;<lpage>1152</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0301006618809919</pub-id>, PMID: <pub-id pub-id-type="pmid">30411653</pub-id></citation>
</ref>
<ref id="ref132">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ta</surname> <given-names>V. P.</given-names></name> <name><surname>Griffith</surname> <given-names>C.</given-names></name> <name><surname>Boatfield</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Civitello</surname> <given-names>M.</given-names></name> <name><surname>Bader</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>User experiences of social support from companion chatbots in everyday contexts: thematic analysis</article-title>. <source>J. Med. Internet Res.</source> <volume>22</volume>:<fpage>e16235</fpage>. doi: <pub-id pub-id-type="doi">10.2196/16235</pub-id>, PMID: <pub-id pub-id-type="pmid">32141837</pub-id></citation>
</ref>
<ref id="ref133">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tanibe</surname> <given-names>T.</given-names></name> <name><surname>Hashimoto</surname> <given-names>T.</given-names></name> <name><surname>Karasawa</surname> <given-names>K.</given-names></name></person-group> (<year>2017</year>). <article-title>We perceive a mind in a robot when we help it</article-title>. <source>PLoS One</source> <volume>12</volume>:<fpage>e0180952</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0180952</pub-id>, PMID: <pub-id pub-id-type="pmid">28727735</pub-id></citation>
</ref>
<ref id="ref134">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Taylor</surname> <given-names>J.</given-names></name> <name><surname>Weiss</surname> <given-names>S. M.</given-names></name> <name><surname>Marshall</surname> <given-names>P.</given-names></name></person-group> (<year>2020</year>). <article-title>Alexa, how are you feeling today?</article-title> <source>Interact. Stud.</source> <volume>21</volume>, <fpage>329</fpage>&#x2013;<lpage>352</lpage>. doi: <pub-id pub-id-type="doi">10.1075/is.19015.tay</pub-id></citation>
</ref>
<ref id="ref135">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Teubner</surname> <given-names>T.</given-names></name> <name><surname>Adam</surname> <given-names>M. T. P.</given-names></name> <name><surname>Riordan</surname> <given-names>R.</given-names></name></person-group> (<year>2015</year>). <article-title>The impact of computerized agents on immediate emotions, overall arousal and bidding behavior in electronic auctions</article-title>. <source>J. Assoc. Inf. Syst.</source> <volume>16</volume>, <fpage>838</fpage>&#x2013;<lpage>879</lpage>. doi: <pub-id pub-id-type="doi">10.17705/1jais.00412</pub-id></citation>
</ref>
<ref id="ref136">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tharp</surname> <given-names>M.</given-names></name> <name><surname>Holtzman</surname> <given-names>N. S.</given-names></name> <name><surname>Eadeh</surname> <given-names>F. R.</given-names></name></person-group> (<year>2016</year>). <article-title>Mind perception and individual differences: a replication and extension</article-title>. <source>Basic Appl. Soc. Psychol.</source> <volume>39</volume>, <fpage>68</fpage>&#x2013;<lpage>73</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01973533.2016.1256287</pub-id></citation>
</ref>
<ref id="ref137">
<citation citation-type="book"><person-group person-group-type="author">
<name><surname>Tononi</surname> <given-names>G.</given-names></name>
</person-group> (<year>2007</year>). &#x201C;<article-title>The information integration theory of consciousness</article-title>,&#x201D; in <source>The Blackwell companion to consciousness</source>. Eds. M. Velmans and S. Schneider (<publisher-name>Oxford: Blackwell</publisher-name>), <fpage>287</fpage>&#x2013;<lpage>299</lpage>.</citation>
</ref>
<ref id="ref138">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll1">UNESCO</collab>
</person-group> (<year>2020</year>). Artificial intelligence and gender equality: Key findings of UNESCO&#x2019;s Global Dialogue&#x2014;UNESCO Digital Library. Available at: <ext-link xlink:href="https://unesdoc.unesco.org/ark:/48223/pf0000374174" ext-link-type="uri">https://unesdoc.unesco.org/ark:/48223/pf0000374174</ext-link> (Accessed October 13, 2023).</citation>
</ref>
<ref id="ref139">
<citation citation-type="other"><person-group person-group-type="author"><collab id="coll2">UNESCO &#x0026; EQUALS Skills Coalition</collab> <name><surname>West</surname> <given-names>M.</given-names></name> <name><surname>Kraut</surname> <given-names>R.</given-names></name> <name><surname>Chew</surname> <given-names>H. E.</given-names></name></person-group> (<year>2019</year>). I&#x2019;d blush if I could: Closing gender divides in digital skills through education&#x2014;UNESCO Digital Library.</citation>
</ref>
<ref id="ref140">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Velez</surname> <given-names>J. A.</given-names></name> <name><surname>Loof</surname> <given-names>T.</given-names></name> <name><surname>Smith</surname> <given-names>C. A.</given-names></name> <name><surname>Jordan</surname> <given-names>J. M.</given-names></name> <name><surname>Villarreal</surname> <given-names>J. A.</given-names></name> <name><surname>Ewoldsen</surname> <given-names>D. R.</given-names></name></person-group> (<year>2019</year>). <article-title>Switching schemas: do effects of mindless interactions with agents carry over to humans and vice versa?</article-title> <source>J. Comput.-Mediat. Commun.</source> <volume>24</volume>, <fpage>335</fpage>&#x2013;<lpage>352</lpage>. doi: <pub-id pub-id-type="doi">10.1093/jcmc/zmz016</pub-id></citation>
</ref>
<ref id="ref141">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vogeley</surname> <given-names>K.</given-names></name> <name><surname>Bente</surname> <given-names>G.</given-names></name></person-group> (<year>2010</year>). <article-title>&#x201C;Artificial humans&#x201D;: psychology and neuroscience perspectives on embodiment and nonverbal communication</article-title>. <source>Neural Netw.</source> <volume>23</volume>, <fpage>1077</fpage>&#x2013;<lpage>1090</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neunet.2010.06.003</pub-id>, PMID: <pub-id pub-id-type="pmid">20620019</pub-id></citation>
</ref>
<ref id="ref142">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Von Der P&#x00FC;tten</surname> <given-names>A. M.</given-names></name> <name><surname>Reipen</surname> <given-names>C.</given-names></name> <name><surname>Wiedmann</surname> <given-names>A.</given-names></name> <name><surname>Kopp</surname> <given-names>S.</given-names></name> <name><surname>Kr&#x00E4;mer</surname> <given-names>N. C.</given-names></name></person-group> (<year>2009</year>). &#x201C;<article-title>The impact of different embodied agent-feedback on users&#x2019; behavior</article-title>&#x201D; in <source>Lecture Notes in Computer Science</source>, Eds. Z. Ruttkay, M. Kipp, A. Nijholt, and H. H. Vilhj&#x00E1;lmsson, <fpage>549</fpage>&#x2013;<lpage>551</lpage>.</citation>
</ref>
<ref id="ref143">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Saha</surname> <given-names>K.</given-names></name> <name><surname>Gregori</surname> <given-names>E.</given-names></name> <name><surname>Joyner</surname> <given-names>D.</given-names></name> <name><surname>Goel</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). &#x201C;Towards mutual theory of mind in human-ai interaction: how language reflects what students perceive about a virtual teaching assistant&#x201D; in <italic>Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI '21)</italic>. Association for Computing Machinery, New York, NY, USA, 384, 1&#x2013;14.</citation>
</ref>
<ref id="ref144">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ward</surname> <given-names>A. F.</given-names></name> <name><surname>Olsen</surname> <given-names>A. S.</given-names></name> <name><surname>Wegner</surname> <given-names>D. M.</given-names></name></person-group> (<year>2013</year>). <article-title>The harm-made mind: observing victimization augments attribution of minds to vegetative patients, robots, and the dead</article-title>. <source>Psychol. Sci.</source> <volume>24</volume>, <fpage>1437</fpage>&#x2013;<lpage>1445</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797612472343</pub-id>, PMID: <pub-id pub-id-type="pmid">23749051</pub-id></citation>
</ref>
<ref id="ref145">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Waytz</surname> <given-names>A.</given-names></name> <name><surname>Cacioppo</surname> <given-names>J.</given-names></name> <name><surname>Epley</surname> <given-names>N.</given-names></name></person-group> (<year>2010</year>). <article-title>Who sees human?: the stability and importance of individual differences in anthropomorphism</article-title>. <source>Perspect. Psychol. Sci.</source> <volume>5</volume>, <fpage>219</fpage>&#x2013;<lpage>232</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1745691610369336</pub-id>, PMID: <pub-id pub-id-type="pmid">24839457</pub-id></citation>
</ref>
<ref id="ref9005">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wimmer</surname> <given-names>H.</given-names></name> <name><surname>Perner</surname> <given-names>J.</given-names></name></person-group> (<year>1983</year>). <article-title>Beliefs about beliefs: representation and constraining function of wrong beliefs in young children&#x2019;s understanding of deception</article-title>. <source>Cognition</source> <volume>13</volume>, <fpage>103</fpage>&#x2013;<lpage>128</lpage>.</citation>
</ref>
<ref id="ref146">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilkenfeld</surname> <given-names>J. N.</given-names></name> <name><surname>Yan</surname> <given-names>B.</given-names></name> <name><surname>Huang</surname> <given-names>J.</given-names></name> <name><surname>Luo</surname> <given-names>G.</given-names></name> <name><surname>Algas</surname> <given-names>K.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x201C;AI love you&#x201D;: linguistic convergence in human-chatbot relationship development</article-title>. <source>Academy of Management Proceedings</source>, <volume>17063</volume>. doi: <pub-id pub-id-type="doi">10.5465/AMBPP.2022.17063abstract</pub-id></citation>
</ref>
<ref id="ref147">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Worchel</surname> <given-names>P.</given-names></name>
</person-group> (<year>1957</year>). <article-title>Catharsis and the relief of hostility</article-title>. <source>J. Abnorm. Soc. Psychol.</source> <volume>55</volume>, <fpage>238</fpage>&#x2013;<lpage>243</lpage>. doi: <pub-id pub-id-type="doi">10.1037/h0042557</pub-id>, PMID: <pub-id pub-id-type="pmid">13474894</pub-id></citation>
</ref>
<ref id="ref148">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Xie</surname> <given-names>T.</given-names></name> <name><surname>Pentina</surname> <given-names>I.</given-names></name></person-group> (<year>2022</year>). &#x201C;Attachment theory as a framework to understand relationships with social Chatbots: a case study of Replika&#x201D; in <italic>Proceedings of the 55th Annual Hawaii International Conference on System Sciences</italic>.</citation>
</ref>
<ref id="ref149">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Yampolskiy</surname> <given-names>R. V.</given-names></name>
</person-group> (<year>2018</year>). <article-title>Artificial consciousness: an illusionary solution to the hard problem</article-title>. <source>Reti Saperi Linguag.</source> <volume>2</volume>, <fpage>287</fpage>&#x2013;<lpage>318</lpage>. doi: <pub-id pub-id-type="doi">10.12832/92302</pub-id></citation>
</ref>
<ref id="ref150">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Young</surname> <given-names>A. D.</given-names></name> <name><surname>Monroe</surname> <given-names>A. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Autonomous morals: inferences of mind predict acceptance of AI behavior in sacrificial moral dilemmas</article-title>. <source>J. Exp. Soc. Psychol.</source> <volume>85</volume>:<fpage>103870</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jesp.2019.103870</pub-id></citation>
</ref>
<ref id="ref151">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhan</surname> <given-names>J.</given-names></name> <name><surname>Yu</surname> <given-names>S.</given-names></name> <name><surname>Cai</surname> <given-names>R.</given-names></name> <name><surname>Xu</surname> <given-names>H.</given-names></name> <name><surname>Yang</surname> <given-names>Y.</given-names></name> <name><surname>Ren</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>The effects of written catharsis on anger relief</article-title>. <source>PsyCh J.</source> <volume>10</volume>, <fpage>868</fpage>&#x2013;<lpage>877</lpage>. doi: <pub-id pub-id-type="doi">10.1002/pchj.490</pub-id>, PMID: <pub-id pub-id-type="pmid">34636166</pub-id></citation>
</ref>
<ref id="ref9002">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Fei</surname> <given-names>Z.</given-names></name> <name><surname>He</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>How human&#x2013;chatbot interaction impairs charitable giving: the role of moral judgment</article-title>. <source>Journal of Business Ethics</source>, <volume>178</volume>, <fpage>849</fpage>&#x2013;<lpage>865</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10551-022-05045-w</pub-id></citation>
</ref>
</ref-list>
</back>
</article>