<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">653537</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2021.653537</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>I Am Looking for Your Mind: Pupil Dilation Predicts Individual Differences in Sensitivity to Hints of Human-Likeness in Robot Behavior</article-title>
<alt-title alt-title-type="left-running-head">Marchesi et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">I Am Looking for Your Mind.</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Marchesi</surname>
<given-names>Serena</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/486151/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Bossi</surname>
<given-names>Francesco</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/246570/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ghiglino</surname>
<given-names>Davide</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/578132/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>De Tommaso</surname>
<given-names>Davide</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/625809/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Wykowska</surname>
<given-names>Agnieszka</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/52884/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Social Cognition in Human-Robot Interaction, Istituto Italiano di Tecnologia, <addr-line>Genova</addr-line>, <country>Italy</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>Department of Computer Science, Faculty of Science and Engineering, Manchester University, <addr-line>Manchester</addr-line>, <country>United&#x20;Kingdom</country>
</aff>
<aff id="aff3">
<label>
<sup>3</sup>
</label>IMT School for Advanced Studies, <addr-line>Lucca</addr-line>, <country>Italy</country>
</aff>
<aff id="aff4">
<label>
<sup>4</sup>
</label>Dipartimento di Informatica, Bioingegneria, Robotica e Ingegneria dei Sistemi, Universit&#xe0; di Genova, <addr-line>Genova</addr-line>, <country>Italy</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3743/overview">Michela Balconi</ext-link>, Catholic University of the Sacred Heart, Italy</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/63824/overview">Davide Crivelli</ext-link>, Catholic University of the Sacred Heart, Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/425311/overview">Laura Fiorini</ext-link>, University of Florence, Italy</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Agnieszka Wykowska, <email>Agnieszka.Wykowska@iit.it</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Human-Robot Interaction, a section of the journal Frontiers in Robotics and&#x20;AI</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>18</day>
<month>06</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>8</volume>
<elocation-id>653537</elocation-id>
<history>
<date date-type="received">
<day>14</day>
<month>01</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>05</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Marchesi, Bossi, Ghiglino, De Tommaso and Wykowska.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Marchesi, Bossi, Ghiglino, De Tommaso and Wykowska</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>The presence of artificial agents in our everyday lives is continuously increasing. Hence, the question of how human social cognition mechanisms are activated in interactions with artificial agents, such as humanoid robots, is frequently being asked. One interesting question is whether humans perceive humanoid robots as mere artifacts (interpreting their behavior with reference to their function, thereby adopting the design stance) or as intentional agents (interpreting their behavior with reference to mental states, thereby adopting the intentional stance). Due to their humanlike appearance, humanoid robots might be capable of evoking the intentional stance. On the other hand, the knowledge that humanoid robots are only artifacts should call for adopting the design stance. Thus, observing a humanoid robot might evoke a cognitive conflict between the natural tendency of adopting the intentional stance and the knowledge about the actual nature of robots, which should elicit the design stance. In the present study, we investigated the cognitive conflict hypothesis by measuring participants&#x2019; pupil dilation during the completion of the InStance Test. Prior to each pupillary recording, participants were instructed to observe the humanoid robot iCub behaving in two different ways (either machine-like or humanlike behavior). Results showed that pupil dilation and response time patterns were predictive of individual biases in the adoption of the intentional or design stance in the IST. These results may suggest individual differences in mental effort and cognitive flexibility in reading and interpreting the behavior of an artificial&#x20;agent.</p>
</abstract>
<kwd-group>
<kwd>intentional stance</kwd>
<kwd>human&#x2013;robot interaction</kwd>
<kwd>pupil dilation</kwd>
<kwd>individual differences</kwd>
<kwd>human-likeness</kwd>
</kwd-group>
<contract-sponsor id="cn001">European Research Council<named-content content-type="fundref-id">10.13039/501100000781</named-content>
</contract-sponsor>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Introduction</title>
<p>Artificial agents are becoming increasingly present in our daily environment. From vocal assistants to humanoid robots, we are observing a change in the role played by these new entities in our lives (<xref ref-type="bibr" rid="B47">Samani et&#x20;al., 2013</xref>). However, it is still a matter of debate as to whether humans perceive embodied artificial agents, such as humanoid robots, as social and intentional agents or simple artifacts (<xref ref-type="bibr" rid="B25">Hortensius and Cross, 2018</xref>; <xref ref-type="bibr" rid="B56">Wykowska et&#x20;al., 2016</xref>). Several researchers have investigated whether humans would deploy similar sociocognitive mechanisms when presented with a novel type of (artificial) interaction partner (i.e.,&#x20;humanoid robots) as they would activate in an interaction with another human (<xref ref-type="bibr" rid="B48">Saygin et&#x20;al., 2012</xref>; <xref ref-type="bibr" rid="B9">Cross et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B57">Wykowska, 2020</xref>).</p>
<p>In this article, we report a study in which we investigated whether robot behavior&#x2014;by being humanlike or mechanistic&#x2014;can modulate the likelihood of people adopting the intentional stance (<xref ref-type="bibr" rid="B12">Dennett, 1971</xref>). The study also addressed the question of whether pupil dilation&#x2014;a marker of cognitive effort&#x2014;can predict the type of stance people would adopt toward the robots, and how all these factors are related to individual &#x201c;mentalistically inclined&#x201d; or &#x201c;mechanistically inclined&#x201d; biases.</p>
<p>According to <xref ref-type="bibr" rid="B12">Dennett (1971)</xref>, the <italic>intentional stance</italic> is a strategy that humans spontaneously adopt to interpret and predict the behavior of other humans, referring to the underpinning mental states (i.e.,&#x20;desires, intentions, and beliefs). The intentional stance is an efficient and flexible strategy, as it allows individuals to promptly interpret and predict others&#x2019; behavior. However, when interacting with nonbiological systems, humans might adopt a different strategy, which Dennett describes as the <italic>design stance</italic>. According to the author, we deploy this strategy when explaining a system&#x2019;s behavior based on the way it is designed to function. The intuition behind Dennett&#x2019;s definition is that humans would adopt the stance that allows them to predict and interpret the behavior of a system in the most efficient way. Thus, the adoption of either stance is not predefined; on the contrary, if the adopted stance is revealed as inefficient, one can switch to the other stance.</p>
<p>Several authors have demonstrated that people tend to spontaneously adopt the intentional stance toward other human and nonhuman agents (<xref ref-type="bibr" rid="B1">Abu-Akel et&#x20;al., 2020</xref>; <xref ref-type="bibr" rid="B21">Happ&#xe9; and Frith, 1995</xref>; <xref ref-type="bibr" rid="B22">Heider and Simmel, 1944</xref>; <xref ref-type="bibr" rid="B58">Zwickel, 2009</xref>; see also <xref ref-type="bibr" rid="B42">Perez-Osorio and Wykowska, 2019a</xref> and <xref ref-type="bibr" rid="B49">Schellen and Wykowska (2019)</xref> for a review). However, it is not yet entirely clear which of the two aforementioned stances humans would adopt when interacting with humanoid robots. On the one hand, humanoid robots present humanlike characteristics, such as physical appearance (<xref ref-type="bibr" rid="B15">Fink, 2012</xref>). Hence, it is possible that these characteristics elicit representations and heuristics similar to those that we rely on when interacting with humans (<xref ref-type="bibr" rid="B2">Airenti, 2018</xref>; <xref ref-type="bibr" rid="B10">Dacey, 2017</xref>; <xref ref-type="bibr" rid="B54">Waytz et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B59">Z&#x142;otowski et&#x20;al., 2015</xref>). This might trigger the neural representations related to the adoption of the intentional stance (<xref ref-type="bibr" rid="B6">Chaminade et&#x20;al., 2012</xref>; <xref ref-type="bibr" rid="B16">Gallagher et&#x20;al., 2002</xref>; <xref ref-type="bibr" rid="B38">Ozdem et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B51">Spunt et&#x20;al., 2015</xref>). Indeed, the presence of humanlike characteristics is one of the key factors that, according to <xref ref-type="bibr" rid="B14">Epley et&#x20;al. (2007)</xref>, contribute to anthropomorphism toward artificial agents, facilitating the adoption of the intentional stance. 
On the other hand, humanoid robots are man-made artifacts, and therefore, they might evoke the adoption of the design stance, as they can be perceived simply as machines (<xref ref-type="bibr" rid="B55">Wiese et&#x20;al., 2017</xref>).</p>
<p>Recent literature has addressed the issue of adopting the intentional stance toward robots. For example, <xref ref-type="bibr" rid="B52">Thellman et&#x20;al., 2017</xref> presented a series of images and explicitly asked their participants to rate the perceived intentionality of the depicted agent (either a human or a humanoid robotic agent). The authors reported that participants perceived similar levels of intentionality behind the behavior of the human and the robot agents. <xref ref-type="bibr" rid="B17">Marchesi et&#x20;al. (2019)</xref> investigated the attribution of intentionality to humanoid robots, developing a novel tool, the InStance Test (IST). The IST consists of a series of pictorial &#x201c;scenarios&#x201d; that depict the humanoid robot iCub (<xref ref-type="bibr" rid="B37">Metta et&#x20;al., 2010</xref>) involved in several activities. In <xref ref-type="bibr" rid="B17">Marchesi et&#x20;al. (2019)</xref>, participants were asked to choose between mentalistic and mechanistic descriptions of the scenarios. Interestingly, individuals differed with respect to the likelihood of choosing one or the other explanation. Such individual bias in adopting one or the other stance toward humanoid robots called for examining whether it is possible to identify its physiological correlates. In fact, <xref ref-type="bibr" rid="B4">Bossi et&#x20;al. (2020)</xref> examined whether it is possible to relate individual participants&#x2019; EEG activity in the resting state with the individual likelihood of adopting the intentional or design stance in the IST. The authors found that resting-state beta activity differentiated people with respect to the likelihood of adopting either the intentional or the design stance toward the humanoid robot iCub. Recently, <xref ref-type="bibr" rid="B34">Marchesi et&#x20;al. (2021)</xref> have identified a dissociation between participants&#x2019; response time and the stance adopted toward either a human or a humanoid robot. 
Moreover, the individual bias emerged as being linked to participants&#x2019; individual tendency to anthropomorphize nonhuman agents.</p>
<p>Since the literature presents evidence for various individual tendencies to adopt either the design or the intentional stance, in the present study, we aimed at using pupil dilation as a marker of individual bias and cognitive effort invested in the task of describing a robot&#x27;s behavior, by adopting either stance. In addition, we were interested in finding out whether observing different types of robot behavior (humanlike or mechanistic) would have an impact on adopting the two different stances, taking into account individual biases.</p>
<sec id="s1-1">
<title>Pupillometry as an Index of Cognitive Activity</title>
<p>We focused on pupil dilation, as pupillary response is a reliable psychophysiological measure of changes in cognitive activity (for a review, see <xref ref-type="bibr" rid="B31">Larsen and Waters, 2018</xref>; <xref ref-type="bibr" rid="B35">Math&#xf4;t, 2018</xref>). Literature reports show that the pupils dilate in response to various cognitive activities. Previous studies have investigated the mechanisms underpinning pupil dilation, such as emotional and cognitive arousal (how much activation a stimulus can elicit) and cognitive load (the mental effort put into a task) (<xref ref-type="bibr" rid="B31">Larsen and Waters, 2018</xref>; <xref ref-type="bibr" rid="B35">Math&#xf4;t, 2018</xref>). <xref ref-type="bibr" rid="B11">de Gee et&#x20;al., 2014</xref> reported that, in a visual detection task, pupil dilation was greater for participants with a tendency to stick to their decisional strategy (defined as &#x201c;conservative participants&#x201d;) who made a decision not in line with their individual bias in the task. This result shows that pupil dilation can be considered as a marker of conflict between participants&#x2019; individual bias and the decision they take. Moreover, it has been shown that the variation in pupil size is linked to the activity in the locus coeruleus (<xref ref-type="bibr" rid="B26">Jackson et&#x20;al., 2009</xref>) and to the noradrenergic modulation (<xref ref-type="bibr" rid="B31">Larsen and Waters, 2018</xref>), and thus, greater pupil size can be considered as an indicator of general arousal and allocation of attentional resources. Other studies have used pupil dilation as an indicator of cognitive load and mental effort. For example, <xref ref-type="bibr" rid="B23">Hess and Polt (1964)</xref> reported that pupil dilation is closely correlated with problem-solving processes: the more difficult the problem, the greater the pupil size. 
Moreover, the recent literature (<xref ref-type="bibr" rid="B40">Pasquali et&#x20;al., 2021</xref>; <xref ref-type="bibr" rid="B39">Pasquali et&#x20;al., 2020</xref>) assessed the use of pupillometry in real and ecological scenarios where participants interacted with the iCub robot. The authors show that pupillometry can be a reliable measure to investigate cognitive load in the context of human&#x2013;robot interaction. Overall, these studies provide evidence that pupillometry is an adequate method to study individual tendencies and how they are related to resources allocated to a cognitively demanding task (for a comprehensive review, see also <xref ref-type="bibr" rid="B35">Math&#xf4;t, 2018</xref>). Here, we consider pupil dilation as a measure of cognitive effort related to the activation of one or the other stance in the context of one&#x2019;s individual biases.</p>
</sec>
<sec id="s1-2">
<title>Aims of the Study</title>
<p>The aims of the present study were to 1) examine whether observing an embodied humanoid robot exhibiting two different behaviors (a humanlike behavior and a machine-like behavior) would modulate participants&#x2019; individual bias in adopting the intentional or the design stance (assessed with the IST) and 2) explore whether this modulation would be reflected in participants&#x2019; pupil dilation, which is considered as a measure of cognitive effort. More specifically, we explored whether observing a humanoid robot behaving either congruently or incongruently with respect to participants&#x2019; individual tendency to adopt the intentional stance would lead them to experience different levels of cognitive effort in the InStance Test. That is because we expected participants to experience an increase in cognitive effort due to the dissonance between their individual tendency in interpreting the behavior of a humanoid robot and the need for integrating the representation of the observed behavior manifested by the embodied&#x20;robot.</p>
</sec>
</sec>
<sec sec-type="materials|methods" id="s2">
<title>Materials and Methods</title>
<sec id="s2-1">
<title>Participants</title>
<p>Forty-two participants were recruited from a mailing list for this experiment (mean age: 24.05, SD: 3.73, females: 24) in return for a payment of 15&#x20ac;. All participants self-reported normal or corrected-to-normal vision. The study was approved by the local Ethical Committee (Comitato Etico Regione Liguria) and was conducted in accordance with the Code of Ethics of the World Medical Association (Declaration of Helsinki). Each participant provided written informed consent before taking part in the experiment. All participants were na&#xef;ve to the purpose of this experiment and were debriefed upon completion. Five participants were excluded from data analysis, due to technical problems occurring during the recording phase. Three participants were excluded due to an insufficient amount of valid pupil data (&#x3c;60%). A total of 34 participants were included in the data analysis.</p>
</sec>
<sec id="s2-2">
<title>Pupil-Recording Apparatus, Materials, and Procedure</title>
<p>In a within-subject design, participants first attended, in a dimly lit room, the robot observation session, where they were positioned in front of the embodied iCub and observed it exhibiting a humanlike or a machine-like behavior. Right after this session, the participants were led to a different room (dimly lit) where they were instructed to sit down and position their head on a chinrest. They were then presented with the IST. The procedure would then be repeated for the second behavior of the robot. Choosing a within-participants design, and exposing participants to both behaviors of the robot, allows for a higher control of their previous knowledge and experience related to the iCub&#x20;robot.</p>
<p>Items from the IST were presented on a 22&#x2033; LCD screen (resolution: 1,680 &#xd7; 1,050). A chinrest was mounted at the edge of the table, at a horizontal distance of 62&#xa0;cm from the screen. The monocular (left eye) pupil signal was recorded using a screen-mounted SMI RED500 eyetracker (sampling rate of 500&#xa0;Hz). The dim illumination of the room was kept constant throughout the whole duration of the experimental sessions. The IST items were displayed through Opensesame 3.2.8 (<xref ref-type="bibr" rid="B36">Math&#xf4;t et&#x20;al., 2012</xref>).</p>
<sec id="s2-2-1">
<title>Robot Behavior</title>
<p>Before taking part in the IST, the participants were asked to observe the embodied iCub robot, which was programmed to behave as if it was playing a solitaire card game on a laptop positioned in front of it. From time to time, the robot was turning its head toward a second monitor, located on its left side, in the periphery. On this lateral monitor, a sequence of videos was played for the entire duration of this session. The behaviors displayed by the robot, in terms of eye and head movements, were manipulated between two experimental conditions. One condition involved the robot displaying a humanlike behavior, which was a replica of the behavior recorded in a previous attentional capture experiment from a human participant (detailed description of the robot behaviors is beyond the scope of this article; for details, see <xref ref-type="bibr" rid="B19">Ghiglino et&#x20;al., 2018</xref>). It is important to point out that the behavior displayed by the robot in this condition fully embodied the variability and the unpredictability of the behavior displayed by the human when the recording was first made. As a contrast condition, we programmed the robot to display another behavior, which was extremely stereotypical and predictable, defined as &#x201c;machine-like&#x201d; behavior. While the &#x201c;humanlike&#x201d; behavior consisted of several patterns of neck and eye movements, the &#x201c;machine-like&#x201d; behavior consisted of just one pattern of neck and eye movements. In other words, the &#x201c;machine-like&#x201d; behavior was generated in order to display no variability at all. The order of presentation of these two behaviors was counterbalanced across participants.</p>
</sec>
<sec id="s2-2-2">
<title>InStance Test Stimuli and Task</title>
<p>After the observation session, the participants performed a 9-point calibration, and they were then presented with the IST (<xref ref-type="bibr" rid="B4">Bossi et&#x20;al., 2020</xref>; <xref ref-type="bibr" rid="B17">Marchesi et&#x20;al., 2019</xref>; <xref ref-type="fig" rid="F1">Figure&#x20;1</xref>). The instructions in each trial were as follows: (i) first, look freely at the baseline image (1,000&#xa0;ms), (ii) freely explore the presented item (5,000&#xa0;ms), (iii) listen to the two sentences (5,000&#xa0;ms Sentence A and 5,000&#xa0;ms Sentence B), and finally, (iv) choose the description that you think better explains the presented scenario by moving a cursor on a slider (until click) (<xref ref-type="fig" rid="F2">Figure&#x20;2</xref>). The presentation order of mechanistic and mentalistic sentences was counterbalanced. Presentation of items was randomized. The IST was split into two subsets<xref ref-type="fn" rid="FN1">
<sup>1</sup>
</xref> of items, with half (one subset, 17 items) presented after one observation session and the other half (17 items) after the second observation session (the order of presentation of the subsets was counterbalanced). An example of the mentalistic sentences is &#x201c;iCub pretends to be gardener&#x201d;; an example of a mechanistic sentence is &#x201c;iCub adjusts the force to the weight of the object&#x201d; (<xref ref-type="fig" rid="F2">Figure&#x20;2</xref>). The complete list of mechanistic and mentalistic sentences, associated with the corresponding scenarios, is reported in <xref ref-type="bibr" rid="B17">Marchesi et&#x20;al. (2019)</xref> Supplementary Materials.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Exemplification of the IST items with exemplification of Sentence A and Sentence B (Marchesi et&#x20;al., 2019).</p>
</caption>
<graphic xlink:href="frobt-08-653537-g001.tif"/>
</fig>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Experimental time&#x20;line.</p>
</caption>
<graphic xlink:href="frobt-08-653537-g002.tif"/>
</fig>
<p>To avoid eye movements related to the reading process, for each scenario, the two descriptions were presented auditorily through headphones (similarly to the procedure adapted for EEG, <xref ref-type="bibr" rid="B4">Bossi et&#x20;al., 2020</xref>). Moreover, to allow a reliable baseline correction, we created a luminance-related baseline version of each scenario using MATLAB function Randblock (<ext-link ext-link-type="uri" xlink:href="https://it.mathworks.com/matlabcentral/fileexchange/17981-randblock">https://it.mathworks.com/matlabcentral/fileexchange/17981-randblock</ext-link>). This function allowed us to create a scrambled version of each item scenario with randomized blocks of pixel positions. The scrambled items were used as specific baselines for each corresponding scenario. This process was necessary to control the different luminance levels of each&#x20;item.</p>
</sec>
</sec>
<sec id="s2-3">
<title>Pupil Data Preprocessing</title>
<p>All data were preprocessed (and analyzed) using R (version 3.4.0, available at <ext-link ext-link-type="uri" xlink:href="http://www.rproject.org">http://www.rproject.org</ext-link>) and an open-source MATLAB (The Mathworks, Natick, MA, United&#x20;States) toolbox provided by <xref ref-type="bibr" rid="B29">Kret and Sjak-Shie (2019)</xref>. To clean and preprocess the data, we followed the pipeline proposed by Kret &#x26; Sjak-Shie: 1) first, we converted the eyetracker data to the standard format used by Kret &#x26; Sjak-Shie&#x2019;s MATLAB toolbox. Since we were interested in exploring how pupil dilation could predict participants&#x2019; choice in the IST, we decided to take the duration of each sentence as our time window of interest. Thus, data were segmented and preprocessed separately for the selected time windows. By applying this procedure, we reduced the probability that the pupil dilation signal would be biased by the preprocessing procedure (<xref ref-type="bibr" rid="B44">Proch&#xe1;zka et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B60">Math&#xf4;t et&#x20;al., 2018</xref>). In this dataset, we included information relevant to the pupil diameter, start/end time stamps of each segment, and validity of the data point, in separate columns. 2) We filtered dilation speed outliers, trend-deviation outliers, and samples that were temporally isolated, applying the parameters described by <xref ref-type="bibr" rid="B29">Kret and Sjak-Shie (2019)</xref>. In greater detail, in order to mitigate possible gaps due to nonuniform sampling, dilation speed data were normalized following the formula below:<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:msup>
<mml:mtext>d</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mtext>i</mml:mtext>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>max</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
<mml:mo>,</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>&#x7c;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mo>]</mml:mo>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x7c;</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>.</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf1">
<mml:math id="m2">
<mml:msup>
<mml:mrow>
<mml:msup>
<mml:mtext>d</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mo>[</mml:mo>
<mml:mtext>i</mml:mtext>
<mml:mo>]</mml:mo>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> indicates the dilation speed at each sample, d[i] indicates the pupil size series, and t[i] indicates the corresponding time stamp. Dilation speed outliers were then identified using the median absolute deviation (MAD, <xref ref-type="bibr" rid="B32">Leys et&#x20;al., 2013</xref>). MAD is a robust metric of dispersion, resilient to outliers. Samples within 50&#xa0;ms of gaps were rejected; contiguous missing data sections larger than 75&#xa0;ms were identified as gaps. The MAD metric was applied to identify absolute trend-line outliers. 3) We interpolated and smoothed the signal using a zero-phase low-pass filter with a cutoff of 4&#xa0;Hz (<xref ref-type="bibr" rid="B26">Jackson et&#x20;al., 2009</xref>). After having applied the pipeline described above, data were baseline-corrected by subtracting the mean pupil size during the baseline phase from the mean pupil size in our time of interest (ToI), and dividing by the mean pupil size during the baseline (<xref ref-type="bibr" rid="B43">Preuschoff et&#x20;al., 2011</xref>).<disp-formula id="e2">
<mml:math id="m3">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>z</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>I</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mi>b</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>p</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>z</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mi>b</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>p</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>z</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>.</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>This process allows a clean comparison of the resulting percentage of pupillary change relative to the baseline.</p>
</sec>
<sec id="s2-4">
<title>Sample Split and Dichotomization of the IST Response</title>
<p>In line with <xref ref-type="bibr" rid="B4">Bossi et&#x20;al. (2020)</xref>, in order to investigate individual biases, participants were grouped by their average individual InStance Score (ISS, the overall score across both robot behavior conditions): mentalistically biased people (&#x3e;0.5 SD over the mean score, <italic>N</italic>&#x20;&#x3d; 12, average ISS for this group: 62.25, SD: 7.64) and mechanistically biased people (&#x3c;-0.5 SD below the mean score, <italic>N</italic>&#x20;&#x3d; 9, average ISS for this group: 28.23, SD: 5.66). People who were not clearly over or under the cutoff value (&#x2212;0.5 &#x3c; score &#x3c; 0.5 SD, <italic>N</italic>&#x20;&#x3d; 13, average ISS for this group: 44.90, SD: 4) were considered as the &#x201c;unbiased&#x201d; group. Moreover, to be able to investigate participants&#x2019; stance in the IST (mentalistic vs. mechanistic), we considered the type of selected sentence (by considering as mechanistic a score &#x3c;50 and mentalistic a score &#x3e;50) as the attributed explanation to the item (from here on, defined as &#x201c;Attribution&#x201d;), leading to a binomial distribution. Although this practice could lead to a considerable loss of information, it allowed for a higher control of the interindividual variability present in the raw IST scores that could bias the overall mean&#x20;score.</p>
</sec>
<sec id="s2-5">
<title>Data Analysis: Pipeline Applied for (Generalized) Linear Mixed-Effects Models</title>
<p>Data analysis was conducted on the mean pupil size (baseline-corrected) for the time windows of interest (Sentence A and Sentence B time periods) using linear (or generalized linear where needed) mixed-effects models (<xref ref-type="bibr" rid="B3">Bates et&#x20;al., 2015</xref>). When it comes to linear mixed-effects models (LMMs) or generalized linear mixed-effects models (GLMMs), it is important to specify the pipeline that was followed to create the models. (i) First, we included all the fixed effects that allowed the model to converge. (ii) We included random effects that presented a low correlation value (&#x7c;r&#x7c; &#x3c; 0.80) with other random effects, to avoid overfitting. In all our models, Participant was included as a random effect. (iii) The significance level of the effects for the LMM was estimated using the Satterthwaite approximation for degrees of freedom, while for the GLMM, we performed a comparison with the corresponding null model (likelihood ratio tests, LRTs). Since time series analyses were not planned, autocorrelation of factors was not modeled. Detailed parameters for each model are reported in the Supplementary Materials.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<p>In line with <xref ref-type="bibr" rid="B17">Marchesi et&#x20;al. (2019)</xref>, the score in the InStance Test was calculated ranging on a scale from 0 (extreme mechanistic value) to 100 (extreme mentalistic value). In order to obtain the average InStance Score (ISS) per participant, the scores across single scenarios were averaged. Before performing any preprocessing, the overall average score at the InStance Test after observing the mechanistic behavior was 43.80, with SD: 17.69, and the overall average score after observing the humanlike behavior was 43.44, with SD: 18.03 [t(65.97) &#x3d; &#x2013;0.08, <italic>p</italic>&#x20;&#x3d; 0.934]; thus, the type of robot behavior that participants observed did not modulate the ISS. The overall sample average score at the InStance Test was 43.62, SD:&#x20;17.26.</p>
<p>As in the study by <xref ref-type="bibr" rid="B4">Bossi et&#x20;al. (2020)</xref>, given that our focus was the individual bias at the IST, in the present section, we will report the results from the mechanistically and mentalistically biased participants, leading to an overall total sample of <italic>N</italic>&#x20;&#x3d; 21 participants. Results on the very same models involving unbiased participants as well are reported in the Supplementary Materials (overall <italic>N</italic>&#x20;&#x3d; 34 participants).</p>
<sec id="s3-1">
<title>InStance Test Individual Attribution and Pupil Size</title>
<p>The first model (GLMM) aimed at investigating the relationship between pupil size and participants&#x2019; attribution at the IST. Our fixed effects were as follows: 1) the mean pupil size, 2) robot behavior previously observed, and 3) participants&#x2019; general bias at the IST, while we considered the selected attribution as the dependent variable. Because of this, the distribution of the GLMM is binomial.</p>
<p>The main effect of RobotBehavior emerged as statistically significant (b &#x3d; &#x2212;0.537, model comparison: &#x3c7;<sup>2</sup> (1) &#x3d; 24.286, <italic>p</italic>&#x20;&#x3c; 0.001). Results showed that participants chose more often an attribution congruent with the behavior previously observed on the robot (more mechanistic attribution after watching machine-like behavior and <italic>vice versa</italic>) (<xref ref-type="fig" rid="F3">Figure&#x20;3</xref>).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>GLMM: boxplot showing the statistically significant effect of RobotBehaviour &#x2a; Bias on attribution, with extreme values as predicted by the&#x20;model.</p>
</caption>
<graphic xlink:href="frobt-08-653537-g003.tif"/>
</fig>
<p>The interaction effect between RobotBehaviour &#x2a; mean pupil size was statistically significant as well (b &#x3d; &#x2212;9.291, model comparison: &#x3c7;<sup>2</sup> (1) &#x3d; 9.355, <italic>p</italic>&#x20;&#x3d; 0.002). Although the three-way interaction between RobotBehaviour&#x2a;mean pupil size &#x2a; individual bias was significant only when taking into account the Unbiased group (see Supplementary Materials), our main <italic>a priori</italic> hypotheses aimed at exploring differences due to participants&#x2019; individual bias in the IST. Therefore, we performed a planned comparison GLMM for each bias group (<xref ref-type="bibr" rid="B53">Tucker, 1990</xref>; <xref ref-type="bibr" rid="B30">Kuehne, 1993</xref>; <xref ref-type="bibr" rid="B46">Ruxton and Beuchamp, 2008</xref>) to test the interaction between RobotBehaviour &#x2a; mean pupil size: mechanistic group (model comparison: &#x3c7;<sup>2</sup> (1) &#x3d; 7.701, <italic>p</italic>&#x20;&#x3d; 0.005); mentalistic group (model comparison: &#x3c7;<sup>2</sup> (1) &#x3d; 3.001, <italic>p</italic>&#x20;&#x3d; 0.083). These results show that mechanistically biased participants showed a greater pupil dilation for attributions congruent with the robot behavior (b &#x3d; &#x2212;9.28, z &#x3d; &#x2212;2.757, <italic>p</italic>&#x20;&#x3d; 0.005, <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>) when attributing a mechanistic description after the observation of the robot behaving in a machine-like way and when attributing a mentalistic score after the observation of the robot behaving in a humanlike way. On the other hand, mentalistically biased participants showed a tendency, although statistically not significant, toward greater pupil sizes for mentalistic attributions, relative to mechanistic attributions, regardless of the robot behavior (b &#x3d; &#x2212;4.45, z &#x3d; &#x2212;1.73, <italic>p</italic>&#x20;&#x3d; 0.083, <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>GLMM on the mechanistic group (<italic>N</italic>&#x20;&#x3d; 9) and the mentalistic group (<italic>N</italic>&#x20;&#x3d; 12). The mechanistic bias group shows the interaction effect between attribution and mean pupil size. No statistically significant effect on attribution and pupil size in the mentalistic bias&#x20;group.</p>
</caption>
<graphic xlink:href="frobt-08-653537-g004.tif"/>
</fig>
</sec>
<sec id="s3-2">
<title>Behavioral Data Analysis</title>
<p>In order to investigate the relationship between behavioral data and participants&#x2019; response times, we tested the quadratic effect of the z-transformed IST score (included as the fixed factor) on log-transformed response times (our dependent variable), as we expected them to be smaller in the extremes of the score distribution of the IST. Results showed a statistically significant quadratic effect of the IST score [b &#x3d; &#x2212;0.146, t (1,419.99) &#x3d; &#x2212;9.737, <italic>p</italic>&#x20;&#x3c; 0.001] (<xref ref-type="fig" rid="F5">Figure&#x20;5</xref>). These results show that participants were overall faster when scoring on the extremes of the IST&#x20;scale.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>LMM: statistically significant quadratic effect of the IST-z score on log-transformed response time showing faster RTs for extreme scores.</p>
</caption>
<graphic xlink:href="frobt-08-653537-g005.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>In the present study, we investigated whether adopting the intentional/design stance could be predicted by changes in pupil dilation and how both effects are modulated by participants&#x2019; individual bias in adopting the intentional stance and by a behavior of a robot observed prior to the test. To address these aims, we conducted an experiment in which participants first observed the embodied humanoid robot iCub, programmed to behave as if it was playing solitaire on a laptop positioned in front of it. From time to time, the robot was programmed to turn its head toward a second monitor on its left periphery, where a sequence of videos was being played. The behaviors exhibited by the robot were manipulated in a within-subjects design: in one condition, the robot exhibited a humanlike behavior, and in the second condition, the robot exhibited a machine-like behavior. After each session with the robot, participants&#x2019; pupil data were recorded while they completed the InStance Test. Participants were then divided into two groups, based on the bias showed by their IST score: a mentalistically biased group and a mechanistically biased&#x20;group.</p>
<p>We found that both mechanistically and mentalistically biased participants leaned more toward mentalistic attributions in the IST after observing the robot&#x2019;s humanlike behavior, as compared to the mechanistic behavior. This shows that participants had some sensitivity to the subtle differences in the robot behavior, thereby attributing more &#x201c;humanness&#x201d; to the humanlike behavior, independently of their initial bias (<xref ref-type="bibr" rid="B18">Ghiglino et&#x20;al., 2020b</xref>).</p>
<p>We also explored the relationship between the individual bias and the changes in pupil dilation as a function of the behaviors displayed by the robot. We found that the two groups showed different patterns. On the one hand, for mechanistically biased people, pupil dilation was greater when they chose descriptions of the robot behavior in terms that were &#x201c;congruent&#x201d; with the previously observed robot behavior: a mentalistic attribution after the humanlike behavior and a mechanistic attribution after the machine-like behavior. We argue that this is due to the engagement of additional cognitive resources, caused by the cognitive effort in integrating the representation of the observed behavior into the judgment (<xref ref-type="bibr" rid="B28">Kool et&#x20;al., 2010</xref>; <xref ref-type="bibr" rid="B27">Kool and Botvinick, 2014</xref>). In other words, these participants might have had enough sensitivity to detect the &#x201c;human-likeness&#x201d; or &#x201c;machine-likeness&#x201d; in the behavior of the robot. We argue that the integration of this piece of evidence into the judgment in the IST might have required additional cognitive resources.</p>
<p>On the other hand, mentalistically biased participants showed a tendency for greater pupil dilation when choosing the mentalistic description, independent of the observed robot behavior. Perhaps this group of participants showed engagement of additional cognitive resources when they were choosing descriptions that were in line with their initial bias (<xref ref-type="bibr" rid="B7">Christie and Schrater, 2015</xref>). Adherence to the &#x201c;mentalistic&#x201d; descriptions, independent of observed behavior, indicates, on the one hand, lower cognitive flexibility than the mechanistically oriented participants and, on the other hand, might be related to the general individual characteristic to structure and make the external world reasonable. This tendency to structure the external environment and engage in cognitive effortful tasks is defined as &#x201c;need for cognition&#x201d; (<xref ref-type="bibr" rid="B5">Cacioppo and Petty, 1982</xref>; <xref ref-type="bibr" rid="B8">Cohen et&#x20;al., 1955</xref>; <xref ref-type="bibr" rid="B14">Epley et&#x20;al., 2007</xref>). Mentalistically biased participants might have a lower need for cognition, and therefore pay less attention to all the subtle behavioral cues exhibited by the agent and stick to their original bias. Therefore, we may argue that this group is less prone to changing the stance adopted to interpret an agent&#x2019;s behavior.</p>
<p>One last (and interesting) finding of our study was that RTs were faster on the extremes of the IST score distribution. This suggests that perhaps once participants made a clear decision toward mentalistic or mechanistic description, it was easier and more straightforward for them to indicate the extreme poles of the slider. On the other hand, when they were not convinced about which alternative to choose, they indicated this through keeping the cursor close to the middle and longer (more hesitant) responses.</p>
<p>Overall, it seems plausible that the general mechanistic bias leads to allocating a higher amount of attentional resources toward observation of the robot (<xref ref-type="bibr" rid="B20">Ghiglino et&#x20;al., 2020a</xref>), resulting in paying more attention to the details of the observed behavior (in line also with <xref ref-type="bibr" rid="B18">Ghiglino et&#x20;al., 2020b</xref>; see also <xref ref-type="bibr" rid="B33">Marchesi et&#x20;al., 2020</xref>). This, in turn, might influence the subsequent evaluation of robot behavior descriptions. On the other hand, a mentalistic bias might lead participants to stick to their spontaneous first impression (<xref ref-type="bibr" rid="B50">Spatola et&#x20;al., 2019</xref>) and a lower need for cognition (<xref ref-type="bibr" rid="B5">Cacioppo and Petty, 1982</xref>; <xref ref-type="bibr" rid="B8">Cohen et&#x20;al., 1955</xref>; <xref ref-type="bibr" rid="B14">Epley et&#x20;al., 2007</xref>). Commonly, individual differences and expectations shape the first impression about a humanoid robot (<xref ref-type="bibr" rid="B45">Ray et&#x20;al., 2008</xref>, <xref ref-type="bibr" rid="B4">Bossi et&#x20;al., 2020</xref>, <xref ref-type="bibr" rid="B24">Horstmann and Kr&#xe4;mer, 2019</xref>; <xref ref-type="bibr" rid="B34">Marchesi et&#x20;al., 2021</xref>). <xref ref-type="bibr" rid="B41">Perez-Osorio et&#x20;al. (2019b)</xref> showed that people with higher expectations about robots tend to explain the robot behavior with reference to mental states. This might indicate that our participants with a mentalistic bias were predominantly influenced by their expectations about the abilities of the robot and, therefore, paid less attention to the mechanistic behaviors of the robot. 
To conclude, we interpret the results in light of the influence of individual differences in the allocation of cognitive resources that might differ between people who are prone to adopting the intentional stance toward humanoid robots and people who, by default, adopt the design stance (<xref ref-type="bibr" rid="B4">Bossi et&#x20;al., 2020</xref>; <xref ref-type="bibr" rid="B34">Marchesi et&#x20;al., 2021</xref>).</p>
</sec>
<sec id="s5">
<title>Limitations of the Current Study and Future Work</title>
<p>In the present study, we opted for a within-subjects design to reduce the influence of interindividual differences related to prior knowledge/experience with the iCub robot. Nevertheless, we cannot rule out the fact that our approach was indeed too conservative, leading to a null effect of the robot behavior manipulation on the raw IST scores due to a carry-over effect. Future research should consider adapting similar paradigms to a between-subjects design, since this option will allow for controlling possible carry-over effects.</p>
</sec>
<sec id="s6">
<title>Concluding Remarks</title>
<p>In conclusion, our present findings indicate that there might be individual differences with respect to people&#x2019;s sensitivity to subtle hints regarding human-likeness of the robot and the likelihood of integrating the representation of the observed behavior into the judgment about the robot&#x2019;s intentionality. Whether these individual differences are the result of personal traits, attitudes specific to robots, or a particular state at a given moment of measurement remains to be answered in future research. However, it is important to keep such biases in mind (and their interplay with engagement of cognitive resources) when evaluating the quality of human&#x2013;robot interaction. The evidence for different biases in interpreting the behavior of a humanoid robot might translate into the design of socially attuned humanoid robots capable of understanding the needs of the users, targeting their biases to facilitate the integration of artificial agents into our social environment.</p>
</sec>
</body>
<back>
<sec id="s7">
<title>Data Availability Statement</title>
<p>Data from this experiment can be found at the following link: <ext-link ext-link-type="uri" xlink:href="https://osf.io/s7tfe">https://osf.io/s7tfe</ext-link>.</p>
</sec>
<sec id="s8">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by the Comitato Etico Regione Liguria. The patients/participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s9">
<title>Author Contributions</title>
<p>SM and AW designed the pupillometry task. DG and AW designed the observational task. DD programmed the behaviors of the robot. SM and DG performed data collection. SM and FB analyzed the data. SM and AW wrote the manuscript. All authors contributed to reviewing the manuscript and approved&#x20;it.</p>
</sec>
<sec id="s10">
<title>Funding</title>
<p>This work has received support from the European Research Council under the European Union&#x2019;s Horizon 2020 research and innovation program, ERC Starting Grant, G.A. number: ERC-2016-StG-715058, awarded to AW. The content of this article is the sole responsibility of the authors. The European Commission or its services cannot be held responsible for any use that may be made of the information it contains.</p>
</sec>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s12">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frobt.2021.653537/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frobt.2021.653537/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Presentation1.PPTX" id="SM1" mimetype="application/PPTX" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="DataSheet1.docx" id="SM2" mimetype="application/docx" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="FN1">
<label>1</label>
<p>The two groups of items of the IST were created based on the results of Marchesi et&#x20;al. (2019), in such a way that the mean score and SD for both groups were comparable (Group 1: M &#x3d; 40.60, SD &#x3d; 15.31; Group 2: M &#x3d; 40.85, SD &#x3d; 16.55, t(34)&#x20;&#x3d; .82, p &#x3d;&#x20;.415).</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abu-Akel</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Apperly</surname>
<given-names>I. A.</given-names>
</name>
<name>
<surname>Wood</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Hansen</surname>
<given-names>P. C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Re-imaging the Intentional Stance</article-title>. <source>Proc. R. Soc. B</source> <volume>287</volume>, <fpage>20200244</fpage>. <pub-id pub-id-type="doi">10.1098/rspb.2020.0244</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Airenti</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The Development of Anthropomorphism in Interaction: Intersubjectivity, Imagination, and Theory of Mind</article-title>. <source>Front. Psychol.</source> <volume>9</volume>, <fpage>2136</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2018.02136</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bates</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kliegl</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Vasishth</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Baayen</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2015</year>). <source>Parsimonious Mixed Models</source> (<comment>arXiv preprint arXiv:1506.04967</comment>.</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bossi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Willemse</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cavazza</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Murino</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group>, (<year>2020</year>). <article-title>The Human Brain Reveals Resting State Activity Patterns that are Predictive of Biases in Attitudes toward Robots</article-title>. <source>Sci. Robotics</source> <volume>5</volume>, <issue>46</issue>. </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cacioppo</surname>
<given-names>J.&#x20;T.</given-names>
</name>
<name>
<surname>Petty</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>1982</year>). <article-title>The Need for Cognition</article-title>. <source>J.&#x20;Personal. Soc. Psychol.</source> <volume>42</volume> (<issue>1</issue>), <fpage>116</fpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.42.1.116</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chaminade</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Rosset</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Da Fonseca</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Nazarian</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Lutscher</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>How Do We Think Machines Think? an fMRI Study of Alleged Competition with an Artificial Intelligence</article-title>. <source>Front. Hum. Neurosci.</source> <volume>6</volume>, <fpage>103</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2012.00103</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Christie</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Schrater</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Cognitive Cost as Dynamic Allocation of Energetic Resources</article-title>. <source>Front. Neurosci.</source> <volume>9</volume>, <fpage>289</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2015.00289</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cohen</surname>
<given-names>A. R.</given-names>
</name>
<name>
<surname>Stotland</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wolfe</surname>
<given-names>D. M.</given-names>
</name>
</person-group> (<year>1955</year>). <article-title>An Experimental Investigation of Need for Cognition</article-title>. <source>J.&#x20;Abnormal Soc. Psychol.</source> <volume>51</volume> (<issue>2</issue>), <fpage>291</fpage>. <pub-id pub-id-type="doi">10.1037/h0042761</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cross</surname>
<given-names>E. S.</given-names>
</name>
<name>
<surname>Hortensius</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>From Social Brains to Social Robots: Applying Neurocognitive Insights to Human&#x2013;Robot Interaction</article-title>. <pub-id pub-id-type="doi">10.1098/rstb.2018.0024</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dacey</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Anthropomorphism as Cognitive Bias</article-title>. <source>Philos. Sci.</source> <volume>84</volume> (<issue>5</issue>), <fpage>1152</fpage>&#x2013;<lpage>1164</lpage>. <pub-id pub-id-type="doi">10.1086/694039</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Gee</surname>
<given-names>J.&#x20;W.</given-names>
</name>
<name>
<surname>Knapen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Donner</surname>
<given-names>T. H.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Decision-related Pupil Dilation Reflects Upcoming Choice and Individual Bias</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>111</volume> (<issue>5</issue>), <fpage>E618</fpage>&#x2013;<lpage>E625</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1317557111</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dennett</surname>
<given-names>D. C.</given-names>
</name>
</person-group> (<year>1971</year>). <article-title>Intentional Systems</article-title>. <source>J.&#x20;Philos.</source> <volume>68</volume> (<issue>4</issue>), <fpage>87</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.2307/2025382</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Epley</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Waytz</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cacioppo</surname>
<given-names>J.&#x20;T.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>On Seeing Human: a Three-Factor Theory of Anthropomorphism</article-title>. <source>Psychol. Rev.</source> <volume>114</volume> (<issue>4</issue>), <fpage>864</fpage>. <pub-id pub-id-type="doi">10.1037/0033-295X.114.4.864</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Fink</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Anthropomorphism and Human Likeness in the Design of Robots and Human-Robot Interaction</article-title>. in <conf-name>Proceedings of the International Conference on Social Robotics</conf-name>. <publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>, <fpage>199</fpage>&#x2013;<lpage>208</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-34103-8_20</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gallagher</surname>
<given-names>H. L.</given-names>
</name>
<name>
<surname>Jack</surname>
<given-names>A. I.</given-names>
</name>
<name>
<surname>Roepstorff</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Frith</surname>
<given-names>C. D.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Imaging the Intentional Stance in a Competitive Game</article-title>. <source>Neuroimage</source> <volume>16</volume>, <fpage>814</fpage>&#x2013;<lpage>821</lpage>. <pub-id pub-id-type="doi">10.1006/nimg.2002.1117</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Ghiglino</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>De Tommaso</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Willemse</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020b</year>). <article-title>Can I Get Your (Robot) Attention? Human Sensitivity to Subtle Hints of Human-Likeness in a Humanoid Robot&#x2019;s Behavior</article-title>. <comment>Cogsci 2020. <ext-link ext-link-type="uri" xlink:href="https://cognitivesciencesociety.org/cogsci20/papers/0168/0168.pdf">https://cognitivesciencesociety.org/cogsci20/papers/0168/0168.pdf</ext-link>
</comment>. </citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ghiglino</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>De Tommaso</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <source>International Conference on Social Robotics</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>, <fpage>400</fpage>&#x2013;<lpage>409</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-05204-1_39</pub-id> <article-title>Attributing Human-Likeness to an Avatar: the Role of Time and Space in the Perception of Biological Motion</article-title>. </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ghiglino</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Willemse</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>De Tommaso</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bossi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020a</year>). <article-title>At First Sight: Robots&#x2019; Subtle Eye Movement Parameters Affect Human Attentional Engagement, Spontaneous Attunement and Perceived Human-Likeness</article-title>. <source>Paladyn, J.&#x20;Behav. Robotics</source> <volume>11</volume> (<issue>1</issue>), <fpage>31</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1515/pjbr-2020-0004</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Happ&#xe9;</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Frith</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Theory of Mind in Autism</article-title>. In <conf-name>Learning and cognition in autism</conf-name>. <publisher-loc>Boston, MA</publisher-loc>: <publisher-name>Springer</publisher-name>, <fpage>177</fpage>&#x2013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.1007/978-1-4899-1286-2_10</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heider</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Simmel</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1944</year>). <article-title>An Experimental Study of Apparent Behavior</article-title>. <source>Am. J.&#x20;Psychol.</source> <volume>57</volume> (<issue>2</issue>), <fpage>243</fpage>&#x2013;<lpage>259</lpage>. <pub-id pub-id-type="doi">10.2307/1416950</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hess</surname>
<given-names>E. H.</given-names>
</name>
<name>
<surname>Polt</surname>
<given-names>J.&#x20;M.</given-names>
</name>
</person-group> (<year>1964</year>). <article-title>Pupil Size in Relation to Mental Activity during Simple Problem-Solving</article-title>. <source>Science</source> <volume>143</volume> (<issue>3611</issue>), <fpage>1190</fpage>&#x2013;<lpage>1192</lpage>. </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Horstmann</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>Kr&#xe4;mer</surname>
<given-names>N. C.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Great Expectations? Relation of Previous Experiences with Social Robots in Real Life or in the media and Expectancies Based on Qualitative and Quantitative Assessment</article-title>. <source>Front. Psychol.</source> <volume>10</volume>, <fpage>939</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2019.00939</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hortensius</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Cross</surname>
<given-names>E. S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>From Automata to Animate Beings: the Scope and Limits of Attributing Socialness to Artificial Agents</article-title>. <source>Ann. N Y Acad. Sci.</source> <volume>1426</volume> (<issue>1</issue>), <fpage>93</fpage>&#x2013;<lpage>110</lpage>. <pub-id pub-id-type="doi">10.1111/nyas.13727</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jackson</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Sirois</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Infant Cognition: Going Full Factorial with Pupil Dilation</article-title>. <source>Developmental Science</source> <volume>12</volume> (<issue>4</issue>), <fpage>670</fpage>&#x2013;<lpage>679</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-7687.2008.00805.x</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kool</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Botvinick</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <source>A Labor/leisure Tradeoff in Cognitive Control</source>. <pub-id pub-id-type="doi">10.1037/2333-8113.1.S.3</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kool</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>McGuire</surname>
<given-names>J.&#x20;T.</given-names>
</name>
<name>
<surname>Rosen</surname>
<given-names>Z. B.</given-names>
</name>
<name>
<surname>Botvinick</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Decision Making and the Avoidance of Cognitive Demand</article-title>. <source>J.&#x20;Exp. Psychol. Gen.</source> <volume>139</volume>, <fpage>665</fpage>&#x2013;<lpage>682</lpage>. <pub-id pub-id-type="doi">10.1037/a0020198</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kret</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Sjak-Shie</surname>
<given-names>E. E.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Preprocessing Pupil Size Data: Guidelines and Code</article-title>. <source>Behav. Res. Methods</source> <volume>51</volume> (<issue>3</issue>), <fpage>1336</fpage>&#x2013;<lpage>1342</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-018-1075-y</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kuehne</surname>
<given-names>C. C.</given-names>
</name>
</person-group> (<year>1993</year>). <source>The Advantages of Using Planned Comparisons over Post Hoc Tests</source>.</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Larsen</surname>
<given-names>R. S.</given-names>
</name>
<name>
<surname>Waters</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Neuromodulatory Correlates of Pupil Dilation</article-title>. <source>Front. Neural circuits</source> <volume>12</volume>, <fpage>21</fpage>. <pub-id pub-id-type="doi">10.3389/fncir.2018.00021</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Leys</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ley</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Klein</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Bernard</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Licata</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Detecting Outliers: Do Not Use Standard Deviation Around the Mean, Use Absolute Deviation Around the Median</article-title>. <source>J.&#x20;Exp. Soc. Psychol.</source> <volume>49</volume> (<issue>4</issue>), <fpage>764</fpage>&#x2013;<lpage>766</lpage>. <pub-id pub-id-type="doi">10.1016/j.jesp.2013.03.013</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Perez-Osorio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>De Tommaso</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Don&#x2019;t Overthink: Fast Decision Making Combined with Behavior Variability Perceived as More Human-like</article-title>. In <conf-name>2020 29th IEEE International Conference on Robot and Human Interactive Communication, 2020</conf-name>. <publisher-loc>RO-MAN), Naples, Italy</publisher-loc>, <fpage>54</fpage>&#x2013;<lpage>59</lpage>. <pub-id pub-id-type="doi">10.1109/RO-MAN47096.2020.9223522</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ghiglino</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ciardo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Perez-Osorio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Baykara</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Do We Adopt the Intentional Stance toward Humanoid Robots?</article-title> <source>Front. Psychol.</source> <volume>10</volume>, <fpage>450</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2019.00450</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Spatola</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>P&#xe9;rez-Osorio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Human vs Humanoid. A Behavioral Investigation of the Individual Tendency to Adopt the Intentional Stance</article-title>. <source>Proc. 2021 ACM/IEEE Int. Conf. Human-Robot Interaction</source>, <fpage>332</fpage>&#x2013;<lpage>340</lpage>. <pub-id pub-id-type="doi">10.1145/3434073.3444663</pub-id> </citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Math&#x00F4;t</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Fabius</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Van Heusden</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Safe and Sensible Preprocessing and Baseline Correction of Pupil-Size Data</article-title>. <source>Behav. Res.</source> <volume>50</volume>, <fpage>94</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-017-1007-2</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Math&#xf4;t</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Pupillometry: Psychology, Physiology, and Function</article-title>. <source>J.&#x20;Cogn.</source> <volume>1</volume> (<issue>1</issue>). <pub-id pub-id-type="doi">10.5334/joc.18</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Math&#xf4;t</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schreij</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Theeuwes</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>OpenSesame: An Open-Source, Graphical experiment Builder for the Social Sciences</article-title>. <source>Behav. Res. Methods</source> <volume>44</volume> (<issue>2</issue>), <fpage>314</fpage>&#x2013;<lpage>324</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-011-0168-7</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Metta</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Natale</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Nori</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sandini</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Vernon</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Fadiga</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bernardino</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>The iCub Humanoid Robot: An Open-Systems Platform for Research in Cognitive Development</article-title>. <source>Neural Networks</source> <volume>23</volume> (<issue>8-9</issue>), <fpage>1125</fpage>&#x2013;<lpage>1134</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2010.08.010</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#xd6;zdem</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wiese</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>M&#xfc;ller</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Brass</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Van Overwalle</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Believing Androids&#x2013;fMRI Activation in the Right Temporo-Parietal junction Is Modulated by Ascribing Intentions to Non-human Agents</article-title>. <source>Soc. Neurosci.</source> <volume>12</volume> (<issue>5</issue>), <fpage>582</fpage>&#x2013;<lpage>593</lpage>. <pub-id pub-id-type="doi">10.1080/17470919.2016.1207702</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Pasquali</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Aroyo</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Gonzalez-Billandon</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rea</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sandini</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sciutti</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Your Eyes Never Lie: A Robot Magician Can Tell if You Are Lying</article-title>. In <conf-name>Companion of the 2020 ACM/IEEE International Conference on Human-Robot Interaction (HRI &#x27;20)</conf-name>. <publisher-name>Association for Computing Machinery</publisher-name>, <publisher-loc>New York, NY, USA</publisher-loc>, <fpage>392</fpage>&#x2013;<lpage>394</lpage>. <pub-id pub-id-type="doi">10.1145/3371382.3378253</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Pasquali</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Gonzalez-Billandon</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rea</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sandini</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sciutti</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Magic iCub: A Humanoid Robot Autonomously Catching Your Lies in a Card Game</article-title>. In <conf-name>Proceedings of the 2021 ACM/IEEE International Conference on Human-Robot Interaction (HRI &#x2032;21)</conf-name>. <publisher-name>Association for Computing Machinery</publisher-name>, <publisher-loc>New York, NY, USA</publisher-loc>, <fpage>293</fpage>&#x2013;<lpage>302</lpage>. <pub-id pub-id-type="doi">10.1145/3434073.3444682</pub-id> </citation>
</ref>
<ref id="B41">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Perez-Osorio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Marchesi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ghiglino</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ince</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019b</year>). &#x201c;<article-title>More Than You Expect: Priors Influence on the Adoption of Intentional Stance toward Humanoid Robots</article-title>,&#x201d; in <source>Social Robotics. ICSR 2019. Lecture Notes in Computer Science, Vol 11876</source>. Editors Salichs M. et al. (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>). <pub-id pub-id-type="doi">10.1007/978-3-030-35888-4_12</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Perez-Osorio</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019a</year>). <source>Wording Robotics</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>, <fpage>119</fpage>&#x2013;<lpage>136</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-17974-8_10</pub-id> <article-title>Adopting the Intentional Stance towards Humanoid Robots</article-title>. </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Preuschoff</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>t Hart</surname>
<given-names>B. M.</given-names>
</name>
<name>
<surname>Einhauser</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Pupil Dilation Signals surprise: Evidence for Noradrenaline&#x2019;s Role in Decision Making</article-title>. <source>Front. Neurosci.</source> <volume>5</volume>, <fpage>115</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2011.00115</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Proch&#xe1;zka</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mudrov&#xe1;</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Vy&#x161;ata</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Hava</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Araujo</surname>
<given-names>C. P. S.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Multi-Channel EEG Signal Segmentation and Feature Extraction</article-title>. In <conf-name>2010 IEEE 14th International Conference on Intelligent Engineering Systems</conf-name>. <publisher-name>IEEE</publisher-name>, <fpage>317</fpage>&#x2013;<lpage>320</lpage>. <pub-id pub-id-type="doi">10.1109/INES.2010.5483824</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ray</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Mondada</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Siegwart</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>What Do People Expect from Robots?</article-title>. In <conf-name>2008 IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name>. <publisher-name>IEEE</publisher-name>, <fpage>3816</fpage>&#x2013;<lpage>3821</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2008.4650714</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruxton</surname>
<given-names>G. D.</given-names>
</name>
<name>
<surname>Beauchamp</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Time for Some A Priori Thinking about Post Hoc Testing</article-title>. <source>Behav. Ecol.</source> <volume>19</volume> (<issue>3</issue>), <fpage>690</fpage>&#x2013;<lpage>693</lpage>. <pub-id pub-id-type="doi">10.1093/beheco/arn020</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Samani</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Saadatian</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Polydorou</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Fernando</surname>
<given-names>O. N. N.</given-names>
</name>
<name>
<surname>Nakatsu</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). <article-title>Cultural Robotics: the Culture of Robotics and Robotics in Culture</article-title>. <source>Int. J.&#x20;Adv. Robotic Syst.</source> <volume>10</volume> (<issue>12</issue>), <fpage>400</fpage>. <pub-id pub-id-type="doi">10.5772/57260</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saygin</surname>
<given-names>A. P.</given-names>
</name>
<name>
<surname>Chaminade</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Driver</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Frith</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>The Thing that Should Not Be: Predictive Coding and the Uncanny valley in Perceiving Human and Humanoid Robot Actions</article-title>. <source>Soc. Cogn. affective Neurosci.</source> <volume>7</volume> (<issue>4</issue>), <fpage>413</fpage>&#x2013;<lpage>422</lpage>. <pub-id pub-id-type="doi">10.1093/scan/nsr025</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schellen</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Intentional Mindset toward Robots&#x2014;Open Questions and Methodological Challenges</article-title>. <source>Front. Robotics AI</source> <volume>5</volume>, <fpage>139</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00139</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Spatola</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Monceau</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ferrand</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Cognitive Impact of Social Robots: How Anthropomorphism Boosts Performance</article-title>. <source>IEEE Robotics Automation Mag. Inst. Electr. Elect. Eng.</source> <volume>27</volume> (<issue>3</issue>), <fpage>73</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1109/MRA.2019.2928823</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Spunt</surname>
<given-names>R. P.</given-names>
</name>
<name>
<surname>Meyer</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Lieberman</surname>
<given-names>M. D.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>The Default Mode of Human Brain Function Primes the Intentional Stance</article-title>
<source>J.&#x20;Cogn. Neurosci.</source>, <volume>27</volume>, <fpage>1116</fpage>&#x2013;<lpage>1124</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_00785</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Thellman</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Silvervarg</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ziemke</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Folk-psychological Interpretation of Human vs. Humanoid Robot Behavior: Exploring the Intentional Stance toward Robots</article-title>. <source>Front. Psychol.</source> <volume>8</volume>, <fpage>1962</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01962</pub-id> </citation>
</ref>
<ref id="B53">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Tucker</surname>
<given-names>M. L.</given-names>
</name>
</person-group> (<year>1990</year>). <source>A Compendium of Textbook Views on Planned versus Post Hoc Tests</source>.</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Waytz</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cacioppo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Epley</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Who Sees Human? the Stability and Importance of Individual Differences in Anthropomorphism</article-title>. <source>Perspect. Psychol. Sci.</source> <volume>5</volume> (<issue>3</issue>), <fpage>219</fpage>&#x2013;<lpage>232</lpage>. <pub-id pub-id-type="doi">10.1177/1745691610369336</pub-id> </citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wiese</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Metta</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Robots as Intentional Agents: Using Neuroscientific Methods to Make Robots Appear More Social</article-title>. <source>Front. Psychol.</source> <volume>8</volume>, <fpage>1663</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01663</pub-id> </citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chaminade</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Embodied Artificial Agents for Understanding Human Social Cognition</article-title>. <source>Phil. Trans. R. Soc. B: Biol. Sci.</source> <volume>371</volume> (<issue>1693</issue>), <fpage>20150375</fpage>. <pub-id pub-id-type="doi">10.1098/rstb.2015.0375</pub-id> </citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Social Robots to Test Flexibility of Human Social Cognition</article-title>. <source>Int. J.&#x20;Soc. Rob.</source> <volume>12</volume> (<issue>6</issue>), <fpage>1203</fpage>&#x2013;<lpage>1211</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-020-00674-5</pub-id> </citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zwickel</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Agency Attribution and Visuospatial Perspective Taking</article-title>. <source>Psychon. Bull. Rev.</source> <volume>16</volume> (<issue>6</issue>), <fpage>1089</fpage>&#x2013;<lpage>1093</lpage>. <pub-id pub-id-type="doi">10.3758/PBR.16.6.1089</pub-id> </citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Z&#x142;otowski</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Proudfoot</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Yogeeswaran</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bartneck</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Anthropomorphism: Opportunities and Challenges in Human&#x2013;Robot Interaction</article-title>. <source>Int. J.&#x20;Soc. Robotics</source> <volume>7</volume>, <fpage>347</fpage>&#x2013;<lpage>360</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-014-0267-6</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>