<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">883814</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2022.883814</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Personalizing Care Through Robotic Assistance and Clinical Supervision</article-title>
<alt-title alt-title-type="left-running-head">Sorrentino et al.</alt-title>
<alt-title alt-title-type="right-running-head">Personalizing Care Through Robotic Assistance</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Sorrentino</surname>
<given-names>Alessandra</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1834135/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fiorini</surname>
<given-names>Laura</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/425311/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mancioppi</surname>
<given-names>Gianmaria</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/674978/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cavallo</surname>
<given-names>Filippo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/252146/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Umbrico</surname>
<given-names>Alessandro</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/765192/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cesta</surname>
<given-names>Amedeo</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Orlandini</surname>
<given-names>Andrea</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/138683/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Scuola Superiore Sant&#x2019;Anna</institution>, <addr-line>Pisa</addr-line>, <country>Italy</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Industrial Engineering</institution>, <institution>University of Florence</institution>, <addr-line>Florence</addr-line>, <country>Italy</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>CNR&#x2013;Institute of Cognitive Sciences and Technologies (CNR-ISTC)</institution>, <addr-line>Rome</addr-line>, <country>Italy</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1321124/overview">Alessandro Freddi</ext-link>, Marche Polytechnic University, Italy</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1760707/overview">Daniele Proietti Pagnotta</ext-link>, Marche Polytechnic University, Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1037123/overview">Luca Romeo</ext-link>, Marche Polytechnic University, Italy</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Alessandro Umbrico, <email>alessandro.umbrico@istc.cnr.it</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Biomedical Robotics, a section of the journal Frontiers in Robotics and AI</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>12</day>
<month>07</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>9</volume>
<elocation-id>883814</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>02</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>06</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Sorrentino, Fiorini, Mancioppi, Cavallo, Umbrico, Cesta and Orlandini.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Sorrentino, Fiorini, Mancioppi, Cavallo, Umbrico, Cesta and Orlandini</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>By 2030, the World Health Organization (WHO) foresees a worldwide workforce shortfall of healthcare professionals, with dramatic consequences for patients, economies, and communities. Research in assistive robotics has experienced an increasing attention during the last decade demonstrating its utility in the realization of intelligent robotic solutions for healthcare and social assistance, also to compensate for such workforce shortages. Nevertheless, a challenge for effective assistive robots is dealing with a high variety of situations and <italic>contextualizing</italic> their interactions according to living contexts and habits (or preferences) of assisted people. This study presents a novel cognitive system for assistive robots that rely on artificial intelligence (AI) representation and reasoning features/services to support decision-making processes of healthcare assistants. We proposed an original integration of AI-based features, that is, <italic>knowledge representation and reasoning</italic> and <italic>automated planning</italic> to 1) define a human-in-the-loop continuous assistance procedure that helps clinicians in evaluating and managing patients and; 2) to dynamically adapt robot behaviors to the specific needs and interaction abilities of patients. The system is deployed in a realistic assistive scenario to demonstrate its feasibility to support a clinician taking care of several patients with different conditions and needs.</p>
</abstract>
<kwd-group>
<kwd>socially assistive robot (SAR)</kwd>
<kwd>knowledge representation and reasoning (KRR)</kwd>
<kwd>automated planning (AP)</kwd>
<kwd>user modeling (UM)</kwd>
<kwd>human&#x2013;robot interaction (HRI)</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>By 2030, the World Health Organization (WHO) foresees a worldwide workforce shortfall of about 18 million healthcare professionals, with dramatic consequences for patients, economies, and communities (<xref ref-type="bibr" rid="B33">Liu et al., 2017</xref>). The development of ICT-based integrated care solutions offers a variety of possible solutions to address this issue. Research in assistive robotics has experienced increasing attention during the last decade aiming at the realization of intelligent robotic solutions for healthcare and social assistance, also to compensate for such workforce shortages. Also, the potential impact of healthcare and assistive robots is witnessed by their deployments to deal with the COVID-19 pandemic (<xref ref-type="bibr" rid="B36">Murphy et al., 2022</xref>). Remarkable results have been achieved integrating social robots in realistic assistive scenarios with human users (see e.g., (<xref ref-type="bibr" rid="B12">Cavallo et al., 2018</xref>; <xref ref-type="bibr" rid="B3">Angelini et al., 2019</xref>; <xref ref-type="bibr" rid="B18">D&#x2019;Onofrio et al., 2016</xref>; <xref ref-type="bibr" rid="B6">Bertolini et al., 2016</xref>; <xref ref-type="bibr" rid="B14">Coradeschi et al., 2013</xref>)), also including the case of assistance and monitoring of impaired and frail people (see, e.g., (<xref ref-type="bibr" rid="B11">Casey et al., 2016</xref>; <xref ref-type="bibr" rid="B22">Fiorini et al., 2017</xref>; <xref ref-type="bibr" rid="B34">Mancioppi et al., 2019</xref>)). Assistive robots can then be used to support healthcare professionals in their activities augmenting their capacities and strength in dealing with a wide number of patients. Moreover, human&#x2013;robot interaction (HRI) is now a very compelling field also used to better understand how humans perceive, interact with, or accept these machines in social contexts (<xref ref-type="bibr" rid="B53">Wykowska et al., 2016</xref>). 
Several studies investigated the relationships between user needs and assistive robot features when deployed inside integrated care solutions for older adults living alone in their homes (see, e.g., (<xref ref-type="bibr" rid="B13">Cesta et al., 2018</xref>; <xref ref-type="bibr" rid="B15">Cortellessa et al., 2021</xref>)). A crucial requirement for effective assistive robotic systems is their ability to deal with a high variety of situations and <italic>contextualize</italic> their interactions according to living contexts and habits (or preferences) of assisted people (<xref ref-type="bibr" rid="B41">Rossi et al., 2017</xref>; <xref ref-type="bibr" rid="B10">Bruno et al., 2019</xref>; <xref ref-type="bibr" rid="B51">Umbrico et al., 2020c</xref>). A key current challenge consists in realizing advanced control systems endowing assistive robots with a rich portfolio of high-level cognitive and interaction capabilities (<xref ref-type="bibr" rid="B37">Nocentini et al., 2019</xref>; <xref ref-type="bibr" rid="B20">Fiorini et al., 2020a</xref>) to realize <italic>personalized</italic> and <italic>adaptive</italic> assistance (<xref ref-type="bibr" rid="B45">Tapus et al., 2007</xref>; <xref ref-type="bibr" rid="B49">Umbrico et al., 2020a</xref>; <xref ref-type="bibr" rid="B2">Andriella et al., 2022</xref>) and thus achieve a good level of <italic>acceptance</italic> (<xref ref-type="bibr" rid="B41">Rossi et al., 2017</xref>; <xref ref-type="bibr" rid="B35">Moro et al., 2018</xref>).</p>
<p>This study presents a cognitive system for assistive robots that rely on ontology-based representation and reasoning capabilities to support healthcare professionals and elderly users during assessment and therapy administration. More specifically, the presented approach pursues a human-in-the-loop methodology that leverages a &#x201c;robot-based&#x201d; user profiling and artificial intelligence (AI) representation and reasoning features/services to support decision-making processes of healthcare assistants. The objective is, on the one hand, to support healthcare professionals during patient assessment and therapy administration and, on the other hand, to provide assistive robots with <italic>personalization</italic> and <italic>adaptability</italic> features to support patients characterized by heterogeneous health-related needs. Taking inspiration from cognitive architecture research (<xref ref-type="bibr" rid="B28">Langley et al., 2009</xref>; <xref ref-type="bibr" rid="B32">Lieto et al., 2018</xref>; <xref ref-type="bibr" rid="B27">Kotseruba and Tsotsos, 2020</xref>), we proposed the integration of AI-based features, that is, <italic>knowledge representation and reasoning</italic> and <italic>automated planning</italic> to 1) define a human-in-the-loop process for continuous evaluation and treatment of patients and; 2) to dynamically adapt robot behaviors to the specific needs and interaction abilities of patients.</p>
<p>The system is deployed on a social assistive robot and validated in a realistic scenario. We showed how an assistive robot endowed with cognitive control features is able to autonomously contextualize its behavior and effectively support both patients and clinicians in the synthesis of personalized cognitive interventions. A key point stands in the <italic>mutual assistance</italic> between the clinician and the robot through a &#x201c;mixed-initiative&#x201d; work flow. The role of the clinician is essential to <italic>refine</italic> and <italic>validate</italic> decisions made by the robot. In turn, the robot supports the clinician in the <italic>screening</italic> and <italic>monitoring</italic> of patients as well as the <italic>administration</italic> of a therapy. In this regard, the main contribution of the work concerns the correlation between <italic>standard</italic> screening practices used by therapists with the internal user model used by the robot. This correlation allows a robot to correctly interpret health-related data about patients provided by therapists. In particular, it enables the <italic>transfer</italic> of knowledge from clinicians to robots and is thus crucial to synthesizing effective and personalized assistive behaviors.</p>
<p>A profiling procedure is performed through a robotic platform during the administration of the Mini-Mental State Examination (MMSE) to patients with suspected cognitive decline. As shown in <xref ref-type="bibr" rid="B42">Rossi et al. (2018</xref>) and <xref ref-type="bibr" rid="B17">Di Nuovo et al. (2019</xref>), the use of a robot guarantees test neutrality and attainable standardization for the administration of cognitive tests. Data about the quality of interaction are extracted to refine interaction modalities and thus shape robot behaviors when interacting with users. <italic>User modeling</italic> capabilities of the robot rely on an ontological reification of the <italic>International Classification of Functioning, Disability and Health</italic>
<xref ref-type="fn" rid="fn1">
<sup>1</sup>
</xref> (ICF). The obtained ontological model defines a well-structured and general reference framework suitable to autonomously reason about the health status of a person and elicit fitting interaction parameters. Many works in the literature deal with user modeling and propose different frameworks, depending on the specific application needs (<xref ref-type="bibr" rid="B29">Lemaignan et al., 2010</xref>; <xref ref-type="bibr" rid="B5">Awaad et al., 2015</xref>; <xref ref-type="bibr" rid="B47">Tenorth and Beetz, 2015</xref>; <xref ref-type="bibr" rid="B30">Lemaignan et al., 2017</xref>; <xref ref-type="bibr" rid="B40">Porzel et al., 2020</xref>).</p>
<p>Concerning healthcare and assistive domains, user modeling is particularly crucial to support a <italic>user-centered design</italic> and realize effective assistive technologies (<xref ref-type="bibr" rid="B31">LeRouge et al., 2013</xref>). Other works have used the ICF framework as a reference to characterize cognitive and physical conditions of users. For example, the work (<xref ref-type="bibr" rid="B26">Kostavelis et al., 2019</xref>) introduced a novel robot-based assessment methodology of users&#x2019; skills to characterize the needed level of daily assistance. The work (<xref ref-type="bibr" rid="B19">Filippeschi et al., 2018</xref>) used the ICF to characterize cognitive and physical skills of users and accordingly represent the outcomes of the implemented robot-based assessment procedures. Similarly, the work (<xref ref-type="bibr" rid="B24">Garc&#xed;a-Betances et al., 2016</xref>) used ICF to represent needs and requirements of different types of users and support the user-centered design of ICT technologies. In particular, this work integrates an ontological model of ICF into the cognitive architecture ACT-R (<xref ref-type="bibr" rid="B1">Anderson et al., 1997</xref>) to simulate the behaviors of different types of user.</p>
<p>Nevertheless, the aforementioned works present a &#x201c;rigid&#x201d; and static representation as they usually do not rely on a well-structured ontological formalism to characterize <italic>knowledge</italic> about users (i.e., <italic>user profiles</italic>) in different situations. Related works usually do not integrate online reasoning mechanisms that allow assistive robots to <italic>autonomously reason</italic> about the specific needs of a user and autonomously (or partially autonomously) <italic>decide</italic> the kind of intervention that best fit such needs. Conversely, our approach pursues a highly flexible solution implementing the cognitive capabilities needed to <italic>understand</italic> health conditions of users and (autonomously) personalize assistance accordingly, under the supervision of a human expert.</p>
</sec>
<sec id="s2">
<title>2 Continuous Assessment and Monitoring</title>
<p>We aimed at leveraging the interaction capabilities of socially assistive robots to support clinicians in assessing and monitoring the cognitive state of patients. We envisage a multi-actor HRI approach in which an assistive robot can facilitate the interactions. In particular, we proposed a continuous assessment and monitoring procedure in which a robot supports a clinician by: 1) <italic>proposing</italic> a set of tests suitable for the specific needs of a patient, 2) <italic>administering</italic> the chosen tests, and 3) <italic>monitoring</italic> (and reasoning over) the performance of the patient. In this way, the employment of assistive robots can alleviate clinicians in some of their activities and, thus, support them in dealing with a larger number of patients. In addition, a robot can continuously and proactively stimulate patients by administrating suitable exercises and generally motivating the <italic>participation</italic> and the <italic>adherence</italic> to the therapy.</p>
<sec id="s2-1">
<title>2.1 Mixed-Initiative Design of Cognitive Stimulation Therapy</title>
<p>We envisage a novel cognitive intervention program where a robot constantly supports clinicians in evaluating/monitoring the cognitive state of a patient and in making decisions about the <italic>intervention plan</italic> to follow. The general structure is depicted in <xref ref-type="fig" rid="F1">Figure 1</xref>. The process fosters a continuous &#x201c;feedback loop&#x201d; between the robot and the clinician. It interleaves patient&#x2013;robot interactions (i.e., steps 1.1, 2.1, and 3.1 in <xref ref-type="fig" rid="F1">Figure 1</xref>) with direct clinician validation and involvement (i.e., steps 1.2, 2.2, and 3.2 in <xref ref-type="fig" rid="F1">Figure 1</xref>). The interleaving of steps performed by the two actors aimed at achieving a fruitful synergy combining the computational capabilities of the robot with the analytical capabilities of the clinician.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Workflow of the devised cognitive intervention mixing robot and human skills.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g001.tif"/>
</fig>
<p>It is worth noticing that the clinician is constantly involved in the decisional process and maintains control over the decisions made by the robot, validating them. Each cycle consists of a number of human&#x2013;robot interaction steps aiming at 1) profiling the health state of a person (steps 1.1 and 1.2), 2) defining an intervention plan suitable for the specific health needs of a patient (steps 2.1 and 2.2) and, 3) executing the plan by administrating exercises within a certain temporal horizon (e.g., a week or a month) and evaluating outcomes (steps 3.1 and 3.2).</p>
<p>The cyclic repetition of these phases allows a clinician to continuously monitor and assess the <italic>evolving cognitive state</italic> of a patient with the support of a robot. Two &#x201c;feedback chains&#x201d; are considered as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. One feedback chain assesses and (if necessary) updates the profile of the user at the end of each cycle. In this way, it is possible to keep track of the outcomes of synthesized plans, keep track of changes in the health state of a person, and adapt the next cycle accordingly. Another feedback chain concerns the continuous construction of a dataset containing information about the evolution of user profiles and the related outcomes of the cognitive interventions. This information would, in particular, allow the clinician to analyze the evolution over time of the state of a user and thus make better decisions about next steps.</p>
</sec>
<sec id="s2-2">
<title>2.2 An AI-Based Cognitive Architecture</title>
<p>To implement the process of <xref ref-type="fig" rid="F1">Figure 1</xref>, an assistive robot should be able to reason about the health state/conditions of a patient and autonomously make suitable decisions. In particular, a robot needs a number of properly designed and integrated <italic>cognitive capabilities</italic> in order to contextualize assistive behaviors and effectively support both patients and clinicians. Taking inspiration from cognitive architectures (<xref ref-type="bibr" rid="B28">Langley et al., 2009</xref>; <xref ref-type="bibr" rid="B27">Kotseruba and Tsotsos, 2020</xref>), we focus on the development and integration of AI-based technologies supporting <italic>knowledge representation and reasoning</italic> and <italic>decision making and problem solving</italic>. The integration of knowledge representation and reasoning with automated planning has been shown to be effective for the synthesis of flexible robot behaviors. They are particularly crucial to realize advanced (cognitive) controllers capable of (autonomously) <italic>personalize</italic> and <italic>adapt</italic> robot behaviors to the specific features of different application scenarios, for example, <italic>service robots</italic> (<xref ref-type="bibr" rid="B5">Awaad et al., 2015</xref>; <xref ref-type="bibr" rid="B47">Tenorth and Beetz, 2015</xref>; <xref ref-type="bibr" rid="B40">Porzel et al., 2020</xref>), <italic>daily assistance</italic> (<xref ref-type="bibr" rid="B49">Umbrico et al., 2020a</xref>; <xref ref-type="bibr" rid="B15">Cortellessa et al., 2021</xref>), and <italic>manufacturing</italic> (<xref ref-type="bibr" rid="B8">Borgo et al., 2019</xref>).</p>
<p>
<xref ref-type="fig" rid="F2">Figure 2</xref> provides an overview of modules developed to support the considered capabilities and their integration within a &#x201c;cognitive loop.&#x201d; On the one hand, an <italic>ontology-based representation and reasoning</italic> module allows an assistive robot to internally represent cognitive and physical information about an assisted person and contextualize its interaction and intervention capabilities accordingly. Pursuing a foundational approach (<xref ref-type="bibr" rid="B25">Guarino, 1998</xref>), we defined a domain ontology based on the ICF classification to represent user profiles and reason on the health state of a person. It relies on DOLCE<xref ref-type="fn" rid="fn2">
<sup>2</sup>
</xref> as a theoretical foundation, and was written in OWL (<xref ref-type="bibr" rid="B4">Antoniou and Harmelen, 2009</xref>) using Prot&#xe9;g&#xe9;<xref ref-type="fn" rid="fn3">
<sup>3</sup>
</xref>. The <italic>robot knowledge</italic> and related knowledge-reasoning modules have been developed in Java using the open-source library Apache Jena<xref ref-type="fn" rid="fn4">
<sup>4</sup>
</xref>. On the other hand, a <italic>decision making and problem solving</italic> module allows an assistive robot to synthesize and execute intervention <italic>plans</italic>, personalized according to the &#x201c;recommendations&#x201d; extracted from the robot knowledge. The synthesis and execution of such plans rely on PLATINUm (<xref ref-type="bibr" rid="B48">Umbrico et al., 2017</xref>), a timeline-based planning and execution framework deployed in assistive scenarios (<xref ref-type="bibr" rid="B49">Umbrico et al., 2020a</xref>) and concrete human&#x2013;robot collaboration manufacturing scenarios (<xref ref-type="bibr" rid="B38">Pellegrinelli et al., 2017</xref>). The contribution of this work specifically focuses on the developed ontology-based representation and reasoning capabilities.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Overview of the AI-based cognitive architecture.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g002.tif"/>
</fig>
</sec>
</sec>
<sec id="s3">
<title>3 Ontology-Based Modeling of Health-Related Needs</title>
<p>A user profile should encapsulate a rich and heterogeneous set of information characterizing the general health state of a person. We proposed an ontological model of health needs based on the <italic>International Classification of Functioning, Disability and Health</italic> (ICF), defined by the World Health Organization (WHO) (<xref ref-type="bibr" rid="B52">World Health Organization, 2001</xref>). User profiles are thus represented on top of such ICF-based ontological models in order to provide an assistive robot with a complete characterization of patients&#x2019; needs.</p>
<sec id="s3-1">
<title>3.1 Modeling Health-Related Knowledge About Patients</title>
<p>There are several factors that can be considered when modeling users. Different choices would support different robot behaviors and different levels/types of adaptation. Broadly speaking, the creation of a complete and effective user model is crucial to realize human&#x2013;robot interactions characterized by adaptability, trust building, effective communication, and explainability (<xref ref-type="bibr" rid="B44">Tabrez et al., 2020</xref>). In the context of cognitive assessment with assistive robots, many works tend to focus on personality, emotions, and engagement as aspects of the user profile that the robot should take into account (<xref ref-type="bibr" rid="B43">Sorrentino et al., 2021</xref>). For example, <xref ref-type="bibr" rid="B46">Tapus et al. (2008</xref>) described a socially assistive robot therapist designed to monitor, assist, encourage, and socially interact with post-stroke users engaged in rehabilitation exercises. This work investigated the role of the robot&#x2019;s personality (i.e., introvert&#x2013;extrovert) in the therapy process taking into account the personality traits of a user. Similarly, <xref ref-type="bibr" rid="B42">Rossi et al. (2018</xref>) investigated the influence of the user&#x2019;s personality traits on the perception of the Pepper robot, administrating a cognitive test. Their results suggested that the usage of a robot in this context improved socialization among the participants. On the other hand, the works of <xref ref-type="bibr" rid="B16">Desideri et al. (2019</xref>) and <xref ref-type="bibr" rid="B39">Pino et al. (2020</xref>) showed that the usage of a robotic platform for cognitive stimulation engaged more participants to the therapy. In the mentioned works, the influence of each aspect was mostly investigated offline and was related to the observed quality of the interaction. 
In addition, the robotic platform was adopted as a medium for the administration of the clinical protocol, without providing any cues on how the information collected by the robot could be used for planning future interventions. The assumption behind this work is that the robot should be able to adapt its intervention, by focusing on the quality of the interaction, but also on the user cognitive profile.</p>
<p>Concerning our contribution, other works have used the ICF framework to characterize cognitive and physical conditions of users. The work (<xref ref-type="bibr" rid="B26">Kostavelis et al., 2019</xref>) introduced a novel robot-based assessment methodology of users&#x2019; skills to characterize the needed level of daily assistance. The work (<xref ref-type="bibr" rid="B19">Filippeschi et al., 2018</xref>) used ICF to characterize cognitive and physical skills of users and accordingly represent the outcomes of the implemented robot-based assessment procedures. Similarly, the work (<xref ref-type="bibr" rid="B24">Garc&#xed;a-Betances et al., 2016</xref>) used ICF to represent needs and requirements of different types of users and support a user-centered design of ICT technologies. This work integrates an ontological model of ICF into the cognitive architecture ACT-R (<xref ref-type="bibr" rid="B1">Anderson et al., 1997</xref>) to simulate the behaviors of different types of user. Nevertheless, these works present a &#x201c;rigid&#x201d; and static representation as they usually do not rely on a well-structured ontological formalism to dynamically contextualize <italic>knowledge</italic> about users (i.e., <italic>user profiles</italic>) in different situations. Such works usually do not integrate online reasoning mechanisms to allow assistive robots to <italic>autonomously reason</italic> about the specific needs of a user and autonomously (or partially autonomously) <italic>decide</italic> the kind of intervention that best fit such needs. Conversely, our approach pursues a highly flexible solution implementing the cognitive capabilities needed to <italic>understand</italic> health conditions of users and (autonomously) personalize assistance accordingly, under the supervision of a human expert.</p>
</sec>
<sec id="s3-2">
<title>3.2 ICF-Based Representation of User Profiles</title>
<p>The ICF classification aimed at organizing and documenting information on functioning and disability. It pursues the interpretation of functioning as a dynamic interaction among health conditions of a person, environmental factors, and personal factors. Each defined concept characterizes a specific aspect concerning the physical or cognitive functioning of a person. The level of functioning of each physical/cognitive aspect is represented by the following scale: 1) the value 0 denotes <italic>no impairment</italic>, 2) the value 1 denotes <italic>soft impairment</italic>, 3) the value 2 denotes <italic>medium impairment</italic>, 4) the value 3 denotes <italic>serious impairment</italic>, and 5) the value 4 denotes <italic>full impairment</italic>.</p>
<p>The ICF classification is organized into two parts. A part deals with <italic>functioning and disabilities</italic> while the other part deals with <italic>contextual factors</italic>. The former is further organized into the components <italic>body functions</italic> and <italic>body structures</italic> that are the ones considered in the design of the ontological model. The body is an integral part of human functioning and the bio-psychosocial model considers it in interaction with other components. Body functions are thus the physiological aspects of body systems, while structures are the anatomical support (e.g., sight is a function while the eye is a structure). Several ICF concepts describe the functioning of mental faculties and have been used to define user profiles. The concept <monospace>OrientationFunctioning</monospace> characterizes the functioning of general mental functions of knowing and ascertaining one&#x2019;s relation to time, to place, to self, objects, and space. The concept <monospace>AttentionFunctioning</monospace> characterizes specific mental functions of focusing on an external stimulus or internal experience for the required period of time. The concept <monospace>MemoryFunctioning</monospace> characterizes specific mental functions of encoding, storing information, and retrieving it as needed.</p>
<p>Other ICF concepts have been instead used to characterize the <italic>interaction capabilities</italic> of a person and thus identify <italic>interaction preferences</italic> determining the way a robot should interact with a person while administering exercises. The concept <monospace>SeeingFunctioning</monospace> models specific functions related to sensing the presence of light and sensing the form, size, shape, and color of visual stimuli. The concept <monospace>HearingFunctioning</monospace> models sensory functions related to sensing the presence of sounds and discriminating the location, pitch, loudness, and quality of sounds.</p>
</sec>
</sec>
<sec id="s4">
<title>4 Knowledge Reasoning for Personalization</title>
<p>Information gathered during the profiling phase and its representation based on ICF allow an assistive robot to autonomously reason about the intervention plan that &#x201c;best fit&#x201d; the specific needs of a person (e.g., <italic>what</italic> kind of cognitive exercise a person needs) and the way such actions should be executed (e.g., <italic>how</italic> a robot should interact with a person to effectively administrate cognitive exercises).</p>
<sec id="s4-1">
<title>4.1 From Impairments to Intervention Actions</title>
<p>Following ICF classification, the ontological model defines a number of concepts that represent different <monospace>FunctioningQuality</monospace> of a person. As mentioned in <xref ref-type="sec" rid="s2-2">Section 2.2</xref>, we rely on DOLCE as foundational ontology. Then, the ICF qualities are modeled as subclasses of <monospace>DOLCE:Quality</monospace> and are associated to entities of type <monospace>DOLCE:Person</monospace>. The concept <monospace>Profile</monospace> defines a descriptive context of the overall functioning qualities of a particular person. It represents the outcome of a profiling phase and consists of a number of <monospace>Measurements</monospace>. Each measurement associates the evaluation of a functioning quality to a <italic>value</italic> representing the assigned ICF score (i.e., the outcome of the evaluation). Knowledge-reasoning processes analyze such measurements (i.e., a user profile) to autonomously <italic>infer</italic> the physical or cognitive impairments characterizing the functioning state of a person. <xref ref-type="disp-formula" rid="e1">Eq. 1</xref> in the following section shows a general inference rule used to detect such impairments.<disp-formula id="e1">
<mml:math id="m1">
<mml:mtable class="array">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mo>&#x2200;</mml:mo>
<mml:mtext>x,&#x2009;y,&#x2009;w.</mml:mtext>
<mml:mo>&#x2203;</mml:mo>
<mml:mtext>z.</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
</mml:mrow>
<mml:mtext>Measurement</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
</mml:mrow>
<mml:mtext>x</mml:mtext>
<mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>measures</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>x,&#x2009;y</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>FunctioningQuality</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>y</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>hasOutcome</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>y,&#x2009;w</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>FunctioningRegion</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>w</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>greaterThan</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>hasICFscore</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>w</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mtext>,&#x2009;0</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left"/>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mtext>lowerThan</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>hasICFscore</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>w</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mtext>,&#x2009;5</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2192;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mtext>Impairment</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>z</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mtext>concerns</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mtext>z,&#x2009;y</mml:mtext>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2227;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mtext>satisfies</mml:mtext>
<mml:mrow>
<mml:mo>(</mml:mo>
</mml:mrow>
<mml:mtext>z,&#x2009;x</mml:mtext>
<mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(1)</label>
</disp-formula>
</p>
<p>In addition to this knowledge, the ontology characterizes <italic>properties</italic> of intervention plans. A robot can indeed be endowed with a number of &#x201c;programs&#x201d; implementing known tests suitable to evaluate/stimulate different functioning qualities, for example, the <italic>Free and Cued Selective Reminding Test</italic> for episodic long-term memory assessment or the <italic>Trailing Making Test form A</italic> for selective attention assessment.</p>
<p>Taking inspiration from some works in manufacturing that define the concept of function (<xref ref-type="bibr" rid="B9">Borgo and Leit&#xe3;o, 2004</xref>; <xref ref-type="bibr" rid="B7">Borgo et al., 2009</xref>), we characterized intervention actions of a robot in terms of their <italic>effects</italic> on the functioning qualities of a person. The defined semantics characterizes these &#x201c;programs&#x201d; according to the functioning qualities they address. For example, an interactive program implementing the <italic>Free and Cued Selective Reminding Test</italic> is classified as an intervention action whose effects can &#x201c;improve&#x201d; (i.e., <italic>has positive effects on</italic>) the functioning quality <monospace>MemoryFunctioning</monospace>. The obtained ontological model fosters an integrated representation of knowledge about the health state of a person and intervention capabilities of a robot. Knowledge processing mechanisms then use this integrated knowledge to infer a set of actions suited to address the inferred impairments of a patient (<xref ref-type="bibr" rid="B50">Umbrico et al., 2020b</xref>). For example, if <monospace>MemoryFunctioning</monospace> is inferred as <italic>soft impairment</italic> (value 1) and <monospace>AttentionFunctioning</monospace> as <italic>no impairment</italic> (value 0) only actions implementing the <italic>Free and Cued Selective Reminding Test</italic> (or other similar tests) are inferred as suitable to a patient.</p>
</sec>
<sec id="s4-2">
<title>4.2 Reasoning on Interaction Preferences</title>
<p>A <monospace>Profile</monospace> encapsulates a rich set of information that can be analyzed to infer interaction capabilities of a person and define robot interaction preferences accordingly. If the analysis of a profile infers a <italic>medium impairment</italic> of the quality <monospace>HearingFunctioning</monospace> then, the interactions between the patient and the robot should rely mainly on visual and textual messages rather than voice and audio. In case that audio interactions cannot be avoided (e.g., recorded audio instructions and recommendations or video conferences) it would be possible to properly set the sound level of the robot in order to help the assisted person as much as possible.</p>
<p>Knowledge-reasoning mechanisms thus also infer <italic>how</italic> intervention plans should be carried out by the robot in order to effectively interact with the considered patient. In this regard, we have defined four interaction parameters characterizing the execution of robot actions: 1) <italic>sound level</italic>, 2) <italic>subtitle</italic>, 3) <italic>font size</italic>, and 4) <italic>explanation</italic>. The <italic>sound level</italic> is an enumeration parameter with values {<italic>none</italic>, <italic>regular</italic>, <italic>high</italic>} specifying the volume of audio communications and messages from the robot to the patient. Patients with soft or medium hearing impairment represented as <monospace>HearingFunctioning</monospace> would need a high sound level, while audio would be completely excluded for persons with serious impairments in order to use different interaction modalities. The <italic>subtitle</italic> is an enumeration parameter with values {<italic>none</italic>, <italic>yes</italic>, <italic>no</italic>} specifying the need to support audio messages through text. Patients with no, soft, or medium impairment of <monospace>SeeingFunctioning</monospace> and medium or serious impairment of <monospace>HearingFunctioning</monospace> would need subtitles to better understand instructions and messages from the robot.</p>
<p>The <italic>font size</italic> is a binary parameter with values {<italic>regular</italic>, <italic>large</italic>} specifying the size of the font of text messages and subtitles, if used. Patients with medium impairment of <monospace>SeeingFunctioning</monospace> would need <italic>large</italic> fonts in text messages in order to better read their content. Finally, <italic>explanation</italic> is a binary parameter (i.e., <italic>yes</italic> or <italic>no</italic>) specifying the need to explain an exercise to a patient before its execution. Such instructions would be particularly needed for patients with impaired <monospace>MemoryFunctioning</monospace> or <monospace>OrientationFunctioning</monospace>. Clearly, the way such explanations are carried out complies with the interaction parameters described earlier.</p>
</sec>
</sec>
<sec id="s5">
<title>5 Feasibility Assessment</title>
<p>To demonstrate the feasibility of our cycle-based approach, we considered ASTRO, an assistive robot equipped with several sensors (i.e. laser, RGB-D camera, microphones, speakers, and force sensors) and two tablets (<xref ref-type="bibr" rid="B21">Fiorini et al., 2020b</xref>) (see <xref ref-type="fig" rid="F3">Figure 3</xref>). We deployed on ASTRO the architecture proposed in <xref ref-type="sec" rid="s2">Section 2</xref> augmenting its capabilities with the cognitive functionalities presented in <xref ref-type="sec" rid="s2-2">Section 2.2</xref> in order to implement the human-in-the-loop cycle proposed in <xref ref-type="sec" rid="s2-1">Section 2.1</xref>. Then, we demonstrated the feasibility of such robotic functionalities to support a clinician while responding to specific needs of some older adult users. In particular, eight elderly persons, 3 males and 5 females (avg. age 82.25 years old, range 72&#x2013;91&#xa0;years old), were enrolled for this study. All the recruited subjects live in a residential facility in the same geographical region<xref ref-type="fn" rid="fn5">
<sup>5</sup>
</xref>.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Experimental set-up: ASTRO robot is administering the MMSE test to the subject.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g003.tif"/>
</fig>
<p>The ASTRO robot equipped with the new proposed functionalities was tested to demonstrate its ability to support a clinician in realizing the functionalities to 1) represent user profiles with respect to ICF (<italic>who you are</italic>), 2) synthesize personalized intervention plans (<italic>what you need</italic>) considering a set of 10 cognitive tests typically used to further investigate and evaluate the cognitive state of a person, and 3) select the appropriate interaction modalities of the robot (<italic>how you like it</italic>) among the ones supported by the robotic platform. An off-line analysis and discussion of the results with the clinicians are reported at the end of the section to emphasize the importance of the <italic>human-in-the-loop</italic> approach (see again <xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<sec id="s5-1">
<title>5.1 Demonstrating User Profiling and Profile Representation</title>
<p>As shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, user profiling is necessary at the beginning of each intervention cycle to set/update robot knowledge about the health state of an assisted person. The outcome of this step is a <italic>profile</italic> describing the cognitive state of a person with respect to the developed ontological model. A correct acquisition of this information is crucial for the efficacy of the synthesized intervention plan. The robot indeed relies on the <italic>user profile</italic> to <italic>infer</italic> the set of cognitive tests (i.e., <italic>stimuli</italic>) that are suitable for the considered user and then <italic>decide personalized intervention plan</italic> (i.e., the further assessment).</p>
<p>The user profile is generated according to the scoring obtained through the administration of the Mini-Mental State Examination (MMSE). The MMSE represents the most used screening test for cognitive status, and it is adopted worldwide by clinicians to briefly assess persons with suspected dementia. It encompasses 21 items which cover tests of orientation, recall, registration, naming, comprehension, calculation and attention, writing, repetition, drawing, and reading (<xref ref-type="bibr" rid="B23">Folstein et al., 1983</xref>). The cognitive status level is obtained by summing the score of the individual items and normalizing it based on the educational level and the age of the patient. Decreasing scores of repeated tests highlight deterioration in cognition. In particular, the participants were asked to undergo the MMSE administered by ASTRO. The assessment was performed with a Wizard-of-Oz (WOz) method. A clinician guides the robot through the examination phases using a dedicated web interface. The patient is not aware of the presence of the clinician and he/she directly interacts with the robot. The web interface allows the clinician to select the appropriate MMSE tests to perform. The tests require different interaction modalities between the user and the robot, for example, asking questions to the user or showing images to the user through the front tablet. The clinician can ask the robot to repeat the test if necessary.</p>
<p>The caregiver stores the results of the assessment into the robot knowledge base through the same dedicated technical interface. The overall score of the MMSE is automatically processed by the robot at the end of the session, by parsing the annotated answers. The robot automatically correlates MMSE items with (relevant) ICF functions the robot uses to represent the cognitive state of a patient. According to this correlation, the robot then builds a user profile by mapping received MMSE scores to ICF scores. <xref ref-type="table" rid="T1">Table 1</xref> shows how each ICF function can be described by one or multiple MMSE categories. This is an original mapping performed by a clinician between MMSE scores and ICF for generating a user profile representation. For instance, the ICF concept <monospace>MemoryFunctioning</monospace> is defined as specific mental functions of registering and storing information and retrieving it as needed. This ICF function can be described by the MMSE items which cover the recalling, counting, and spelling tests. Based on the same similarity approach, the overall mapping shown in <xref ref-type="table" rid="T1">Table 1</xref> is obtained. In order to convert the MMSE scoring of each category into the measured level of impairment of the ICF profiling, a proportional method is used. The current MMSE score (i.e., the number of tasks correctly performed) in one category is compared to the maximum MMSE score achievable in the same category and then converted into the ICF scoring. This mapping is based on an inverted scale of values. For example, if the patient correctly accomplished the requested task of MMSE items (high values of MMSE), he/she gets a lower ICF score (<italic>no impairment</italic>). If the patient partially accomplished the task, an intermediate value of the ICF score is assigned (<italic>soft impairment</italic>). 
If the patient did not accomplish the task, a higher ICF score is attributed (<italic>serious impairment</italic>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Mapping MMSE profiling to ICF profiling.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">ICF function</th>
<th align="center">MMSE item</th>
<th align="center">Maximum score (MMSE)</th>
<th align="center">Total score (MMSE)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Orientation (ORI)</td>
<td>Orientation in time</td>
<td align="char" char=".">5</td>
<td align="char" char=".">10</td>
</tr>
<tr>
<td align="left"/>
<td>Orientation in space</td>
<td align="char" char=".">5</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Attention (ATT)</td>
<td>Counting</td>
<td align="char" char=".">5</td>
<td align="char" char=".">5</td>
</tr>
<tr>
<td align="left">Memory (MEM)</td>
<td>Recalling</td>
<td align="char" char=".">3</td>
<td align="char" char=".">13</td>
</tr>
<tr>
<td align="left"/>
<td>Counting</td>
<td align="char" char=".">5</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td>Spelling</td>
<td align="char" char=".">5</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Perceptual (PER)</td>
<td>Robot&#x2019;s perception</td>
<td align="center">-</td>
<td align="center">-</td>
</tr>
<tr>
<td align="left">High level (HIL)</td>
<td>Recalling</td>
<td align="char" char=".">3</td>
<td align="char" char=".">11</td>
</tr>
<tr>
<td align="left"/>
<td>Counting</td>
<td align="char" char=".">5</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td>Comprehension</td>
<td align="char" char=".">3</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td>Coherent interaction</td>
<td align="center">-</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Language (LAN)</td>
<td>Naming</td>
<td align="char" char=".">2</td>
<td align="char" char=".">4</td>
</tr>
<tr>
<td align="left"/>
<td>Repetition</td>
<td align="char" char=".">1</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td>Writing</td>
<td align="char" char=".">1</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Calculation (CAL)</td>
<td>Counting</td>
<td align="char" char=".">5</td>
<td align="char" char=".">5</td>
</tr>
<tr>
<td align="left">Communication (COM)</td>
<td>Coherent interaction</td>
<td align="center">-</td>
<td align="char" char=".">1</td>
</tr>
<tr>
<td align="left"/>
<td>Incoherent interaction</td>
<td align="center">-</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Speaking (SPE)</td>
<td>Naming</td>
<td align="char" char=".">2</td>
<td align="char" char=".">3</td>
</tr>
<tr>
<td align="left"/>
<td>Reception</td>
<td align="char" char=".">1</td>
<td align="left"/>
</tr>
<tr>
<td align="left">Writing (WRI)</td>
<td>Writing</td>
<td align="char" char=".">1</td>
<td align="char" char=".">1</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The video and audio of the administration sessions were recorded by the robot&#x2019;s frontal camera. The recorded videos were off-line analyzed by the clinician to extract the quality of interaction (e.g., the number of robot&#x2019;s repetition) with the robot and additional evidence of the cognitive decline (e.g., coherent and incoherent interaction) (<xref ref-type="bibr" rid="B43">Sorrentino et al., 2021</xref>). <xref ref-type="table" rid="T1">Table 1</xref> reports the complete list of extracted parameters. These data are then merged with the individual MMSE score returned by the robot and manually mapped into the ICF scores, following the proposed mapping. <xref ref-type="fig" rid="F4">Figure 4</xref> reports the final results for each user. Data were then analyzed with the proposed framework and discussed with the clinician to corroborate the analysis. The clinician found an adequate representation of the profiles and validated all of them.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Final ICF scoring of users.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g004.tif"/>
</fig>
</sec>
<sec id="s5-2">
<title>5.2 Demonstrating Intervention Personalization</title>
<p>The subsequent considered actions consist in the selection of a number of cognitive tests to administer, regarding the <italic>inferred</italic> impaired functioning qualities of a patient. The assistive capabilities of such actions depend on the features of the associated cognitive tests and thus on the functioning qualities stimulated by them. According to the inferred impairments and the recommendations generated by the developed knowledge processing mechanisms, a number of these tests are selected for administration.</p>
<p>We have considered a total number of 10 cognitive tests that are typically used to further investigate and evaluate the cognitive state of a person. The <italic>Free and Cued Selective Reminding Test</italic>, the <italic>Rey&#x2019;s Figure Test</italic>, the <italic>Forward Digit Test</italic>, and the <italic>Backward Digit Span Test</italic> evaluate and stimulate the functioning quality <monospace>MemoryFunctioning</monospace>. The <italic>Trailing Making Test form A</italic> and the <italic>Trailing Making Test form B</italic> evaluate and stimulate the functioning quality <monospace>AttentionFunctioning</monospace>. The <italic>Stroop Test</italic> evaluates and stimulates the functioning quality <monospace>OrientationFunctioning</monospace>. The <italic>Boston Naming Test 40-item</italic>, the <italic>Animals Test</italic>, and the <italic>Denomination Test</italic> evaluate and stimulate the functioning quality <monospace>LanguageFunctioning</monospace>.</p>
<p>The experiments have been performed with the objective to demonstrate the capability of combining this knowledge with the ICF scores of <xref ref-type="fig" rid="F4">Figure 4</xref> (i.e., user profiles) to determine actions that fit the cognitive status of the profiled users and thus achieve <italic>personalization</italic>. <xref ref-type="fig" rid="F5">Figure 5</xref> shows results of the experiments. It specifically shows a heat-map with the <italic>ranking values</italic> of known cognitive tests (i.e., actions) for each user profile. The value expresses the <italic>significance</italic> of a specific test/action considering the cognitive impairments inferred for a particular user profile. The higher the computed ranking value the more is the relevance of a particular action for the corresponding user. <xref ref-type="fig" rid="F6">Figure 6</xref> then shows aggregated numbers pointing out the total level of impairments of the considered users in <xref ref-type="fig" rid="F6">Figure 6A</xref> and the inferred impact of each action in <xref ref-type="fig" rid="F6">Figure 6B</xref>. These figures clearly show the most compromised users (i.e., the users with the highest level of impairment) and the most useful actions (i.e., the actions that address the higher number and most significant impairments of users) according to robot knowledge.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Ranking intervention actions for different profiles (action enumeration: 1) Denomination test. 2) Forward digit test. 3) Free and Cued Selective Reminding Test. 4) Stroop test. 5) Animals test. 6) Backward digit span test. 7) Rey&#x2019;s figure test. 8) Trailing Making Test form B. 9) Trailing Making Test form A. 10) Boston Naming Test 40-items.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Charts show <bold>(A)</bold> inferred (cumulative) impairment state of patients and; <bold>(B)</bold> inferred impact of known actions on patients.</p>
</caption>
<graphic xlink:href="frobt-09-883814-g006.tif"/>
</fig>
<p>The higher the ranking value the higher the &#x201c;seriousness&#x201d; of the associated impairments and consequently the significance of a test with respect to the cognitive state of a user. An example is <italic>profile 8</italic> whose cognitive state is characterized by several impairments as can be seen from the ICF scores resulting from the outcome of MMSE in <xref ref-type="fig" rid="F4">Figure 4</xref>. Consequently, as shown in <xref ref-type="fig" rid="F5">Figure 5</xref>, many of the considered tests have been computed as relevant to the cognitive state of this patient. Higher values have been computed for tests addressing impaired qualities, for example, the <italic>Forward Digit Test</italic> addressing <monospace>MemoryFunctioning</monospace> (medium impairment in <xref ref-type="fig" rid="F4">Figure 4</xref>) or the <italic>Trailing Making Test form A</italic> addressing <monospace>AttentionFunctioning</monospace> (serious impairment in <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<p>Vice versa low ranking values have been computed for not so serious impairments. An example is the <italic>user profile 5</italic> whose cognitive state is characterized by few soft impairments (see again the ICF scores resulting from the outcome of MMSE in <xref ref-type="fig" rid="F4">Figure 4</xref>). In this case, a &#x201c;minimum&#x201d; ranking value has been computed only for the <italic>Denomination Test</italic>, the <italic>Stroop Test</italic>, and the <italic>Boston Naming Test 40-item</italic> that address the soft impaired functioning qualities <monospace>OrientationFunctioning</monospace> and <monospace>LanguageFunctioning</monospace>. The outcome of the knowledge-based reasoning mechanisms has been assessed by an expert clinician. The results of this validation are reported in <xref ref-type="sec" rid="s5-4">Section 5.4</xref>.</p>
</sec>
<sec id="s5-3">
<title>5.3 Demonstrating Interaction Personalization</title>
<p>Once a personalized set of interventions has been defined, user profiles are further evaluated to decide how such actions should be performed (i.e., <italic>interaction preferences</italic>). This reasoning step relies on a number of inference rules that link ICF scores to the interaction preferences introduced in <xref ref-type="sec" rid="s4-2">Section 4.2</xref>. We have considered ICF scores concerning <monospace>PerceptualFunctioning</monospace> (PER) and <monospace>MemoryFunctioning</monospace> (MEM). MEM is linked to the interaction preference <italic>explanation</italic> as described in <xref ref-type="sec" rid="s4-2">Section 4.2</xref>. PER is linked to the interaction preferences <italic>sound level</italic>, <italic>font size</italic>, and <italic>subtitles</italic>. As shown in <xref ref-type="table" rid="T1">Table 1</xref>, a clinician assigns a score to PER by evaluating the number of the robot&#x2019;s repetitions. Given the lack of a precise evaluation of hearing and seeing capabilities of users, we have used PER scores to implicitly <italic>measure</italic> the functioning qualities <monospace>HearingFunctioning</monospace> and <monospace>SeeingFunctioning</monospace> and infer the related interaction preferences as described in <xref ref-type="sec" rid="s4-2">Section 4.2</xref>.</p>
<p>
<xref ref-type="table" rid="T2">Table 2</xref> shows the interaction preferences inferred for the considered users. The results show the capability of the developed knowledge-reasoning mechanisms to contextualize the execution of intervention actions (and thus robot behaviors) by defining a number of coherent interaction parameters. Users with soft or no impairment conditions of <monospace>PerceptualFunctioning</monospace> would not require particular interaction preferences for the execution of the associated intervention actions.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Inferred interaction parameters for different profiles.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">User - Parameter</th>
<th align="center">Sound level</th>
<th align="center">Font size</th>
<th align="center">Subtitle</th>
<th align="center">Explanation</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Profile 1</td>
<td>High</td>
<td>Regular</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td align="left">Profile 2</td>
<td>High</td>
<td>Regular</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td align="left">Profile 3</td>
<td>High</td>
<td>Regular</td>
<td>Yes</td>
<td>None</td>
</tr>
<tr>
<td align="left">Profile 4</td>
<td>High</td>
<td>Large</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td align="left">Profile 5</td>
<td>Regular</td>
<td>Regular</td>
<td>None</td>
<td>None</td>
</tr>
<tr>
<td align="left">Profile 6</td>
<td>High</td>
<td>Regular</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td align="left">Profile 7</td>
<td>High</td>
<td>Regular</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td align="left">Profile 8</td>
<td>High</td>
<td>Large</td>
<td>Yes</td>
<td>Yes</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>An example is <italic>profile 5</italic> that has no impairment of <monospace>PerceptualFunctioning</monospace> (PER &#x3d; 0 in <xref ref-type="fig" rid="F4">Figure 4</xref>) and is associated to the interaction parameters &#x27e8; <italic>regular</italic>, <italic>regular</italic>, <italic>none</italic>, and <italic>none</italic> &#x27e9; in <xref ref-type="table" rid="T2">Table 2</xref>. Vice versa, users with medium or serious impairment conditions of <monospace>PerceptualFunctioning</monospace> would require a specific configuration of robot behaviors for the execution of the associated intervention actions. Examples are <italic>profile 4</italic> and <italic>profile 8</italic> that have serious impairments of <monospace>PerceptualFunctioning</monospace> (PER &#x3d; 4 in <xref ref-type="fig" rid="F4">Figure 4</xref>) and are both associated to the interaction parameters &#x27e8; <italic>high</italic>, <italic>large</italic>, <italic>yes</italic>, and <italic>yes</italic> &#x27e9; in <xref ref-type="table" rid="T2">Table 2</xref>. These experiments show the feasibility of the developed knowledge-reasoning mechanisms in personalizing and adapting robot assistive behaviors to the health needs of different patients.</p>
<p>It is worth noting that the use of domain-dependent rules like the ones defined for PER would not limit the generality of the developed knowledge-reasoning approach. Rather, this situation shows how developed reasoning behaviors can be easily <italic>tailored</italic> to specific needs and features of different assistive scenarios.</p>
</sec>
<sec id="s5-4">
<title>5.4 Off-Line Result Discussion With the Clinician</title>
<p>The system identifies the subjects with a higher level of impairment, who need more attention during further comprehensive neuropsychological testing. Therefore, based on the stored profiles, the system suggests a broader set of further cognitive tests to the users with a higher average of cognitive issues, to provide more informative support for the clinician. We are referring particularly to subject numbers 1, 2, 4, 6, 7, and 8 that, respectively, report a raw score of 16, 17, 12, 18, 15, and 17 out of 30 on MMSE. For example, subjects 1, 6, and 8 were strongly suggested to undergo a similar set of tests. In particular, those subjects, who showed a mild to medium cognitive impairment, were asked to undergo tests related to executive functions and working memory (forward and backward digit span, and Trailing Making Test form A and B). Such cognitive impairments represent a crucial risk factor for the transition from mild cognitive impairment to a full-blown dementia syndrome. In addition, subject number 8, who showed a more severe impairment related to languages, was suggested to undergo also a denomination test and the Boston Naming Test, both tests for the language domain. Such suggestions were not made for the other two subjects. Moreover, the memory function, tested by the Free and Cued Selective Reminding Test, was suggested to be studied in almost all the subjects except for the non-impaired subject numbers 3 and 5.</p>
<p>Interestingly, the system reports less need for further test administration for subject numbers 4 and 7, respectively, the two subjects with the worst cognitive performances. This is an expected performance as the implemented knowledge processing mechanisms assign ranks to intervention actions in a &#x201c;non-linear way.&#x201d; In particular, lower ranks are assigned to tests addressing too compromised functions as they are supposed to be managed separately. That may seem counter-intuitive, but it reflects a common clinical routine. A severely compromised subject&#x2019;s condition makes the clinical picture already clear and further analysis fruitless. Therefore, clinical practice is a balance between the need to accomplish an explicit vision of the case, and the economy of time and resources. On the other hand, regarding the subjects with a non-impaired neuro-cognitive profile (i.e., numbers 3 and 5, with scores of 27 and 29 out of 30), the system proposed fewer tests as informative. In conclusion, such results are aligned with standard clinical practice, thus the assistive robot could represent a useful tool for assessment process refinement.</p>
</sec>
</sec>
<sec id="s6">
<title>6 Conclusion and Future Works</title>
<p>This study presents an original cyclic procedure to support healthcare assistance with robots endowed with a novel integration of AI-based technologies supporting <italic>knowledge representation and reasoning</italic> and <italic>decision making and problem solving</italic>, two crucial capabilities to achieve <italic>personalization</italic> and <italic>adaptation</italic> of assistive behaviors. A <italic>human-in-the-loop</italic> approach is pursued to define a process in which a clinician is involved in the decisional process and an interleaving of cognitive state evaluation and test administration allows her to maintain control over the decisions made by a robot and its resulting assistive behaviors. The approach was demonstrated to be feasible and effective in a realistic scenario with eight participants. A clinician supervised the procedure evaluating the robot&#x2019;s behavior.</p>
<p>This study presented a first concrete result of a research initiative whose long-term goal is to foster the development of intelligent assistive robots capable of supporting healthcare professionals in dealing with a larger number of patients. Indeed, despite the small sample size, the results suggest how the robot&#x2019;s interaction parameters can be fine-tuned to the residual abilities and the cognitive profile of the person it is interacting with. In this sense, a better understanding of patients&#x2019; social, cognitive, and biological aspects will allow assistive robots to represent such information into their cognitive system, and use it to autonomously take more initiative to support both clinicians and patients. According to the feedback obtained during the off-line discussion with the clinicians, the <italic>decision making</italic> module can suggest/schedule an appropriate personalized care plan. This finding suggests that the proposed ontologies based on ICF score can be generalized to be applied to social robots to improve and personalize the human&#x2013;robot interaction as well as to provide a care plan to the caregiver.</p>
<p>The future work plan aims to address two main aspects to overcome current limitations. First, from a technical perspective, it aims to investigate user assessment and profile-building functions (e.g., via machine learning) to better identify user needs and to extend the set of assistive services supported by the cognitive architecture to enlarge the application opportunities. Second, from the user perspective, future work should consider a larger cohort of participants so as to perform a systematic evaluation to assess its concrete effectiveness, usability, and acceptance in real contexts.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s8">
<title>Ethics Statement</title>
<p>Ethical review and approval was not required for the study on human participants in accordance with the local legislation and institutional requirements. The patients/participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s9">
<title>Author Contributions</title>
<p>All authors contributed to the study conception and design. Material preparation, data collection, and analysis were performed by AS, GM, and AU. The first draft of the manuscript was written by AU and AO and all authors commented on previous versions of the manuscript. All authors read and approved the final manuscript.</p>
</sec>
<sec id="s10">
<title>Funding</title>
<p>This research is partially supported by Italian M.I.U.R. under project "SI-ROBOTICS: SocIal ROBOTICS for active and healthy ageing" (PON Ricerca e Innovazione 2014-2020 - G.A. ARS01_01120) and by Regione Toscana under project "Cloudia" (POR FESR 2014-2020). CNR authors are also supported by the EU project "TAILOR: Foundations of Trustworthy AI - Integrating Learning, Optimisation and Reasoning" (G.A. 952215).</p>
</sec>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors, and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn id="fn1">
<label>1</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://www.who.int/standards/classifications">https://www.who.int/standards/classifications</ext-link>
</p>
</fn>
<fn id="fn2">
<label>2</label>
<p>
<ext-link ext-link-type="uri" xlink:href="http://www.loa.istc.cnr.it/dolce/overview.html">http://www.loa.istc.cnr.it/dolce/overview.html</ext-link>
</p>
</fn>
<fn id="fn3">
<label>3</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://protege.stanford.edu">https://protege.stanford.edu</ext-link>
</p>
</fn>
<fn id="fn4">
<label>4</label>
<p>
<ext-link ext-link-type="uri" xlink:href="https://jena.apache.org/index.html">https://jena.apache.org/index.html</ext-link>
</p>
</fn>
<fn id="fn5">
<label>5</label>
<p>All procedures were in accordance with the 1964 Helsinki declaration and its later amendments or comparable ethical standards</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Anderson</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Matessa</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lebiere</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>ACT-R: A Theory of Higher Level Cognition and its Relation to Visual Attention</article-title>. <source>Hum.-Comput. Interact</source>. <pub-id pub-id-type="doi">10.1207/s15327051hci1204_5</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Andriella</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Torras</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Abdelnour</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Aleny&#xe0;</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Introducing CARESSER: A Framework for <italic>In Situ</italic> Learning Robot Social Assistance from Expert Knowledge and Demonstrations</article-title>. <source>User Model. User-Adapted Interact.</source> <pub-id pub-id-type="doi">10.1007/s11257-021-09316-5</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Angelini</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Mugellini</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Khaled</surname>
<given-names>O. A.</given-names>
</name>
<name>
<surname>R&#xf6;cke</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Guye</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Porcelli</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). &#x201c;<article-title>The Nestore E-Coach: Accompanying Older Adults through a Personalized Pathway to Wellbeing</article-title>,&#x201d; in <conf-name>PETRA &#x2019;19: Proceedings of the 12th ACM International Conference on PErvasive Technologies Related to Assistive Environments</conf-name> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>620</fpage>&#x2013;<lpage>628</lpage>. </citation>
</ref>
<ref id="B4">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Antoniou</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Harmelen</surname>
<given-names>F. v.</given-names>
</name>
</person-group> (<year>2009</year>). <source>Web Ontology Language: OWL</source>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>, <fpage>91</fpage>&#x2013;<lpage>110</lpage>. <comment>chap. Web Ontology Language: OWL</comment>. <pub-id pub-id-type="doi">10.1007/978-3-540-92673-3_4</pub-id>
<article-title>Web Ontology Language: OWL.</article-title> </citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Awaad</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Kraetzschmar</surname>
<given-names>G. K.</given-names>
</name>
<name>
<surname>Hertzberg</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>The Role of Functional Affordances in Socializing Robots</article-title>. <source>Int J Soc Robotics</source> <volume>7</volume>, <fpage>421</fpage>&#x2013;<lpage>438</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-015-0281-3</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bertolini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Salvini</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Pagliai</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Morachioli</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Acerbi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Trieste</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>On Robots and Insurance</article-title>. <source>Int J Soc Robotics</source> <volume>8</volume>, <fpage>381</fpage>&#x2013;<lpage>391</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-016-0345-z</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Borgo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Carrara</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Garbacz</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Vermaas</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>A Formal Ontological Perspective on the Behaviors and Functions of Technical Artifacts</article-title>. <source>Aiedam</source> <volume>23</volume>, <fpage>3</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1017/s0890060409000079</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Borgo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Knowledge-based Adaptive Agents for Manufacturing Domains</article-title>. <source>Eng. Comput.</source> <volume>35</volume>, <fpage>755</fpage>&#x2013;<lpage>779</lpage>. <pub-id pub-id-type="doi">10.1007/s00366-018-0630-6</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Borgo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Leit&#xe3;o</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2004</year>). <source>The Role of Foundational Ontologies in Manufacturing Domain Applications</source>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>, <fpage>670</fpage>&#x2013;<lpage>688</lpage>. <comment>chap. The Role of Foundational Ontologies in Manufacturing Domain Applications</comment>. <pub-id pub-id-type="doi">10.1007/978-3-540-30468-5_43</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bruno</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Recchiuto</surname>
<given-names>C. T.</given-names>
</name>
<name>
<surname>Papadopoulos</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Saffiotti</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Koulouglioti</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Menicatti</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Knowledge Representation for Culturally Competent Personal Robots: Requirements, Design Principles, Implementation, and Assessment</article-title>. <source>Int J Soc Robotics</source> <volume>11</volume>, <fpage>515</fpage>&#x2013;<lpage>538</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00519-w</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Casey</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Felzmann</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pegman</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kouroupetroglou</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Murphy</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Koumpis</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). &#x201c;<article-title>What People with Dementia Want: Designing MARIO an Acceptable Robot Companion</article-title>,&#x201d; in <source>Computers Helping People with Special Needs</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Miesenberger</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>B&#xfc;hler</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Penaz</surname>
<given-names>P.</given-names>
</name>
</person-group> (<publisher-loc>Berlin, Germany</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>318</fpage>&#x2013;<lpage>325</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-41264-1_44</pub-id> </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cavallo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Esposito</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Limosani</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Manzi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bevilacqua</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Felici</surname>
<given-names>E.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Acceptance of Robot-Era System: Results of Robotic Services in Smart Environments with Older Adults</article-title>. <source>J. Med. Int. Res</source>. </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fracasso</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Turno</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>User Needs and Preferences on AAL Systems that Support Older Adults and Their Carers</article-title>. <source>Ais</source> <volume>10</volume>, <fpage>49</fpage>&#x2013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.3233/ais-170471</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Coradeschi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Coraci</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Gonzalez</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Karlsson</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). &#x201c;<article-title>GiraffPlus: Combining Social Interaction and Long Term Monitoring for Promoting Independent Living</article-title>,&#x201d; in <conf-name>The 6th International Conference on Human System Interactions (HSI)</conf-name>, <fpage>578</fpage>&#x2013;<lpage>585</lpage>. <pub-id pub-id-type="doi">10.1109/hsi.2013.6577883</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>De Benedictis</surname>
<given-names>R. D.</given-names>
</name>
<name>
<surname>Fracasso</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Ai and Robotics to Help Older Adults: Revisiting Projects in Search of Lessons Learned</article-title>. <source>Paladyn, J. Behav. Robotics</source> <volume>12</volume>, <fpage>356</fpage>&#x2013;<lpage>378</lpage>. <pub-id pub-id-type="doi">10.1515/pjbr-2021-0025</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Desideri</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Ottaviani</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Malavasi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>di Marzio</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Bonifacci</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Emotional Processes in Human-Robot Interaction during Brief Cognitive Testing</article-title>. <source>Comput. Hum. Behav.</source> <volume>90</volume>, <fpage>331</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2018.08.013</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Di Nuovo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Varrasi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lucas</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Conti</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>McNamara</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Soranzo</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Assessment of Cognitive Skills via Human-Robot Interaction and Cloud Computing</article-title>. <source>J. Bionic Eng.</source> <volume>16</volume>, <fpage>526</fpage>&#x2013;<lpage>539</lpage>. <pub-id pub-id-type="doi">10.1007/s42235-019-0043-2</pub-id> </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>D&#x2019;Onofrio</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sancarlo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ricciardi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ruan</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Giuliani</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Cognitive Stimulation and Information Communication Technologies (ICT) in Alzheimer&#x2019;s Diseases: A Systematic Review</article-title>. <source>Int. J. Med. Biol. Front.</source> <volume>22</volume>, <fpage>97</fpage>. </citation>
</ref>
<ref id="B19">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Filippeschi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Peppoloni</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kostavelis</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Gerlowska</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ruffaldi</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Giakoumis</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). &#x201c;<article-title>Towards Skills Evaluation of Elderly for Human-Robot Interaction</article-title>,&#x201d; in <conf-name>2018 27th IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)</conf-name>, <fpage>886</fpage>&#x2013;<lpage>892</lpage>. <pub-id pub-id-type="doi">10.1109/roman.2018.8525843</pub-id> </citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fiorini</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Mancioppi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Semeraro</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Fujita</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Cavallo</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Unsupervised Emotional State Classification through Physiological Parameters for Social Robotics Applications</article-title>. <source>Knowledge-Based Syst.</source> <volume>190</volume>, <fpage>105217</fpage>. <pub-id pub-id-type="doi">10.1016/j.knosys.2019.105217</pub-id> </citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fiorini</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Tabeau</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>D&#x2019;Onofrio</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Coviello</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>De Mul</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sancarlo</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Co-creation of an Assistive Robot for Independent Living: Lessons Learned on Robot Design</article-title>. <source>Int. J. Interact. Des. Manuf.</source> <volume>14</volume>, <fpage>491</fpage>&#x2013;<lpage>502</lpage>. <pub-id pub-id-type="doi">10.1007/s12008-019-00641-z</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fiorini</surname>
<given-names>S. R.</given-names>
</name>
<name>
<surname>Bermejo-Alonso</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Goncalves</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Pignaton de Freitas</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Olivares Alarcos</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Olszewska</surname>
<given-names>J. I.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>A Suite of Ontologies for Robotics and Automation [Industrial Activities]</article-title>. <source>IEEE Robot. Autom. Mag.</source> <volume>24</volume>, <fpage>8</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1109/mra.2016.2645444</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Folstein</surname>
<given-names>M. F.</given-names>
</name>
<name>
<surname>Robins</surname>
<given-names>L. N.</given-names>
</name>
<name>
<surname>Helzer</surname>
<given-names>J. E.</given-names>
</name>
</person-group> (<year>1983</year>). <article-title>The Mini-Mental State Examination</article-title>. <source>Arch. Gen. Psychiatry</source> <volume>40</volume>, <fpage>812</fpage>. <pub-id pub-id-type="doi">10.1001/archpsyc.1983.01790060110016</pub-id> </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Garc&#xed;a-Betances</surname>
<given-names>R. I.</given-names>
</name>
<name>
<surname>Cabrera-Umpi&#xe9;rrez</surname>
<given-names>M. F.</given-names>
</name>
<name>
<surname>Ottaviano</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pastorino</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Arredondo</surname>
<given-names>M. T.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Parametric Cognitive Modeling of Information and Computer Technology Usage by People with Aging- and Disability-Derived Functional Impairments</article-title>. <source>Sensors</source> <volume>16</volume>. <pub-id pub-id-type="doi">10.3390/s16020266</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Guarino</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>1998</year>). &#x201c;<article-title>Formal Ontology in Information Systems</article-title>,&#x201d; in <conf-name>Proceedings of the first international conference (FOIS&#x2019;98)</conf-name>, <conf-loc>Trento, Italy</conf-loc>, <conf-date>June 6-8</conf-date> (<publisher-loc>Amsterdam, Netherlands</publisher-loc>: <publisher-name>IOS press</publisher-name>). <comment>vol. 46</comment>. </citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kostavelis</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Vasileiadis</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Skartados</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Kargakos</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Giakoumis</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bouganis</surname>
<given-names>C.-S.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Understanding of Human Behavior with a Robotic Agent through Daily Activity Analysis</article-title>. <source>Int J Soc Robotics</source> <volume>11</volume>, <fpage>437</fpage>&#x2013;<lpage>462</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00513-2</pub-id> </citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kotseruba</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Tsotsos</surname>
<given-names>J. K.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>40 Years of Cognitive Architectures: Core Cognitive Abilities and Practical Applications</article-title>. <source>Artif. Intell. Rev.</source> <volume>53</volume>, <fpage>17</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1007/s10462-018-9646-y</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Langley</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Laird</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Rogers</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Cognitive Architectures: Research Issues and Challenges</article-title>. <source>Cognitive Syst. Res.</source> <volume>10</volume>, <fpage>141</fpage>&#x2013;<lpage>160</lpage>. <pub-id pub-id-type="doi">10.1016/j.cogsys.2006.07.004</pub-id> </citation>
</ref>
<ref id="B29">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Lemaignan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ros</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Mosenlechner</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Alami</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Beetz</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). &#x201c;<article-title>ORO, a Knowledge Management Platform for Cognitive Architectures in Robotics</article-title>,&#x201d; in <conf-name>Intelligent Robots and Systems (IROS), 2010 IEEE/RSJ International Conference on</conf-name>, <fpage>3548</fpage>&#x2013;<lpage>3553</lpage>. <pub-id pub-id-type="doi">10.1109/iros.2010.5649547</pub-id> </citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lemaignan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Warnier</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sisbot</surname>
<given-names>E. A.</given-names>
</name>
<name>
<surname>Clodic</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Alami</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Artificial Cognition for Social Human-Robot Interaction: An Implementation</article-title>. <source>Artif. Intell.</source> <volume>247</volume>, <fpage>45</fpage>&#x2013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.1016/j.artint.2016.07.002</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeRouge</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sneha</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Tolle</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>User Profiles and Personas in the Design and Development of Consumer Health Technologies</article-title>. <source>Int. J. Med. Inf.</source> <volume>82</volume>, <fpage>e251</fpage>&#x2013;<lpage>e268</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2011.03.006</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lieto</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bhatt</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Oltramari</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Vernon</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The Role of Cognitive Architectures in General Artificial Intelligence</article-title>. <source>Cognitive Syst. Res.</source> <volume>48</volume>, <fpage>1</fpage>&#x2013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.1016/j.cogsys.2017.08.003</pub-id> </citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>J. X.</given-names>
</name>
<name>
<surname>Goryakin</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Maeda</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bruckner</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Scheffler</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Global Health Workforce Labor Market Projections for 2030</article-title>. <source>Hum. Resour. Health</source> <volume>15</volume>, <fpage>11</fpage>. <pub-id pub-id-type="doi">10.1186/s12960-017-0187-2</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mancioppi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fiorini</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Timpano Sportiello</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cavallo</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Novel Technological Solutions for Assessment, Treatment, and Assistance in Mild Cognitive Impairment</article-title>. <source>Front. Neuroinform.</source> <volume>13</volume>, <fpage>58</fpage>. <pub-id pub-id-type="doi">10.3389/fninf.2019.00058</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moro</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Nejat</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Mihailidis</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Learning and Personalizing Socially Assistive Robot Behaviors to Aid with Activities of Daily Living</article-title>. <source>ACM Trans. Human-Robot Interact</source>. <pub-id pub-id-type="doi">10.1145/3277903</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Murphy</surname>
<given-names>R. R.</given-names>
</name>
<name>
<surname>Gandudi</surname>
<given-names>V. B.</given-names>
</name>
<name>
<surname>Amin</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Clendenin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Moats</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>An Analysis of International Use of Robots for Covid-19</article-title>. <source>Robotics Aut. Syst.</source> <volume>148</volume>, <fpage>103922</fpage>. <pub-id pub-id-type="doi">10.1016/j.robot.2021.103922</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nocentini</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Fiorini</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Acerbi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sorrentino</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mancioppi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Cavallo</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A Survey of Behavioral Models for Social Robots</article-title>. <source>Robotics</source> <volume>8</volume>, <fpage>54</fpage>. <pub-id pub-id-type="doi">10.3390/robotics8030054</pub-id> </citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pellegrinelli</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Pedrocchi</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tolio</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Motion Planning and Scheduling for Human and Industrial-Robot Collaboration</article-title>. <source>CIRP Ann.</source> <volume>66</volume>, <fpage>1</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1016/j.cirp.2017.04.095</pub-id> </citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pino</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Palestra</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Trevino</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>De Carolis</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The Humanoid Robot Nao as Trainer in a Memory Program for Elderly People with Mild Cognitive Impairment</article-title>. <source>Int J Soc Robotics</source> <volume>12</volume>, <fpage>21</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00533-y</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Porzel</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Pomarlan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Be&#xdf;ler</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Malaka</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Beetz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bateman</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>A Formal Model of Affordances for Flexible Robotic Task Execution</article-title>,&#x201d; in <conf-name>ECAI 2020 - 24th European Conference on Artificial Intelligence</conf-name>, <fpage>629</fpage>&#x2013;<lpage>636</lpage>. </citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rossi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ferland</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Tapus</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>User Profiling and Behavioral Adaptation for HRI: A Survey</article-title>. <source>Pattern Recognit. Lett.</source> <volume>99</volume>, <fpage>3</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2017.06.002</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Rossi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Santangelo</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Staffa</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Varrasi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Conti</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Di Nuovo</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Psychometric Evaluation Supported by a Social Robot: Personality Factors and Technology Acceptance</article-title>,&#x201d; in <conf-name>2018 27th IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)</conf-name> (<publisher-loc>Nanjing, China</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>802</fpage>&#x2013;<lpage>807</lpage>. <pub-id pub-id-type="doi">10.1109/roman.2018.8525838</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sorrentino</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mancioppi</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Coviello</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Cavallo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Fiorini</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Feasibility Study on the Role of Personality, Emotion, and Engagement in Socially Assistive Robotics: A Cognitive Assessment Scenario</article-title>. <source>Informatics</source> <volume>8</volume>, <fpage>23</fpage>. <pub-id pub-id-type="doi">10.3390/informatics8020023</pub-id> </citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tabrez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Luebbers</surname>
<given-names>M. B.</given-names>
</name>
<name>
<surname>Hayes</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A Survey of Mental Modeling Techniques in Human-Robot Teaming</article-title>. <source>Curr. Robot. Rep.</source> <volume>1</volume>, <fpage>259</fpage>&#x2013;<lpage>267</lpage>. <pub-id pub-id-type="doi">10.1007/s43154-020-00019-0</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tapus</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mataric</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Scassellati</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Socially Assistive Robotics [Grand Challenges of Robotics]</article-title>. <source>IEEE Robot. Autom. Mag.</source> <volume>14</volume>, <fpage>35</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1109/mra.2007.339605</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tapus</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>&#x162;&#x103;pu&#x15f;</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Matari&#x107;</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>User-robot Personality Matching and Assistive Robot Behavior Adaptation for Post-stroke Rehabilitation Therapy</article-title>. <source>Intel. Serv. Robot.</source> <volume>1</volume>, <fpage>169</fpage>&#x2013;<lpage>183</lpage>. <pub-id pub-id-type="doi">10.1007/s11370-008-0017-4</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tenorth</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Beetz</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Representations for Robot Knowledge in the KnowRob Framework</article-title>. <source>Artif. Intell.</source> <volume>247</volume>, <fpage>151</fpage>. </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cialdea Mayer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>PLATINUm: A New Framework for Planning and Acting</article-title>. <source>Lect. Notes Comput. Sci.</source> <volume>2017</volume>, <fpage>498</fpage>&#x2013;<lpage>512</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-70169-1_37</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A Holistic Approach to Behavior Adaptation for Socially Assistive Robots</article-title>. <source>Int J Soc Robotics</source> <volume>12</volume>, <fpage>617</fpage>&#x2013;<lpage>637</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00617-9</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Modeling Affordances and Functioning for Personalized Robotic Assistance</article-title>,&#x201d; in <conf-name>Principles of Knowledge Representation and Reasoning: Proceedings of the Sixteenth International Conference (AAAI Press)</conf-name>. <pub-id pub-id-type="doi">10.24963/kr.2020/94</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Umbrico</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cortellessa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Orlandini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cesta</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Toward Intelligent Continuous Assistance</article-title>. <source>J. Ambient. Intell. Hum. Comput.</source> <volume>12</volume>, <fpage>4513</fpage>&#x2013;<lpage>4527</lpage>. <pub-id pub-id-type="doi">10.1007/s12652-020-01766-w</pub-id> </citation>
</ref>
<ref id="B52">
<citation citation-type="book">
<collab>World Health Organization</collab> (<year>2001</year>). <source>International Classification of Functioning, Disability and Health: ICF</source>. <publisher-loc>Geneva, Switzerland</publisher-loc>: <publisher-name>World Health Organization</publisher-name>. </citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wykowska</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chaminade</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Embodied Artificial Agents for Understanding Human Social Cognition</article-title>. <source>Phil. Trans. R. Soc. B</source> <volume>371</volume>, <fpage>20150375</fpage>. <pub-id pub-id-type="doi">10.1098/rstb.2015.0375</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>