<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Health Serv.</journal-id>
<journal-title>Frontiers in Health Services</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Health Serv.</abbrev-journal-title>
<issn pub-type="epub">2813-0146</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frhs.2023.1211150</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Health Services</subject>
<subj-group>
<subject>Systematic Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Implementing AI in healthcare&#x2014;the relevance of trust: a scoping review</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Steerling</surname><given-names>Emilie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/2280971/overview"/></contrib>
<contrib contrib-type="author"><name><surname>Siira</surname><given-names>Elin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Nilsen</surname><given-names>Per</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1510436/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Svedberg</surname><given-names>Petra</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1988089/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Nygren</surname><given-names>Jens</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1988091/overview" /></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><addr-line>School of Health and Welfare</addr-line>, <institution>Halmstad University</institution>, <addr-line>Halmstad</addr-line>, <country>Sweden</country></aff>
<aff id="aff2"><label><sup>2</sup></label><addr-line>Department of Health, Medicine and Caring Sciences</addr-line>, <institution>Link&#x00F6;ping University</institution>, <addr-line>Link&#x00F6;ping</addr-line>, <country>Sweden</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited by:</bold> Marjan Askari, Erasmus University Rotterdam, Netherlands</p></fn>
<fn fn-type="edited-by"><p><bold>Reviewed by:</bold> David Sommerfeld, University of California, San Diego, United States; Tayana Soukup, Imperial College London, United Kingdom</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Emilie Steerling <email>emilie.steerling@hh.se</email></corresp>
</author-notes>
<pub-date pub-type="epub"><day>24</day><month>08</month><year>2023</year></pub-date>
<pub-date pub-type="collection"><year>2023</year></pub-date>
<volume>3</volume><elocation-id>1211150</elocation-id>
<history>
<date date-type="received"><day>24</day><month>04</month><year>2023</year></date>
<date date-type="accepted"><day>11</day><month>08</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Steerling, Siira, Nilsen, Svedberg and Nygren.</copyright-statement>
<copyright-year>2023</copyright-year><copyright-holder>Steerling, Siira, Nilsen, Svedberg and Nygren</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract><sec><title>Background</title>
<p>The process of translation of AI and its potential benefits into practice in healthcare services has been slow in spite of its rapid development. Trust in AI in relation to implementation processes is an important aspect. Without a clear understanding, the development of effective implementation strategies will not be possible, nor will AI advance despite the significant investments and possibilities.</p>
</sec><sec><title>Objective</title>
<p>This study aimed to explore the scientific literature regarding how trust in AI in relation to implementation in healthcare is conceptualized and what influences trust in AI in relation to implementation in healthcare.</p>
</sec><sec><title>Methods</title>
<p>This scoping review included five scientific databases. These were searched to identify publications related to the study aims. Articles were included if they were published in English, after 2012, and peer-reviewed. Two independent reviewers conducted an abstract and full-text review, as well as carrying out a thematic analysis with an inductive approach to address the study aims. The review was reported in accordance with the PRISMA-ScR guidelines.</p>
</sec><sec><title>Results</title>
<p>A total of eight studies were included in the final review. We found that trust was conceptualized in different ways. Most empirical studies had an individual perspective where trust was directed toward the technology&#x0027;s capability. Two studies focused on trust as relational between people in the context of the AI application rather than as having trust in the technology itself. Trust was also understood by its determinants and as having a mediating role, positioned between characteristics and AI use. The thematic analysis yielded three themes: individual characteristics, AI characteristics and contextual characteristics, which influence trust in AI in relation to implementation in healthcare.</p>
</sec><sec><title>Conclusions</title>
<p>Findings showed that the conceptualization of trust in AI differed between the studies, as well as which determinants they accounted for as influencing trust. Few studies looked beyond individual characteristics and AI characteristics. Future empirical research addressing trust in AI in relation to implementation in healthcare should have a more holistic view of the concept to be able to manage the many challenges, uncertainties, and perceived risks.</p>
</sec>
</abstract>
<kwd-group>
<kwd>trust</kwd>
<kwd>artificial intelligence</kwd>
<kwd>implementation</kwd>
<kwd>healthcare</kwd>
<kwd>scoping review</kwd>
</kwd-group><contract-num rid="cn001">20200208 01H</contract-num><contract-num rid="cn002">2022054 06</contract-num><contract-sponsor id="cn001">Knowledge Foundation</contract-sponsor><contract-sponsor id="cn002">Swedish Research Council</contract-sponsor><counts>
<fig-count count="2"/>
<table-count count="3"/><equation-count count="0"/><ref-count count="59"/><page-count count="0"/><word-count count="0"/></counts><custom-meta-wrap><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Implementation Science</meta-value></custom-meta></custom-meta-wrap>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><label>1.</label><title>Introduction</title>
<p>Artificial intelligence (AI) can be understood as &#x201C;a computerized system that is equipped with the capacity to perform tasks or reasoning processes that we usually associated with the intelligence level of a human being&#x201D; (<xref ref-type="bibr" rid="B1">1</xref>). These systems have the potential to transform healthcare at many levels and solve many of its current challenges (<xref ref-type="bibr" rid="B2">2</xref>&#x2013;<xref ref-type="bibr" rid="B4">4</xref>), e.g., by reducing costs and workloads, improving efficiency and quality, as well as by making earlier and more accurate diagnoses (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B5">5</xref>). The expectations on AI are high and the European Union (<xref ref-type="bibr" rid="B2">2</xref>) and the European Commission are making significant investments in AI (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>Despite the rapid development of AI and its potential benefits when implemented in healthcare, the process of translation into practice has been slow (<xref ref-type="bibr" rid="B7">7</xref>). AI systems tend to be complex, unpredictable, lacking in evidence, and difficult to grasp, hence the many uncertainties and risks related to their use, e.g., patient harm, bias, and lack of privacy (<xref ref-type="bibr" rid="B2">2</xref>). Trust in AI and its trustworthiness have therefore been regarded as important aspects to address (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B9">9</xref>). Based on literature from other scientific fields, trust is fundamental for a functioning health system (<xref ref-type="bibr" rid="B10">10</xref>) where patients are in vulnerable situations since it is known to increase the tolerance of uncertainty, as well as to reduce the perceived complexity (<xref ref-type="bibr" rid="B11">11</xref>). Trust is understood as a way of dealing with uncertainty (<xref ref-type="bibr" rid="B12">12</xref>), and according to Luhmann (<xref ref-type="bibr" rid="B13">13</xref>), trust is an attitude which leaves room for risk-taking behavior. To be trustworthy is a characteristic of someone who is competent to perform an action and has the moral attitude toward those who depend on the performance (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B15">15</xref>). Being trustworthy helps in gaining trust but does not imply trust <italic>per se</italic> (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>).</p>
<p>Most research in AI in healthcare has so far been primarily focused on AI&#x0027;s performance (<xref ref-type="bibr" rid="B18">18</xref>), fairness, trustworthiness (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B19">19</xref>&#x2013;<xref ref-type="bibr" rid="B22">22</xref>), legal and ethical issues (<xref ref-type="bibr" rid="B21">21</xref>&#x2013;<xref ref-type="bibr" rid="B27">27</xref>), and transparency and explainability (<xref ref-type="bibr" rid="B19">19</xref>&#x2013;<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B27">27</xref>).</p>
<p>Aspects such as AI&#x0027;s influence and interaction with the context in which it is implemented are also important to consider for successful implementation of AI (<xref ref-type="bibr" rid="B28">28</xref>). There appears to be a general lack of empirical research investigating implementation processes in relation to AI in healthcare (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>). Health professionals are trusted and authorized to give advice and treatment based on their profession and expertise (<xref ref-type="bibr" rid="B30">30</xref>&#x2013;<xref ref-type="bibr" rid="B33">33</xref>), and an implementation of AI into practice is believed to disrupt healthcare by questioning these health professionals&#x0027; existing authority, as well as influencing organizational structures, roles, and practices (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B29">29</xref>). The many challenges, uncertainties, and perceived risks reflect the importance of trust in AI in relation to implementation in healthcare.</p>
<p>In order to successfully implement AI into routine applications in healthcare and change clinical practice, an understanding of trust in AI in relation to the change processes is needed. No previous studies exploring the concept of trust in AI in relation to implementation in healthcare have to our knowledge been performed, which implies there could be a lack of conceptual clarity. Without a clear understanding of trust in AI, it could be difficult to identify implementation strategies, which means that AI will not advance despite the significant investments and possibilities. The aim of this paper was thus to explore the scientific literature regarding how trust in AI is conceptualized in relation to implementation in healthcare and what influences trust in AI in relation to implementation in healthcare.</p>
</sec>
<sec id="s2" sec-type="methods"><label>2.</label><title>Methods</title>
<sec id="s2a"><label>2.1.</label><title>Study design</title>
<p>We chose a scoping review methodology to explore all relevant literature addressing trust in AI in relation to implementation in healthcare, since this methodology is useful for identifying knowledge gaps, scoping a body of literature, or clarifying concepts (<xref ref-type="bibr" rid="B34">34</xref>). We used the methodological framework developed by Arksey and O&#x0027;Malley (<xref ref-type="bibr" rid="B35">35</xref>) and followed the five stages: (1) identifying the research question, (2) identifying relevant articles, (3) selecting articles, (4) charting the data, and (5) collating, summarizing, and reporting the results. The review followed the recommendations in the Preferred Reporting Items for Systematic Reviews and Meta-Analysis for Scoping Reviews (PRISMA-ScR) checklist (<xref ref-type="bibr" rid="B34">34</xref>), and since it was based on publicly available studies there was no ethical consideration related to the handling of personal and sensitive information. A review protocol based on Arksey and O&#x0027;Malley&#x0027;s (<xref ref-type="bibr" rid="B35">35</xref>) framework was developed, and the final version of the protocol can be found in <xref ref-type="sec" rid="s10">Data Sheet 1</xref>.</p>
</sec>
<sec id="s2b"><label>2.2.</label><title>Identifying the research question</title>
<p>To address the aim, we formulated two research questions:
<list list-type="simple">
<list-item><label>1.</label>
<p>How is trust in AI conceptualized in relation to implementation in healthcare?</p></list-item>
<list-item><label>2.</label>
<p>What influences trust in AI in relation to implementation in healthcare?</p></list-item>
</list></p>
</sec>
<sec id="s2c"><label>2.3.</label><title>Identifying relevant articles</title>
<p>A thorough search for published literature was developed and carried out together with an experienced librarian. Search terms included a combination of terms related to implementation, AI, and healthcare. We used standardized subject headings describing the terms and subcategories provided by the databases. Truncation of words allowed for alternative endings and were used for implementation, improvement, innovation, and intervention. The term trust had to be specific since the aim was to explore how trust was conceptualized in AI in relation to implementation in healthcare. The electronic database search was recorded in a table (<xref ref-type="sec" rid="s10">Data Sheet 2</xref>). An initial search was carried out in CINAHL and PubMed to identify keywords and subject headings, which were then included in the search strategy for the selected databases. Five electronic databases (PubMed, CINAHL, PsychINFO, Web of Science and Scopus) were systematically searched to identify relevant scientific literature. In addition, reference lists of the identified research articles were reviewed manually.</p>
<p>The eligibility criteria ensured that the content of the included studies was relevant to the research question (<xref ref-type="bibr" rid="B36">36</xref>). The focus was on trust in AI in relation to implementation in healthcare, and there was no restriction placed on the type of methodology used in the paper (e.g., qualitative, quantitative, mixed methods or theoretical). To be included, articles had to: (a) address &#x201C;trust&#x201D; in AI in (b) relation to implementation in healthcare. Although there are closely related terms for trust, we found it important to be specific since the aim was to conceptualize &#x201C;trust&#x201D; in AI in relation to its implementation in healthcare. Articles were excluded if they were non-English, not available in full text, not peer reviewed or published before 2012 (<xref ref-type="table" rid="T1">Table&#x00A0;1</xref>). The decision to exclude articles published before 2012 was made to allow a focus on more recent development of AI, due to its fast-changing nature. AI was uncommon in healthcare settings prior to 2012 (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<table-wrap id="T1" position="float"><label>Table 1</label>
<caption><p>Inclusion and exclusion criteria.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="center">Inclusion criteria</th>
<th valign="top" align="center">Exclusion criteria</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>-</label>
<p>Studies addressing trust in relation to implementation of AI in healthcare.</p></list-item>
<list-item><label>-</label>
<p>Peer reviewed</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>-</label>
<p>Abstract missing.</p></list-item>
<list-item><label>-</label>
<p>Published before 2012.</p></list-item>
<list-item><label>-</label>
<p>Not in English.</p></list-item>
<list-item><label>-</label>
<p>Only mentioning trust.</p></list-item>
</list></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>We defined implementation as &#x201C;An intentional effort designed to change or adapt or uptake interventions into routines&#x201D;, which was based on a definition used by two earlier reviews with a focus on implementation of AI into healthcare practice (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B28">28</xref>). We also made a distinction between trust and trustworthiness, and we excluded studies that were only mentioning trust without giving it further attention or dealing with it in relation to implementation in healthcare.</p>
</sec>
<sec id="s2d"><label>2.4.</label><title>Selecting articles</title>
<p>The eligible articles were uploaded into Endnote X9 software where duplicates were removed, and thereafter imported into Rayyan. The initial screening of titles and abstracts was conducted in collaboration between two reviewers (authors 1 and 2), who communicated and met regularly to discuss any disagreements or uncertainties regarding which articles to include or exclude based on selected criteria. If agreement could not be reached, the other authors were consulted through discussions. The full article was read if focus of an article was unclear based on title and abstract. In the next step, the same two reviewers (authors 1 and 2) independently conducted the full-text review on the remaining articles, and disagreements and uncertainties were again resolved through discussion with the other authors.</p>
</sec>
<sec id="s2e"><label>2.5.</label><title>Charting the data</title>
<p>First, we developed a standard data charting form, following the guidelines by Arksey and O&#x0027;Malley (<xref ref-type="bibr" rid="B35">35</xref>), based on characteristics of the articles: (1) country; (2) publication year; (3) methodological design; (4) healthcare setting; (5) aim of the study; (6) application area; (7) intended user; (8) definition of trust (<xref ref-type="table" rid="T2">Table&#x00A0;2</xref>). Two reviewers (authors 1 and 2) extracted the data from the articles and thereafter confirmed with the other authors. The aim was to explore all relevant literature rather than provide a quantitative or qualitative synthesis. The methodological quality or risk of bias of the included studies were therefore not reviewed, which is consistent with guidance on the conduct of scoping reviews (<xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B37">37</xref>).</p>
<table-wrap id="T2" position="float"><label>Table 2</label>
<caption><p>Characteristics of included studies.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="center">Author(s)</th>
<th valign="top" align="center">Country of origin</th>
<th valign="top" align="center">Methodological design</th>
<th valign="top" align="center">Healthcare setting</th>
<th valign="top" align="center">Aim of the study</th>
<th valign="top" align="center">Application area</th>
<th valign="top" align="center">Intended user</th>
<th valign="top" align="center">Definition of Trust</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Datta Burton et al. (<xref ref-type="bibr" rid="B38">38</xref>), 2021</td>
<td valign="top" align="left">The United Kingdom</td>
<td valign="top" align="left">Opinion paper, with empirical support</td>
<td valign="top" align="left">Neurology</td>
<td valign="top" align="left">To explore questions of trust between patients and clinicians and between clinicians and researchers.</td>
<td valign="top" align="left">Brain modelling</td>
<td valign="top" align="left">Clinicians (unspecified)</td>
<td valign="top" align="left">A triangle of trust; &#x201C;relationships between patients and clinicians, and between clinicians and researchers&#x201D; (<xref ref-type="bibr" rid="B38">38</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Choi et al. (<xref ref-type="bibr" rid="B39">39</xref>), 2020</td>
<td valign="top" align="left">The United States &#x0026; Canada</td>
<td valign="top" align="left">Opinion paper, without empirical support</td>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">To outline several ethical and practical concerns in integrating AI with human cognition in the real-world: bias and pitfalls of AI, ethics of trust and risk regarding AI, and design of the human&#x2014;AI interface.</td>
<td valign="top" align="left">Image recognition</td>
<td valign="top" align="left">Clinicians (radiologist)</td>
<td valign="top" align="left">&#x201C;A human&#x0027;s propensity to submit to vulnerability and unpredictability, and nevertheless to use that automation, as measured by intention expressed in speech or writing, or by measurable bodily actions to actually use the automation&#x201D; (<xref ref-type="bibr" rid="B40">40</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Esmaeilzadeh et al. (<xref ref-type="bibr" rid="B41">41</xref>), 2021</td>
<td valign="top" align="left">The United States</td>
<td valign="top" align="left">Quantitative: survey study</td>
<td valign="top" align="left">Healthcare, general</td>
<td valign="top" align="left">To examine how potential users perceive the benefits, risks, and use of AI clinical applications for their healthcare purposes and how their perception may be different if faced with three healthcare service encounter scenarios.</td>
<td valign="top" align="left">Diagnosis and treatment</td>
<td valign="top" align="left">Patients (with acute or chronic conditions)</td>
<td valign="top" align="left">&#x201C;Trust can be defined as trust in clinicians and the clinical tools they use (such as AI clinical applications)&#x201D; (<xref ref-type="bibr" rid="B42">42</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Fan et al. (<xref ref-type="bibr" rid="B43">43</xref>), 2018</td>
<td valign="top" align="left">China</td>
<td valign="top" align="left">Quantitative: survey study</td>
<td valign="top" align="left">Hospital</td>
<td valign="top" align="left">To explore the adoption of artificial intelligence-based medical diagnosis support system by integrating Unified theory of user acceptance of technology and trust theory.</td>
<td valign="top" align="left">Diagnosis</td>
<td valign="top" align="left">Clinicians (unspecified)</td>
<td valign="top" align="left">&#x201C;The beliefs about a technology&#x0027;s capability rather than its will or its motives.&#x201D; (<xref ref-type="bibr" rid="B44">44</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Liu &#x0026; Tao, (<xref ref-type="bibr" rid="B45">45</xref>), 2022</td>
<td valign="top" align="left">China</td>
<td valign="top" align="left">Quantitative: survey study</td>
<td valign="top" align="left">Healthcare service delivery</td>
<td valign="top" align="left">To examine the roles of trust and three AI-specific factors in public acceptance of smart healthcare services based on an extended Technology Acceptance Model.</td>
<td valign="top" align="left">Smart healthcare services</td>
<td valign="top" align="left">The general population</td>
<td valign="top" align="left">&#x201C;The degree to which an individual perceives that smart healthcare services are dependable, reliable, and trustworthy in supporting one&#x0027;s healthcare activities&#x201D; (<xref ref-type="bibr" rid="B45">45</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Prakash &#x0026; Das, (<xref ref-type="bibr" rid="B46">46</xref>), 2021</td>
<td valign="top" align="left">India</td>
<td valign="top" align="left">Mixed methods</td>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">To develop and test a model based on theories of Unified Theory of Acceptance and Use of Technology, status quo bias, and technology trust.</td>
<td valign="top" align="left">Diagnosis</td>
<td valign="top" align="left">Clinicians (radiologist)</td>
<td valign="top" align="left">&#x201C;The willingness of a party to be vulnerable to the actions of another party&#x2026;&#x201D; (<xref ref-type="bibr" rid="B47">47</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Roski et al. (<xref ref-type="bibr" rid="B48">48</xref>), 2021</td>
<td valign="top" align="left">The United States</td>
<td valign="top" align="left">Opinion paper, without empirical support</td>
<td valign="top" align="left">Healthcare, general</td>
<td valign="top" align="left">To describe how AI risk mitigation practices could be promulgated through strengthened industry self-governance, specifically through certification and accreditation of AI development and implementation organizations.</td>
<td valign="top" align="left">AI, general</td>
<td valign="top" align="left">N/a</td>
<td valign="top" align="left">N/a</td>
</tr>
<tr>
<td valign="top" align="left">Yakar et al. (<xref ref-type="bibr" rid="B49">49</xref>), 2021</td>
<td valign="top" align="left">Netherlands</td>
<td valign="top" align="left">Quantitative: survey study</td>
<td valign="top" align="left">Radiology, dermatology, and robotic surgery</td>
<td valign="top" align="left">To investigate the general population's view of AI in medicine with specific emphasis on three areas that have experienced major progress in AI research in the past years, namely radiology, robotic surgery, and dermatology.</td>
<td valign="top" align="left">Diagnosis, communication, and surgery</td>
<td valign="top" align="left">The general population</td>
<td valign="top" align="left">N/a</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2f"><label>2.6.</label><title>Collating, summarizing, and reporting the results</title>
<p>We then used a thematic analysis with an inductive approach to analyze data associated with the research questions, how trust in AI is conceptualized in relation to implementation in healthcare and what influences trust in AI in relation to implementation in healthcare. We followed the guide of Braun and Clarke (<xref ref-type="bibr" rid="B50">50</xref>) with six phases: (1) data familiarization; (2) initial code generation; (3) generating themes; (4) theme review; (5) theme defining and naming; (6) and report production. The first step involved reading and rereading the articles, as well as making notes. Two reviewers (authors 1 and 2) reflected individually and generated independently lists of codes from words and phrases, which were coded regarding trust in AI in relation to implementation in healthcare. The reviewers then compared their codes and interpretations, and the relationships between the codes were discussed, which were referred to as subthemes. The conceptualization of trust was either clearly defined or defined by its determinants. The subthemes were then analyzed, and three overarching themes were generated. All authors discussed continuously the data analysis to enhance its quality and validity. No qualitative data analysis software was used.</p>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3.</label><title>Results</title>
<p>A total of 815 articles were retrieved from the five databases. Three articles were identified through manual searches of reference lists. The number of articles for review was reduced to 454 after duplicates were removed. 426 of the 454 (93.8&#x0025;) were excluded in the title and abstract screening, for reasons highlighted in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>. The term trust was often only mentioned, but not further addressed (<italic>n&#x2009;&#x003D;</italic>&#x2009;170). 235 articles investigated trust but not in AI in relation to implementation, thirteen articles were not in the healthcare setting, six articles were published before 2012 and two articles had no abstract. This resulted in a high number of excluded articles. Only 28 articles remained for full text review. Twelve of these articles were excluded because they only mentioned trust and did not further address or elaborate on the concept in the full text, and eight articles were excluded because they did not address trust in relation to AI implementation in healthcare. A total of eight articles met all criteria and were included in the study.</p>
<fig id="F1" position="float"><label>Figure 1</label>
<caption><p>PRISMA-ScR flowchart.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="frhs-03-1211150-g001.tif"/>
</fig>
<sec id="s3a"><label>3.1.</label><title>Study characteristics</title>
<p>The included studies were published between 2018 and 2022. Most articles were from the United States (<italic>n</italic>&#x2009;&#x003D;&#x2009;3), two from China, and the remainder from the United Kingdom (<italic>n</italic>&#x2009;&#x003D;&#x2009;1), India (<italic>n</italic>&#x2009;&#x003D;&#x2009;1), Canada (<italic>n</italic>&#x2009;&#x003D;&#x2009;1) and Netherlands (<italic>n</italic>&#x2009;&#x003D;&#x2009;1). Many of the studies (<italic>n</italic>&#x2009;&#x003D;&#x2009;6) were conducted in hospital settings (neurological practice, radiology, dermatology, and robotic surgery), except for two studies which involved healthcare management at home and healthcare in general. AI was often used for diagnosis (<italic>n</italic>&#x2009;&#x003D;&#x2009;4). Other application areas were brain modelling (1), image recognition (1), smart healthcare services (1), treatment (1), surgery (1), communication (1). One study was too general to have a specific purpose. Four studies were based on quantitative studies, three were opinion papers, and one used mixed methods. The studies examined the perceptions of different intended users: clinicians (<italic>n</italic>&#x2009;&#x003D;&#x2009;4), general population (<italic>n&#x2009;&#x003D;&#x2009;2</italic>), and patients (<italic>n</italic>&#x2009;&#x003D;&#x2009;1). The characteristics of the included studies are shown in <xref ref-type="table" rid="T2">Table&#x00A0;2</xref>.</p>
</sec>
<sec id="s3b"><label>3.2.</label><title>How is trust in AI conceptualized in relation to implementation in healthcare?</title>
<p>Six out of the eight studies provided a definition of trust (<xref ref-type="table" rid="T2">Table&#x00A0;2</xref>). Most empirical studies had an individual perspective where trust was directed toward the technology&#x0027;s capability (<italic>n&#x2009;</italic>&#x003D;&#x2009;4), e.g., describing trust as human&#x0027;s propensity or willingness to submit to the vulnerability of the technology&#x0027;s capability (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>) or the perception of AI as being dependable, reliable, and trustworthy in supporting healthcare activities (<xref ref-type="bibr" rid="B45">45</xref>). Two studies had a contextual perspective and focused on trust as relational between people in the context of the AI application rather than having trust in the technology itself. Datta Burton et al. (<xref ref-type="bibr" rid="B38">38</xref>) argued that it is necessary to develop the human side of these tools, which represents a triangle of trust relationships: between patients and clinicians, and between clinicians and researchers. Esmaeilzadeh et al. (<xref ref-type="bibr" rid="B41">41</xref>) focused on care encounters and understood trust as the degree to which an individual believes that the clinical encounter is trustworthy and referred to Reddy et al. (<xref ref-type="bibr" rid="B42">42</xref>) who understood trust as &#x201C;Trust is in the clinicians and the clinical tools they use&#x201D;. Two studies only defined trust indirectly by describing trust determinants (<xref ref-type="bibr" rid="B48">48</xref>, <xref ref-type="bibr" rid="B49">49</xref>).</p>
</sec>
<sec id="s3c"><label>3.3.</label><title>What influences trust in AI in relation to implementation in healthcare?</title>
<p>The inductive coding yielded three themes regarding what influences trust in AI implementation in healthcare, which could be understood as interconnected: <italic>individual characteristics, AI characteristics, and contextual characteristics</italic>. These themes were based on 10 subthemes and 34 codes (<xref ref-type="table" rid="T3">Table&#x00A0;3</xref>).</p>
<table-wrap id="T3" position="float"><label>Table 3</label>
<caption><p>Influences of trust in relation to implementation of AI in healthcare based on inductive thematic analysis.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="center">Themes</th>
<th valign="top" align="center">Subthemes</th>
<th valign="top" align="center">Codes</th>
<th valign="top" align="center">Articles</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="4">Individual characteristics</td>
<td valign="top" align="left">Demographic characteristics</td>
<td valign="top" align="left">Age, education, sex/gender, geographic origin, and employment.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Knowledge</td>
<td valign="top" align="left">Usage experience, tacit knowledge, and tech skills.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Personal traits</td>
<td valign="top" align="left">Cognition and positive attitude.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Health condition</td>
<td valign="top" align="left">Health condition and healthcare consumption.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">AI characteristics</td>
<td valign="top" align="left">Individualization</td>
<td valign="top" align="left">Personalization, privacy, and anthropomorphism.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B45">45</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">&#x201C;Black box&#x201D;</td>
<td valign="top" align="left">Self-learning, non-transparent, and autonomous.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B48">48</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Technical objectivity</td>
<td valign="top" align="left">Data-driven, accurate, lack of moral values, and lack of empathy.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">Contextual characteristics</td>
<td valign="top" align="left">Healthcare culture</td>
<td valign="top" align="left">Medical area, task complexity, &#x201C;skilled clinician&#x201D;, professional expertise, custodians, and opinion of important others.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Interpersonal relations</td>
<td valign="top" align="left">Collaboration, personal interactions, and mutual understanding</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B48">48</xref>, <xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Governance</td>
<td valign="top" align="left">Policies, guidelines, and standards/regulation.</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B48">48</xref>)</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s3c1"><label>3.3.1.</label><title>Individual characteristics</title>
<p>The individual characteristics capture those qualities that make the individuals different from each other, such as age, sex/gender, personality. These characteristics influence individuals&#x2019; trust in AI in relation to an implementation in healthcare. <italic>Demographic characteristics</italic> such as gender, age and education were found to relate to trust by moderating the relationship between antecedents and behavioral intention (<italic>n&#x2009;&#x003D;&#x2009;</italic>4). For example, being male, higher educated, employed or student, and with Western background were predictors of trust in AI among the general population (<xref ref-type="bibr" rid="B49">49</xref>). Disposition to trust technology (a person&#x0027;s general tendency to be willing to depend on technology) varied among clinicians based on their living experiences (<xref ref-type="bibr" rid="B43">43</xref>) and cultural background (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>). <italic>Knowledge</italic> and technological skills were found to influence trust in AI (<italic>n&#x2009;&#x003D;&#x2009;5)</italic>, which emphasized the need for education and training (<xref ref-type="bibr" rid="B49">49</xref>). Four studies understood trust as influenced by earlier usage experience or technological skills (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>), e.g., radiologists were used to highly complex machines in their routine clinical practice, and ease of use may therefore not be a concern in the adoption-related decision making (<xref ref-type="bibr" rid="B46">46</xref>). 
<italic>Personal traits</italic> such as cognition and having a positive attitude were associated with higher levels of trust (<italic>n&#x2009;&#x003D;&#x2009;3</italic>), e.g., disposition to trust technology was related to trust in AI use (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>), and understood as influenced by the individual&#x0027;s cognition and personality (<xref ref-type="bibr" rid="B46">46</xref>). <italic>Health conditions</italic> and healthcare consumption were also something that influenced trust (<italic>n&#x2009;&#x003D;&#x2009;2</italic>), e.g., individuals with chronic conditions may not trust AI clinical applications if no physician interaction were included in healthcare delivery (<xref ref-type="bibr" rid="B41">41</xref>) and individuals who utilized less healthcare were associated with a higher level of trust in AI (<xref ref-type="bibr" rid="B49">49</xref>).</p>
</sec>
<sec id="s3c2"><label>3.3.2.</label><title>AI characteristics</title>
<p>Trust in relation to the characteristics of AI was frequently mentioned in the literature, where aspects such as its performance, capacity, and trustworthiness were focused on. AI&#x0027;s ability to <italic>individualization</italic> was shown to enhance trust, which was understood as care tailored to the patients&#x0027; unique needs (<italic>n&#x2009;&#x003D;&#x2009;2</italic>). Personalization was based on patients&#x0027; health information, which required sharing sensitive personal data and caused concerns such as risks of privacy breaches (<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B45">45</xref>). AI&#x0027;s anthropomorphic characteristics enhanced trust in AI in relation to an implementation since it generated a sense of social presence. It was referred to as the perceived level of humanlike characteristics such as human appearance, self-consciousness, and emotion (<xref ref-type="bibr" rid="B45">45</xref>). AI characteristics such as &#x201C;<italic>black box&#x201D;</italic>, self-learning, non-transparent and autonomous characteristics brought uncertainty and threatened trust in the implementation of AI (<italic>n&#x2009;</italic>&#x003D;&#x2009;5), since inputs and operations were not visible to the user. <italic>Technical objectivity</italic>, which included characteristics such as data-driven, accuracy, lack of moral values, and lack of empathy, was also related to trust (<italic>n&#x2009;</italic>&#x003D;&#x2009;<italic>5)</italic>, since they in some cases could produce results that were more accurate and reliable than those of even the most skilled diagnostician (<xref ref-type="bibr" rid="B38">38</xref>).</p>
</sec>
<sec id="s3c3"><label>3.3.3.</label><title>Contextual characteristics</title>
<p>The theme contextual characteristics concerned the influence on trust in AI in relation to implementation in healthcare regarding the context in which individuals and AI are embedded. The contextual characteristics in relation to implementation of AI in healthcare consisted of the following subthemes: <italic>healthcare culture, interpersonal relationships, and governance. Healthcare culture</italic> included medical area, professional expertise, and opinion of important others (<italic>n&#x2009;&#x003D;&#x2009;5</italic>). For example, a &#x201C;skilled clinician&#x201D; was considered someone who had embodied tacit knowledge through years of experience in a community of experts (<xref ref-type="bibr" rid="B38">38</xref>). Opinion of important others, such as clinicians, colleagues, and seniors, shaped individuals&#x2019; initial trust (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>). Trust in AI in relation to implementation in healthcare depended also on the medical area, e.g., the perceived risks of using AI in radiology and dermatology compared to robotic surgery (<xref ref-type="bibr" rid="B49">49</xref>). <italic>Interpersonal relationship,</italic> collaboration, personal interactions, and mutual understanding were found to influence trust (<italic>n&#x2009;&#x003D;&#x2009;4</italic>), especially between different stakeholders (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B48">48</xref>). Thus, reduced communication in relation to AI implementation was believed to result in less trust among patients (<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B49">49</xref>). Yakar et al. (<xref ref-type="bibr" rid="B49">49</xref>) investigated trust in AI in the areas of radiology, surgery and dermatology, and the results showed that those who found personal interactions important had less trust in all three areas. 
<italic>Governance,</italic> including policies, standards, and guidelines had to be defined to enhance trust in AI (<italic>n</italic>&#x2009;<italic>&#x003D;&#x2009;4</italic>). The lack of clear guidelines in medical context was believed to lead to more uncertainties and less trust (<xref ref-type="bibr" rid="B41">41</xref>). Roski et al. (<xref ref-type="bibr" rid="B48">48</xref>) highlighted the importance of different stakeholder-consented framework and goals to enhance trust, which was also a condition for self-governance. Datta Burton et al. (<xref ref-type="bibr" rid="B38">38</xref>) suggested policies that encourage greater clinician engagement in the evaluation of a computational model that would lead to more responsible adoption.</p>
</sec>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4.</label><title>Discussion</title>
<p>This study was conducted to explore the scientific literature regarding how trust in AI is conceptualized in relation to implementation in healthcare and what influences trust in AI in relation to implementation in healthcare. Only eight studies were found to meet the strict inclusion criteria. The results showed that the conceptualization of trust in AI differed between the studies, as well as what they accounted for as influencing trust. We identified three themes that influenced trust in AI in relation to implementation in healthcare: individual characteristics, AI characteristics and contextual characteristics. Most research focused on the individual characteristics or AI characteristics, and the focus was rarely on the context or implementation processes.</p>
<p>AI in healthcare is a relatively new endeavor but the use of AI has become more common in healthcare settings during the past decade (<xref ref-type="bibr" rid="B3">3</xref>). Studies on the implementation of AI in healthcare therefore represent a fairly new research area. This could explain the low number of included studies, which all were recently published and mostly from high income countries. Another explanation for the low number could be that trust is rarely mentioned in implementation science frameworks, theories, or models (<xref ref-type="bibr" rid="B51">51</xref>). The findings showed that the intended users were often clinicians (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>), which also aligns with implementation science where the focus is on clinicians rather than patients. Most of the empirical studies were cross-sectional where questionnaires were used to measure trust as the individual&#x0027;s attitudes and perceptions of AI&#x0027;s capability (<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B49">49</xref>) rather than considering other influencing variables. These studies discussed AI at a general level where the individuals had no or very little experience with practical AI tools, instead of addressing trust where the tools have been implemented and used over longer periods. One should thus be careful in using these perspectives in the development of implementation strategies to avoid building strategies on opinions, perceptions, and potential misconceptions rather than on actual experiences. Moreover, these fairly superficial perspectives on trust in AI in relation to implementation give little insight since they do not consider the context and the underlying values.</p>
<p>The conceptualization of trust in AI in relation to implementation in healthcare differed between the included studies. Some studies focused on individual characteristics and AI characteristics (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>), and other studies concentrated on the relations between people (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>). Trust in AI in relation to implementation in healthcare did not always have a specific definition. Instead, it was understood indirectly as influenced by different characteristics or determinants, and as having a mediating role, positioned between perceptions of AI characteristics and AI use. These different approaches to trust in AI reveal its complexity and the need of having a holistic understanding of the concept spanning different levels and dimensions.</p>
<p>The three themes that were found to influence trust in AI in relation to implementation in healthcare can be compared to implementation science, which emphasizes the determinants that influence the implementation by understanding the context in which they are used (<xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B53">53</xref>). In line with Leeman et al. (<xref ref-type="bibr" rid="B54">54</xref>), the determinants to facilitate implementation need to be known for appropriate strategies to be chosen. The themes are well-aligned with the Consolidated Framework for Implementation Research (CFIR), which is one of the most widely used determinant frameworks in implementation science (<xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>). Trust could be placed in the assessment category in CFIR, situated between determinants and outcomes, where the concepts of acceptability, appropriateness, feasibility, implementation readiness and implementation climate are also placed (<xref ref-type="bibr" rid="B55">55</xref>).</p>
<fig id="F2" position="float"><label>Figure 2</label>
<caption><p>The determinants associated with trust in AI in relation to implementation in healthcare mapped onto CFIR domains and Constructs (<xref ref-type="bibr" rid="B55">55</xref>).</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="frhs-03-1211150-g002.tif"/>
</fig>
<p>The theme individual characteristics such as an individual&#x0027;s circumstances was shown to influence trust in AI (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>). The results showed that individuals in vulnerable positions (less educated people, unemployed, people with non-Western immigration background, older people, and patients with chronic conditions) had low degree of trust in AI (<xref ref-type="bibr" rid="B49">49</xref>). This suggests a relationship between trust and the individuals&#x0027; perception of control or empowerment. This may be consistent with Luhmann (<xref ref-type="bibr" rid="B11">11</xref>) who argued that people are willing to trust if they possess inner security. Moreover, perceptions of AI characteristics such as being a non-transparent &#x201C;black box&#x201D; with autonomous and self-learning capacity were related to lack of trust in AI since these characteristics obstruct the understanding of its decisions. Knowledge and technological skills were other aspects that were shown to enhance trust in AI, which may also be understood as related to control or empowerment.</p>
<p>This study showed that trust in AI in relation to implementation in healthcare may be related to knowledge within a context. People&#x0027;s perception of AI as meaningful, useful, or valuable contributed to trust (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>). The results showed that trust in AI was not only influenced by its &#x201C;technical&#x201D; objectivity, efficiency, and accuracy. For example, person-centered care does not only consider medical competence as technical skills but also relational moral competency, empathy, compassion, and trust (<xref ref-type="bibr" rid="B41">41</xref>), which could explain why AI&#x0027;s anthropomorphic characteristics and personalization enhanced trust in AI (<xref ref-type="bibr" rid="B45">45</xref>). Healthcare culture is based on knowledge within a context and could be why the individuals&#x0027; trust in AI was often shaped by important others (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B49">49</xref>), as well as why interpersonal relationships, collaboration and common understanding were found to influence trust (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B48">48</xref>, <xref ref-type="bibr" rid="B49">49</xref>). It also explains the importance of governance and the need of common guidelines (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B48">48</xref>).</p>
<p>Knowledge within a context and its influence on trust in AI in relation to implementation in healthcare could be compared to Normalization Process Theory (NPT), another widely used theoretical approach in implementation science. The theory understands implementation as a possible challenge toward individuals&#x0027; existing ways of working or thinking about care (<xref ref-type="bibr" rid="B56">56</xref>). NPT suggests that people need to make sense of AI together to understand their specific roles and responsibilities in relation to AI use in healthcare, and the importance of new agreements and values that give meanings to their actions (<xref ref-type="bibr" rid="B57">57</xref>). This could be explained by our ability to contextualize information through narratives (<xref ref-type="bibr" rid="B58">58</xref>), which is also in line with Luhmann (<xref ref-type="bibr" rid="B11">11</xref>) who viewed trust as possible only in a familiar world.</p>
<p>Only considering AI&#x0027;s technical aspects when implementing AI in healthcare is not enough. AI tools should not be understood apart from the context and the people using them. Existing values and understanding of care can become barriers to trust in AI in relation to implementation in healthcare if there is a lack of coherence. There is thus a need to understand the context in relation to implementation (<xref ref-type="bibr" rid="B59">59</xref>) to be able to align AI to existing values (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B57">57</xref>). Differences in values must be considered for trust to be present when implementing AI in healthcare. The use of AI could thus add value to clinical reasoning rather than competing with it according to Datta Burton et al. (<xref ref-type="bibr" rid="B38">38</xref>).</p>
<sec id="s4a"><label>4.1.</label><title>Strength and limitations</title>
<p>The study has some strengths that are worth highlighting. The search was designed together with a librarian and the selection of relevant studies were conducted independently by two reviewers with consensus. We used a comprehensive search strategy and adhered to a structure for scoping reviews outlined by Arksey and O&#x0027;Malley (<xref ref-type="bibr" rid="B35">35</xref>).</p>
<p>The study also has shortcomings that must be considered when interpreting the findings. Trust in AI in relation to implementation in healthcare relates to a young research field, and we found it therefore necessary to include any type of methodology in this study. This means the conceptualization of trust in AI was based on both results and reflections. The study was limited to the published literature in English, and we did not search wider grey literature where we may have identified additional relevant literature. Only a small number of articles met the strict inclusion criteria since many of the articles were excluded because they only mentioned trust or did not address trust in AI in relation to implementation in healthcare. Most of the included studies were conducted in high-income countries and the results may therefore not be relevant to other countries.</p>
</sec>
<sec id="s4b"><label>4.2.</label><title>Implications and suggestions for future work</title>
<p>This scoping review showed that there were different approaches to trust, which demonstrates that trust can be understood at different levels and dimensions. Only considering one aspect could mean that inappropriate strategies are used to support implementation. For example, there were few empirical studies that addressed trust beyond individual characteristics and AI characteristics. Future empirical studies thus need to have a holistic view on trust. The results also showed that in order to establish trust in AI in relation to implementation in healthcare, it is important to align AI to existing values and to take account of social interactions and negotiation of values in relation to care. This scoping review also found that trust in AI was often influenced by the opinion of important others (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>). Future studies could therefore investigate how these important others facilitate trust in AI in relation to implementation in healthcare. Three of the included studies mentioned that trust grows with time and maturity (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B46">46</xref>). However, none of these studies investigated this change empirically. There is therefore also a need for a better understanding of how trust in AI changes during implementation in healthcare.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions"><label>5.</label><title>Conclusions</title>
<p>Findings from the scoping review revealed that there is variation in the scientific literature in how trust in AI in relation to its implementation in healthcare has been conceptualized. Trust is often conceptualized by its determinants and having a mediating role, positioned between characteristics and AI use. There were also differences in what was believed to influence trust in AI. We found three themes that influenced trust in AI in relation to implementation in healthcare: individual characteristics, AI characteristics and contextual characteristics. Today, most research focuses only on one or two perspectives, for example the individual characteristics or the AI characteristics. Future studies addressing trust in AI in relation to implementation in healthcare should have a more holistic view on trust to be able to manage the many challenges and develop appropriate strategies to support the implementation of AI in healthcare.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s10">Supplementary Material</xref>, further inquiries can be directed to the corresponding author/s.</p>
</sec>
<sec id="s7" sec-type="author-contributions"><title>Author contributions</title>
<p>ES conceptualized the study with input from author PN, PS and JN. All authors contributed to the study design. ES retrieved the records from the databases. Authors ES and ES participated in the screening process and the extraction of the data. Data analysis was performed by authors ES and ES, and then discussed with all authors. The manuscript was drafted by ES with input from the other authors. All authors thereafter drafted and revised the manuscript and approved the final version.</p>
</sec>
<sec id="s8" sec-type="funding-information"><title>Funding</title>
<p>Knowledge Foundation (grant 20200208 01H) and the Swedish Research Council (grant 2022054 06). The funders were not involved in any aspect of study design, collection, analysis, interpretation of data, or in the writing or publication design.</p>
</sec>
<sec id="s9" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author PN declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="s11" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s10" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frhs.2023.1211150/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frhs.2023.1211150/full&#x0023;supplementary-material</ext-link></p>
<supplementary-material id="SD1" content-type="local-data">
<media mimetype="application" mime-subtype="vnd.openxmlformats-officedocument.wordprocessingml.document" xlink:href="Datasheet1.docx"/></supplementary-material>
<supplementary-material id="SD2" content-type="local-data">
<media mimetype="application" mime-subtype="vnd.openxmlformats-officedocument.wordprocessingml.document" xlink:href="Datasheet2.docx"/></supplementary-material></sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petersson</surname><given-names>L</given-names></name><name><surname>Larsson</surname><given-names>I</given-names></name><name><surname>Nygren</surname><given-names>JM</given-names></name><name><surname>Nilsen</surname><given-names>P</given-names></name><name><surname>Neher</surname><given-names>M</given-names></name><name><surname>Reed</surname><given-names>JE</given-names></name><etal/></person-group> <article-title>Challenges to implementing artificial intelligence in healthcare: a qualitative interview study with healthcare leaders in Sweden</article-title>. <source>BMC Health Serv Res</source>. (<year>2022</year>) <volume>22</volume>:<fpage>850</fpage>. <pub-id pub-id-type="doi">10.1186/s12913-022-08215-8</pub-id><pub-id pub-id-type="pmid">35778736</pub-id></citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="other"><collab>EPRS</collab>. <comment>Artificial intelligence in healthcare: Applications, risks, and ethical and societal impacts (2022)</comment>. <ext-link ext-link-type="uri" xlink:href="https://www.europarl.europa.eu/RegData/etudes/STUD/2022/729512/EPRS_STU(2022)729512_EN.pdf">https://www.europarl.europa.eu/RegData/etudes/STUD/2022/729512/EPRS_STU(2022)729512_EN.pdf</ext-link> <comment>(Accessed November 22, 2022)</comment>.</citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Topol</surname><given-names>EJ</given-names></name></person-group>. <article-title>High-performance medicine: the convergence of human and artificial intelligence</article-title>. <source>Nat Med</source>. (<year>2019</year>) <volume>25</volume>:<fpage>44</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id><pub-id pub-id-type="pmid">30617339</pub-id></citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bajwa</surname><given-names>J</given-names></name><name><surname>Munir</surname><given-names>U</given-names></name><name><surname>Nori</surname><given-names>A</given-names></name><name><surname>Williams</surname><given-names>B</given-names></name></person-group>. <article-title>Artificial intelligence in healthcare: transforming the practice of medicine</article-title>. <source>Future Healthcare J</source>. (<year>2021</year>) <volume>8</volume>(<issue>2</issue>):<fpage>e188</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.7861/fhj.2021-0095</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mehta</surname><given-names>N</given-names></name><name><surname>Pandit</surname><given-names>A</given-names></name><name><surname>Shukla</surname><given-names>S</given-names></name></person-group>. <article-title>Transforming healthcare with big data analytics and artificial intelligence: a systematic mapping study</article-title>. <source>J Biomed Inform</source>. (<year>2019</year>) <volume>100</volume>:<fpage>103311</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbi.2019.103311</pub-id><pub-id pub-id-type="pmid">31629922</pub-id></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="other"><collab>European Commission</collab>. <comment>A European approach to artificial intelligence (2022).</comment> <ext-link ext-link-type="uri" xlink:href="https://digital-strategy.ec.europa.eu/en/policies/european-approach-artificial-intelligence">https://digital-strategy.ec.europa.eu/en/policies/european-approach-artificial-intelligence</ext-link> <comment>(Accessed November 9, 2022)</comment>.</citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname><given-names>M</given-names></name><name><surname>Savage</surname><given-names>C</given-names></name><name><surname>Nair</surname><given-names>M</given-names></name><name><surname>Larsson</surname><given-names>I</given-names></name><name><surname>Svedberg</surname><given-names>P</given-names></name><name><surname>Nygren</surname><given-names>JM</given-names></name></person-group>. <article-title>Artificial intelligence application in health care practice: scoping review</article-title>. <source>J Med Internet Res</source>. (<year>2022</year>) <volume>24</volume>:<fpage>e40238</fpage>. <pub-id pub-id-type="doi">10.2196/40238</pub-id><pub-id pub-id-type="pmid">36197712</pub-id></citation></ref>
<ref id="B8"><label>8.</label><citation citation-type="other"><collab>HLEG</collab>. <comment>Ethics guidelines for trustworthy AI (2019).</comment> <ext-link ext-link-type="uri" xlink:href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai">https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</ext-link> <comment>(Accessed March 2, 2023)</comment>.</citation></ref>
<ref id="B9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gille</surname><given-names>F</given-names></name><name><surname>Jobin</surname><given-names>A</given-names></name><name><surname>Ienca</surname><given-names>M</given-names></name></person-group>. <article-title>What we talk about when we talk about trust: theory of trust in healthcare</article-title>. <source>Intell-Based Med</source>. (<year>2020</year>) <volume>1-2</volume>:<fpage>100001</fpage>. <pub-id pub-id-type="doi">10.1016/j.ibmed.2020.100001</pub-id></citation></ref>
<ref id="B10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gille</surname><given-names>F</given-names></name><name><surname>Smith</surname><given-names>S</given-names></name><name><surname>Mays</surname><given-names>N</given-names></name></person-group>. <article-title>Why public trust in health care systems matters and deserves greater research attention</article-title>. <source>J Health Serv Res Policy</source>. (<year>2015</year>) <volume>20</volume>(<issue>1</issue>):<fpage>62</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1177/1355819614543161</pub-id><pub-id pub-id-type="pmid">25038059</pub-id></citation></ref>
<ref id="B11"><label>11.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Luhmann</surname><given-names>N</given-names></name></person-group>. <source>Trust and power</source>. <publisher-loc>Cambridge</publisher-loc>: <publisher-name>Polity Press</publisher-name> (<year>2017</year>). <fpage>224</fpage>.</citation></ref>
<ref id="B12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Asan</surname><given-names>O</given-names></name><name><surname>Emrah Bayrak</surname><given-names>A</given-names></name><name><surname>Choudhury</surname><given-names>A</given-names></name></person-group>. <article-title>Artificial intelligence and human trust in healthcare: focus on clinicians</article-title>. <source>J Med Internet Res</source>. (<year>2020</year>) <volume>22</volume>:<fpage>e15154</fpage>. <pub-id pub-id-type="doi">10.2196/15154</pub-id><pub-id pub-id-type="pmid">32558657</pub-id></citation></ref>
<ref id="B13"><label>13.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Luhmann</surname><given-names>N</given-names></name></person-group>. <article-title>Familiarity, confidence, trust: problems and alternatives</article-title>. In: <person-group person-group-type="editor"><name><surname>Gambetta</surname><given-names>D</given-names></name></person-group>, editors. <source>Trust: Making and breaking cooperative relations</source>. <publisher-loc>Oxford</publisher-loc>: <publisher-name>University of Oxford</publisher-name> (<year>2000</year>). p. <fpage>94</fpage>&#x2013;<lpage>107</lpage>.</citation></ref>
<ref id="B14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dlugatch</surname><given-names>R</given-names></name><name><surname>Georgieva</surname><given-names>A</given-names></name><name><surname>Kerasidou</surname><given-names>A</given-names></name></person-group>. <article-title>Trustworthy artificial intelligence and ethical design: public perceptions of trustworthiness of an AI-based decision-support tool in the context of intrapartum care</article-title>. <source>BMC Med Ethics</source>. (<year>2023</year>) <volume>24</volume>:<fpage>42</fpage>. <pub-id pub-id-type="doi">10.1186/s12910-023-00917-w</pub-id><pub-id pub-id-type="pmid">37340408</pub-id></citation></ref>
<ref id="B15"><label>15.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Hawley</surname><given-names>K</given-names></name></person-group>. <source>How to be trustworthy</source>. <publisher-loc>Oxford, New York</publisher-loc>: <publisher-name>Oxford University Press</publisher-name> (<year>2019</year>). <fpage>176</fpage>.</citation></ref>
<ref id="B16"><label>16.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ryan</surname><given-names>M</given-names></name></person-group>. <article-title>In AI we trust: ethics, artificial intelligence, and reliability</article-title>. <source>Sci Eng Ethics</source>. (<year>2020</year>) <volume>26</volume>(<issue>4</issue>):<fpage>2749</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1007/s11948-020-00228-y</pub-id><pub-id pub-id-type="pmid">32524425</pub-id></citation></ref>
<ref id="B17"><label>17.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>O&#x2019;Neill</surname><given-names>O</given-names></name></person-group>. <article-title>Linking trust to trustworthiness</article-title>. <source>Int J Philos Stud</source>. (<year>2018</year>) <volume>26</volume>(<issue>2</issue>):<fpage>293</fpage>&#x2013;<lpage>300</lpage>. <pub-id pub-id-type="doi">10.1080/09672559.2018.1454637</pub-id></citation></ref>
<ref id="B18"><label>18.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fernandes</surname><given-names>M</given-names></name><name><surname>Vieira</surname><given-names>SM</given-names></name><name><surname>Leite</surname><given-names>F</given-names></name><name><surname>Palos</surname><given-names>C</given-names></name><name><surname>Finkelstein</surname><given-names>S</given-names></name><name><surname>Sousa</surname><given-names>JMC</given-names></name></person-group>. <article-title>Clinical decision support systems for triage in the emergency department using intelligent systems: a review</article-title>. <source>Artif Intell Med</source>. (<year>2020</year>) <volume>102</volume>:<fpage>101762</fpage>. <pub-id pub-id-type="doi">10.1016/j.artmed.2019.101762</pub-id><pub-id pub-id-type="pmid">31980099</pub-id></citation></ref>
<ref id="B19"><label>19.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>J</given-names></name><name><surname>Zhang</surname><given-names>Z-M</given-names></name></person-group>. <article-title>Ethics and governance of trustworthy medical artificial intelligence</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2023</year>) <volume>23</volume>:<fpage>7</fpage>. <pub-id pub-id-type="doi">10.1186/s12911-023-02103-9</pub-id><pub-id pub-id-type="pmid">36639799</pub-id></citation></ref>
<ref id="B20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Obermeyer</surname><given-names>Z</given-names></name><name><surname>Powers</surname><given-names>B</given-names></name><name><surname>Vogeli</surname><given-names>C</given-names></name><name><surname>Mullainathan</surname><given-names>S</given-names></name></person-group>. <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>. <source>Science</source>. (<year>2019</year>) <volume>366</volume>:<fpage>447</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="pmid">31649194</pub-id></citation></ref>
<ref id="B21"><label>21.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Siala</surname><given-names>H</given-names></name><name><surname>Wang</surname><given-names>Y</given-names></name></person-group>. <article-title>SHIFTing artificial intelligence to be responsible in healthcare: a systematic review</article-title>. <source>Soc Sci Med</source>. (<year>2022</year>) <volume>296</volume>:<fpage>114782</fpage>. <pub-id pub-id-type="doi">10.1016/j.socscimed.2022.114782</pub-id><pub-id pub-id-type="pmid">35152047</pub-id></citation></ref>
<ref id="B22"><label>22.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Trocin</surname><given-names>C</given-names></name><name><surname>Mikalef</surname><given-names>P</given-names></name><name><surname>Papamitsiou</surname><given-names>Z</given-names></name><name><surname>Conboy</surname><given-names>K</given-names></name></person-group>. <article-title>Responsible AI for digital health: a synthesis and a research agenda</article-title>. <source>Info Syst Front</source>. (<year>2021</year>). <pub-id pub-id-type="doi">10.1007/s10796-021-10146-4</pub-id></citation></ref>
<ref id="B23"><label>23.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gooding</surname><given-names>P</given-names></name><name><surname>Kariotis</surname><given-names>T</given-names></name></person-group>. <article-title>Ethics and law in research on algorithmic and data-driven technology in mental health care: scoping review</article-title>. <source>JMIR Ment Health</source>. (<year>2021</year>) <volume>8</volume>:<fpage>e24668</fpage>. <pub-id pub-id-type="doi">10.2196/24668</pub-id><pub-id pub-id-type="pmid">34110297</pub-id></citation></ref>
<ref id="B24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>&#x010C;artolovni</surname><given-names>A</given-names></name><name><surname>Tomi&#x010D;i&#x0107;</surname><given-names>A</given-names></name><name><surname>Lazi&#x0107; Mosler</surname><given-names>E</given-names></name></person-group>. <article-title>Ethical, legal, and social considerations of AI-based medical decision-support tools: a scoping review</article-title>. <source>Int J Med Inf</source>. (<year>2022</year>) <volume>161</volume>:<fpage>104738</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104738</pub-id></citation></ref>
<ref id="B25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Beil</surname><given-names>M</given-names></name><name><surname>Proft</surname><given-names>I</given-names></name><name><surname>van Heerden</surname><given-names>D</given-names></name><name><surname>Sviri</surname><given-names>S</given-names></name><name><surname>van Heerden</surname><given-names>PV</given-names></name></person-group>. <article-title>Ethical considerations about artificial intelligence for prognostication in intensive care</article-title>. <source>Intensive Care Med Exp</source>. (<year>2020</year>) <volume>7</volume>:<fpage>70</fpage>. <pub-id pub-id-type="doi">10.1186/s40635-019-0286-6</pub-id></citation></ref>
<ref id="B26"><label>26.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Murphy</surname><given-names>K</given-names></name><name><surname>Di Ruggiero</surname><given-names>E</given-names></name><name><surname>Upshur</surname><given-names>R</given-names></name><name><surname>Willison</surname><given-names>DJ</given-names></name><name><surname>Malhotra</surname><given-names>N</given-names></name><name><surname>Cai</surname><given-names>JC</given-names></name><etal/></person-group> <article-title>Artificial intelligence for good health: a scoping review of the ethics literature</article-title>. <source>BMC Med Ethics</source>. (<year>2021</year>) <volume>22</volume>:<fpage>14</fpage>. <pub-id pub-id-type="doi">10.1186/s12910-021-00577-8</pub-id><pub-id pub-id-type="pmid">33588803</pub-id></citation></ref>
<ref id="B27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coeckelbergh</surname><given-names>M</given-names></name></person-group>. <article-title>Ethics of artificial intelligence: some ethical issues and regulatory challenges</article-title>. <source>Technol Regul</source>. (<year>2019</year>) <volume>1</volume>:<fpage>31</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.26116/techreg.2019.003</pub-id></citation></ref>
<ref id="B28"><label>28.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gama</surname><given-names>F</given-names></name><name><surname>Tyskbo</surname><given-names>D</given-names></name><name><surname>Nygren</surname><given-names>J</given-names></name><name><surname>Barlow</surname><given-names>J</given-names></name><name><surname>Reed</surname><given-names>J</given-names></name><name><surname>Svedberg</surname><given-names>P</given-names></name></person-group>. <article-title>Implementation frameworks for artificial intelligence translation into health care practice: scoping review</article-title>. <source>J Med Internet Res</source>. (<year>2022</year>) <volume>24</volume>:<fpage>e32215</fpage>. <pub-id pub-id-type="doi">10.2196/32215</pub-id><pub-id pub-id-type="pmid">35084349</pub-id></citation></ref>
<ref id="B29"><label>29.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Svedberg</surname><given-names>P</given-names></name><name><surname>Reed</surname><given-names>J</given-names></name><name><surname>Nilsen</surname><given-names>P</given-names></name><name><surname>Barlow</surname><given-names>J</given-names></name><name><surname>Macrae</surname><given-names>C</given-names></name><name><surname>Nygren</surname><given-names>J</given-names></name></person-group>. <article-title>Toward successful implementation of artificial intelligence in health care practice: protocol for a research program</article-title>. <source>JMIR Res Protoc</source>. (<year>2022</year>) <volume>11</volume>:<fpage>e34920</fpage>. <pub-id pub-id-type="doi">10.2196/34920</pub-id><pub-id pub-id-type="pmid">35262500</pub-id></citation></ref>
<ref id="B30"><label>30.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Simon</surname><given-names>J</given-names></name></person-group>. <source>The Routledge handbook of trust and philosophy</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Routledge</publisher-name> (<year>2020</year>). <fpage>454</fpage>.</citation></ref>
<ref id="B31"><label>31.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Asan</surname><given-names>O</given-names></name><name><surname>Yu</surname><given-names>Z</given-names></name><name><surname>Crotty</surname><given-names>BH</given-names></name></person-group>. <article-title>How clinician-patient communication affects trust in health information sources: temporal trends from a national cross-sectional survey</article-title>. <source>PLoS ONE</source>. (<year>2021</year>) <volume>16</volume>:<fpage>e0247583</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0247583</pub-id><pub-id pub-id-type="pmid">33630952</pub-id></citation></ref>
<ref id="B32"><label>32.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kerasidou</surname><given-names>A</given-names></name></person-group>. <article-title>Artificial intelligence and the ongoing need for empathy, compassion and trust in healthcare</article-title>. <source>Bull World Health Organ</source>. (<year>2020</year>) <volume>98</volume>:<fpage>245</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.2471/BLT.19.237198</pub-id><pub-id pub-id-type="pmid">32284647</pub-id></citation></ref>
<ref id="B33"><label>33.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Markov&#x00E1;</surname><given-names>I</given-names></name></person-group>. <source>The dialogical mind. Common sense and ethics</source>. <publisher-loc>Cambridge</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name> (<year>2016</year>). <fpage>260</fpage>.</citation></ref>
<ref id="B34"><label>34.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tricco</surname><given-names>AC</given-names></name><name><surname>Lillie</surname><given-names>E</given-names></name><name><surname>Zarin</surname><given-names>W</given-names></name><name><surname>O&#x0027;Brien</surname><given-names>KK</given-names></name><name><surname>Colquhoun</surname><given-names>H</given-names></name><name><surname>Levac</surname><given-names>D</given-names></name><etal/></person-group> <article-title>PRISMA Extension for scoping reviews (PRISMA-ScR): checklist and explanation</article-title>. <source>Ann Intern Med</source>. (<year>2018</year>) <volume>169</volume>:<fpage>467</fpage>&#x2013;<lpage>73</lpage>. <pub-id pub-id-type="doi">10.7326/M18-0850</pub-id><pub-id pub-id-type="pmid">30178033</pub-id></citation></ref>
<ref id="B35"><label>35.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arksey</surname><given-names>H</given-names></name><name><surname>O&#x2019;Malley</surname><given-names>L</given-names></name></person-group>. <article-title>Scoping studies: towards a methodological framework</article-title>. <source>Int J Soc Res Methodol</source>. (<year>2005</year>) <volume>8</volume>:<fpage>19</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1080/1364557032000119616</pub-id></citation></ref>
<ref id="B36"><label>36.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Booth</surname><given-names>A</given-names></name><name><surname>Sutton</surname><given-names>A</given-names></name><name><surname>Clowes</surname><given-names>M</given-names></name><name><surname>Martyn-St James</surname><given-names>M</given-names></name></person-group>. <source>Systematic approach to a successful literature review</source>. <publisher-loc>London</publisher-loc>: <publisher-name>Sage Publications</publisher-name> (<year>2021</year>). <fpage>424</fpage>.</citation></ref>
<ref id="B37"><label>37.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peters</surname><given-names>MDJ</given-names></name><name><surname>Marnie</surname><given-names>C</given-names></name><name><surname>Colquhoun</surname><given-names>H</given-names></name><name><surname>Garritty</surname><given-names>CM</given-names></name><name><surname>Hempel</surname><given-names>S</given-names></name><name><surname>Horsley</surname><given-names>T</given-names></name><etal/></person-group> <article-title>Scoping reviews: reinforcing and advancing the methodology and application</article-title>. <source>Syst Rev</source>. (<year>2021</year>) <volume>10</volume>(<issue>263</issue>):<fpage>1</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1186/s13643-021-01821-3</pub-id><pub-id pub-id-type="pmid">33388080</pub-id></citation></ref>
<ref id="B38"><label>38.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Datta Burton</surname><given-names>S</given-names></name><name><surname>Mahfoud</surname><given-names>T</given-names></name><name><surname>Aicardi</surname><given-names>C</given-names></name><name><surname>Rose</surname><given-names>N</given-names></name></person-group>. <article-title>Clinical translation of computational brain models: understanding the salience of trust in clinician-researcher relationships</article-title>. <source>Interdiscip Sci Rev</source>. (<year>2021</year>) <volume>46</volume>:<fpage>1</fpage>&#x2013;<lpage>2</lpage>. <pub-id pub-id-type="doi">10.1080/03080188.2020.1840223</pub-id></citation></ref>
<ref id="B39"><label>39.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Choi</surname><given-names>HH</given-names></name><name><surname>Chang</surname><given-names>SD</given-names></name><name><surname>Kohli</surname><given-names>MD</given-names></name></person-group>. <article-title>Implementation and design of artificial intelligence in abdominal imaging</article-title>. <source>Abdom Radiol</source>. (<year>2020</year>) <volume>45</volume>:<fpage>4084</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1007/s00261-020-02471-0</pub-id></citation></ref>
<ref id="B40"><label>40.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sheridan</surname><given-names>TB</given-names></name></person-group>. <article-title>Individual differences in attributes of trust in automation: measurement and application to system design</article-title>. <source>Front Psychol</source>. (<year>2019</year>) <volume>10</volume>:<fpage>1117</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2019.01117</pub-id><pub-id pub-id-type="pmid">31178783</pub-id></citation></ref>
<ref id="B41"><label>41.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Esmaeilzadeh</surname><given-names>P</given-names></name><name><surname>Mirzaei</surname><given-names>T</given-names></name><name><surname>Dharanikota</surname><given-names>S</given-names></name></person-group>. <article-title>Patients&#x2019; perception toward human&#x2014;artificial intelligence interaction in health care: experimental study</article-title>. <source>JMIR</source>. (<year>2021</year>) <volume>23</volume>:<fpage>e25856</fpage>. <pub-id pub-id-type="doi">10.2196/25856</pub-id><pub-id pub-id-type="pmid">34842535</pub-id></citation></ref>
<ref id="B42"><label>42.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reddy</surname><given-names>S</given-names></name><name><surname>Allan</surname><given-names>S</given-names></name><name><surname>Coghlan</surname><given-names>S</given-names></name><name><surname>Cooper</surname><given-names>PA</given-names></name></person-group>. <article-title>A governance model for the application of AI in health care</article-title>. <source>J Am Med Inform Assoc</source>. (<year>2020</year>) <volume>27</volume>:<fpage>491</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocz192</pub-id><pub-id pub-id-type="pmid">31682262</pub-id></citation></ref>
<ref id="B43"><label>43.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fan</surname><given-names>W</given-names></name><name><surname>Liu</surname><given-names>J</given-names></name><name><surname>Zhu</surname><given-names>W</given-names></name><name><surname>Pardalos</surname><given-names>PM</given-names></name></person-group>. <article-title>Investigating the impacting factors for the healthcare professionals to adopt artificial intelligence-based medical diagnosis support system (AIMDSS)</article-title>. <source>Ann Oper Res</source>. (<year>2018</year>) <volume>294</volume>:<fpage>567</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1007/s10479-018-2818-y</pub-id></citation></ref>
<ref id="B44"><label>44.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>McKnight</surname><given-names>DH</given-names></name></person-group>. <article-title>Trust in information technology</article-title>. In: <person-group person-group-type="editor"><name><surname>Davis</surname><given-names>GB</given-names></name></person-group>, editors. <source>The blackwell encyclopedia of management. Vol. 7 management information systems</source>. <publisher-loc>Malden, MA</publisher-loc>: <publisher-name>Blackwell</publisher-name> (<year>2005</year>). p. <fpage>329</fpage>&#x2013;<lpage>31</lpage>.</citation></ref>
<ref id="B45"><label>45.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname><given-names>K</given-names></name><name><surname>Tao</surname><given-names>D</given-names></name></person-group>. <article-title>The roles of trust, personalization, loss of privacy, and anthropomorphism in public acceptance of smart healthcare services</article-title>. <source>Comput Human Behav</source>. (<year>2022</year>) <volume>127</volume>:<fpage>107026</fpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2021.107026</pub-id></citation></ref>
<ref id="B46"><label>46.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Prakash</surname><given-names>AW</given-names></name><name><surname>Das</surname><given-names>S</given-names></name></person-group>. <article-title>Medical practitioner&#x2019;s adoption of intelligent clinical diagnostic decision support systems: a mixed-methods study</article-title>. <source>Info Manage</source>. (<year>2021</year>) <volume>58</volume>:<fpage>103524</fpage>. <pub-id pub-id-type="doi">10.1016/j.im.2021.103524</pub-id></citation></ref>
<ref id="B47"><label>47.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mayer</surname><given-names>RC</given-names></name><name><surname>Davis</surname><given-names>JH</given-names></name><name><surname>Schoorman</surname><given-names>FD</given-names></name></person-group>. <article-title>An integrative model of organizational trust</article-title>. <source>Acad Manage Rev</source>. (<year>1995</year>) <volume>20</volume>:<fpage>709</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.2307/258792</pub-id></citation></ref>
<ref id="B48"><label>48.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roski</surname><given-names>J</given-names></name><name><surname>Maier</surname><given-names>EJ</given-names></name><name><surname>Vigilante</surname><given-names>K</given-names></name><name><surname>Kane</surname><given-names>EA</given-names></name><name><surname>Matheny</surname><given-names>ME</given-names></name></person-group>. <article-title>Enhancing trust in AI through industry self-governance</article-title>. <source>J Am Med Inform Assoc</source>. (<year>2021</year>) <volume>28</volume>:<fpage>1582</fpage>&#x2013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocab065</pub-id><pub-id pub-id-type="pmid">33895824</pub-id></citation></ref>
<ref id="B49"><label>49.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yakar</surname><given-names>D</given-names></name><name><surname>Ongena</surname><given-names>YP</given-names></name><name><surname>Kwee</surname><given-names>TC</given-names></name><name><surname>Haan</surname><given-names>M</given-names></name></person-group>. <article-title>Do people favor artificial intelligence over physicians? A survey among the general population and their view on artificial intelligence in medicine</article-title>. <source>Value Health</source>. (<year>2021</year>) <volume>25</volume>:<fpage>374</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1016/j.jval.2021.09.004</pub-id><pub-id pub-id-type="pmid">35227448</pub-id></citation></ref>
<ref id="B50"><label>50.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Braun</surname><given-names>V</given-names></name><name><surname>Clarke</surname><given-names>V</given-names></name></person-group>. <article-title>Thematic analysis</article-title>. In: <person-group person-group-type="editor"><name><surname>Cooper</surname><given-names>H</given-names></name></person-group>, editors. <source>APA Handbook of research methods in psychology: research designs</source>. <publisher-loc>Washington, DC</publisher-loc>: <publisher-name>American Psychological Association</publisher-name> (<year>2022</year>). p. <fpage>57</fpage>&#x2013;<lpage>91</lpage>.</citation></ref>
<ref id="B51"><label>51.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Nilsen</surname><given-names>P</given-names></name></person-group>. <article-title>Overview of theories, models and frameworks in implementation science</article-title>. In: <person-group person-group-type="editor"><name><surname>Nilsen</surname><given-names>P</given-names></name><name><surname>Birken</surname><given-names>SA</given-names></name></person-group>, editors. <source>Handbook on implementation science</source>. <publisher-loc>Cheltenham</publisher-loc>: <publisher-name>Edward Elgar Publishing Limited</publisher-name> (<year>2020</year>). p. <fpage>8</fpage>&#x2013;<lpage>31</lpage>. <ext-link ext-link-type="uri" xlink:href="https://www.elgaronline.com/display/edcoll/9781788975988/9781788975988.00008.xml">https://www.elgaronline.com/display/edcoll/9781788975988/9781788975988.00008.xml</ext-link></citation></ref>
<ref id="B52"><label>52.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Damschroder</surname><given-names>LJ</given-names></name></person-group>. <article-title>Clarity out of chaos: use of theory in implementation research</article-title>. <source>Psychiatry Res</source>. (<year>2020</year>) <volume>283</volume>:<fpage>112461</fpage>. <pub-id pub-id-type="doi">10.1016/j.psychres.2019.06.036</pub-id><pub-id pub-id-type="pmid">31257020</pub-id></citation></ref>
<ref id="B53"><label>53.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>May</surname><given-names>CR</given-names></name><name><surname>Mair</surname><given-names>F</given-names></name><name><surname>Finch</surname><given-names>T</given-names></name><name><surname>MacFarlane</surname><given-names>A</given-names></name><name><surname>Dowrick</surname><given-names>C</given-names></name><name><surname>Treweek</surname><given-names>S</given-names></name><etal/></person-group>. <article-title>Development of a theory of implementation and integration: normalization process theory</article-title>. <source>Implement Sci</source>. (<year>2009</year>) <volume>4</volume>:<fpage>29</fpage>. <pub-id pub-id-type="doi">10.1186/1748-5908-4-29</pub-id><pub-id pub-id-type="pmid">19460163</pub-id></citation></ref>
<ref id="B54"><label>54.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leeman</surname><given-names>J</given-names></name><name><surname>Birken</surname><given-names>SA</given-names></name><name><surname>Powell</surname><given-names>BJ</given-names></name><name><surname>Rohweder</surname><given-names>C</given-names></name><name><surname>Shea</surname><given-names>CM</given-names></name></person-group>. <article-title>Beyond &#x201C;implementation strategies&#x201D;: classifying the full range of strategies used in implementation science and practice</article-title>. <source>Implement Sci</source>. (<year>2017</year>) <volume>12</volume>:<fpage>125</fpage>. <pub-id pub-id-type="doi">10.1186/s13012-017-0657-x</pub-id><pub-id pub-id-type="pmid">29100551</pub-id></citation></ref>
<ref id="B55"><label>55.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Damschroder</surname><given-names>LJ</given-names></name><name><surname>Reardon</surname><given-names>CM</given-names></name><name><surname>Opra Widerquist</surname><given-names>MA</given-names></name><name><surname>Lowery</surname><given-names>J</given-names></name></person-group>. <article-title>Conceptualizing outcomes for use with the consolidated framework for implementation research (CFIR): the CFIR outcomes addendum</article-title>. <source>Implement Sci</source>. (<year>2022</year>) <volume>17</volume>:<fpage>7</fpage>. <pub-id pub-id-type="doi">10.1186/s13012-021-01181-5</pub-id><pub-id pub-id-type="pmid">35065675</pub-id></citation></ref>
<ref id="B56"><label>56.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>May</surname><given-names>C</given-names></name><name><surname>Cummings</surname><given-names>A</given-names></name><name><surname>Girling</surname><given-names>M</given-names></name><name><surname>Bracher</surname><given-names>M</given-names></name><name><surname>Mair</surname><given-names>FS</given-names></name><name><surname>May</surname><given-names>CM</given-names></name><etal/></person-group>. <article-title>Using normalization process theory in feasibility studies and process evaluations of complex healthcare interventions: a systematic review</article-title>. <source>Implement Sci</source>. (<year>2018</year>) <volume>13</volume>:<fpage>18</fpage>. <pub-id pub-id-type="doi">10.1186/s13012-018-0758-1</pub-id><pub-id pub-id-type="pmid">29357876</pub-id></citation></ref>
<ref id="B57"><label>57.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>May</surname><given-names>CR</given-names></name><name><surname>Albers</surname><given-names>B</given-names></name><name><surname>Bracher</surname><given-names>M</given-names></name><name><surname>Finch</surname><given-names>TL</given-names></name><name><surname>Gilbert</surname><given-names>A</given-names></name><name><surname>Girling</surname><given-names>M</given-names></name><etal/></person-group>. <article-title>Translational framework for implementation evaluation and research: a normalization process theory coding manual for qualitative research and instrument development</article-title>. <source>Implement Sci</source>. (<year>2022</year>) <volume>17</volume>:<fpage>19</fpage>. <pub-id pub-id-type="doi">10.1186/s13012-022-01191-x</pub-id><pub-id pub-id-type="pmid">35193611</pub-id></citation></ref>
<ref id="B58"><label>58.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coeckelbergh</surname><given-names>M</given-names></name></person-group>. <article-title>Narrative responsibility and artificial intelligence: how AI challenges human responsibility and sense-making</article-title>. <source>AI Soc</source>. (<year>2021</year>):<fpage>1</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1007/s00146-021-01375-x</pub-id></citation></ref>
<ref id="B59"><label>59.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nilsen</surname><given-names>P</given-names></name></person-group>. <article-title>Making sense of implementation theories, models and frameworks</article-title>. <source>Implement Sci</source>. (<year>2015</year>) <volume>10</volume>:<fpage>53</fpage>. <pub-id pub-id-type="doi">10.1186/s13012-015-0242-0</pub-id><pub-id pub-id-type="pmid">25895742</pub-id></citation></ref></ref-list>
</back>
</article>