<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="discussion">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Public Health</journal-id>
<journal-title>Frontiers in Public Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Public Health</abbrev-journal-title>
<issn pub-type="epub">2296-2565</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpubh.2023.1254334</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Public Health</subject>
<subj-group>
<subject>Opinion</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Brave (in a) new world: an ethical perspective on chatbots for medical advice</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Erren</surname> <given-names>Thomas C.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/415522/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lewis</surname> <given-names>Philip</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/204437/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Shaw</surname> <given-names>David M.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>University of Cologne, University Hospital of Cologne, Cologne</institution>, <addr-line>North Rhine-Westphalia</addr-line>, <country>Germany</country></aff>
<aff id="aff2"><sup>2</sup><institution>Care and Public Health Research Institute, Maastricht University</institution>, <addr-line>Maastricht</addr-line>, <country>Netherlands</country></aff>
<aff id="aff3"><sup>3</sup><institution>Institute for Biomedical Ethics, University of Basel</institution>, <addr-line>Basel</addr-line>, <country>Switzerland</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Ahmed Shuhaiber, Zayed University, United Arab Emirates</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: M. Ahmed, Phcog.Net, India</p></fn>

<corresp id="c001">&#x0002A;Correspondence: Thomas C. Erren <email>tim.erren&#x00040;uni-koeln.de</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>17</day>
<month>08</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>11</volume>
<elocation-id>1254334</elocation-id>
<history>
<date date-type="received">
<day>06</day>
<month>07</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>31</day>
<month>07</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2023 Erren, Lewis and Shaw.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Erren, Lewis and Shaw</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions> 
<kwd-group>
<kwd>chatbot</kwd>
<kwd>ChatGPT</kwd>
<kwd>medical advice</kwd>
<kwd>ethics</kwd>
<kwd>confidentiality and privacy</kwd>
<kwd>risks</kwd>
<kwd>hallucination</kwd>
</kwd-group>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="28"/>
<page-count count="4"/>
<word-count count="3046"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Digital Public Health</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<list list-type="simple">
<list-item><p>&#x0201C;<italic>Words can be like X-rays if you use them properly&#x02014;they&#x00027;ll go through anything. You read and you&#x00027;re pierced.&#x0201D;</italic></p></list-item>
<list-item><p>-Aldous Huxley, Brave New World (<xref ref-type="bibr" rid="B1">1</xref>).</p></list-item>
</list>
<sec id="s1">
<title>1. Introduction</title>
<p>Huxley&#x00027;s dystopian novel <italic>Brave New World</italic> (<xref ref-type="bibr" rid="B1">1</xref>) describes a futuristic World State with immense scientific advances, but also psychological manipulation and classical conditioning. Today&#x00027;s &#x0201C;free-to-use&#x0201D; ChatGPT, which is taking the world by storm, should lead to discussions about the disruptive impact that artificial intelligence (AI) could have on our future, how to shape it, and how to avoid dystopian developments. Indeed, AI is an important frontier in public health that requires ethical discussion (<xref ref-type="bibr" rid="B2">2</xref>).</p>
<p>ChatGPT is a large language model (LLM) trained with very large amounts of textual data to generate new texts in response to text prompts from humans. Its responses resemble human answers to human questions. The progress and success of this deep learning model has also puzzled its developers at OpenAI (San Francisco, CA) (<xref ref-type="bibr" rid="B3">3</xref>). Both concerns and possibilities (<xref ref-type="bibr" rid="B4">4</xref>&#x02013;<xref ref-type="bibr" rid="B9">9</xref>) have been raised about the impact and developments of ChatGPT or similar AI.</p>
<p>Like a tsunami, the emergence of chatbots sweeps us into <italic>terra incognita</italic>. More waves of AI will follow. In such a climate, our opinion targets chatbots to contribute to medical advice from ethical points of view. By medical advice [MA] we mean integrated, private, confidential, dependable, and trustworthy health and medical information for citizens. Numerous articles from 2023 deal with ChatGPT and medicine and MA, but few from the perspectives of ethics (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B5">5</xref>&#x02013;<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>&#x02013;<xref ref-type="bibr" rid="B14">14</xref>).</p>
<p>ChatGPT&#x00027;s ability to provide on-demand and specific answers to questions could surpass the use of &#x0201C;Dr Google&#x0201D; when searching for medical and health-related information (<xref ref-type="bibr" rid="B15">15</xref>&#x02013;<xref ref-type="bibr" rid="B17">17</xref>). While &#x0201C;Dr Google&#x0201D; returns torrents of information that citizens must wade through, &#x0201C;Dr ChatGPT&#x0201D; offers users more focused distillations, although the results may still not be accurate.</p>
<p>Considering that chatbots mimic conversational interaction, we ask: What could come next? Where can AI take us, possibly faster than most expect? What can we do? And what should we do? In the following sections, we outline current warnings about chatbots like ChatGPT from developers and calls for ethical discourse. With regard to MA, we sketch potential developments of chatbots and associated risks, hallucinations, and &#x0201C;bullshit.&#x0201D; From an ethics perspective, we address the critical confidentiality of information and data, which serve as key drivers of advancing AI, and close with imperative questions and guardrails to benefit from chatbots and avoid dystopian developments.</p></sec>
<sec id="s2">
<title>2. Current warnings and calls for ethical discourse</title>
<p>A powerful call for ethical discourse came on May 30, 2023, in a one-sentence statement signed by more than 350 AI executives, researchers, and engineers: &#x0201C;Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war&#x0201D; (<xref ref-type="bibr" rid="B18">18</xref>). On May 16, 2023 (<xref ref-type="bibr" rid="B19">19</xref>), the OpenAI CEO Altman urged for regulation of AI in a US Senate panel hearing: &#x0201C;We think that regulatory intervention by governments will be critical to mitigate the risks of increasingly powerful models.&#x0201D; Moreover, &#x0201C;&#x02018;The Godfather of AI&#x00027; leaves Google and warns of danger ahead&#x0201D; wrote the NYT on May 1, 2023 (<xref ref-type="bibr" rid="B20">20</xref>): &#x0201C;For half a century, Geoffrey Hinton nurtured the technology at the heart of chatbots like ChatGPT. Now he worries it will cause serious harm.&#x0201D;</p>
<p>Clearly, even developers foresee massive potential for disruption by AI technology. Thus, the question arises as to how we can &#x0201C;prioritise responsible and beneficial applications that serve the best interests of society&#x0201D; (<xref ref-type="bibr" rid="B13">13</xref>), including utmost reliability, privacy, confidentiality, data protection, and disclosure of AI interests. We think that we should have an ethical debate and develop safeguards and red lines to allow the good and disallow the bad&#x02014;and given the stakes, to initially err on the side of caution.</p></sec>
<sec id="s3">
<title>3. Chatbot developments</title>
<p>One evolutionary step for chatbots like ChatGPT is that &#x0201C;chatting,&#x0201D; which today consists of typing and reading, will become talking and listening. That the voice serves as the AI control and response device would be in line with the HAL 9000 computer in the science fiction classic &#x0201C;2001: A Space Odyssey.&#x0201D; In that film, HAL (Heuristically programmed ALgorithmic computer) is an advanced AI, programmed to obey and not harm its creators, and should respond to voice instructions of the human crew when controlling their spaceship Discovery One.</p>
<p>Such a technically advanced advice service seems neither far-fetched nor distant. <italic>Alexa, Siri</italic>, and <italic>Cortana</italic>, produced by Amazon, Apple, and Microsoft, respectively, are already voice-activated and voice-responding internet-connected devices as part of the &#x0201C;Internet of Things (IoTs).&#x0201D; Combined with spoken language systems, citizens will talk and listen to chatbots that combine extensive (personal and general) information with massive computing power. Unlike ChatGPT, which has been closed off from new information since September 2021 (<xref ref-type="bibr" rid="B7">7</xref>), advanced AI for MA would have access to real-time information in order to provide up-to-date MA.</p></sec>
<sec id="s4">
<title>4. Risks, hallucinations, and &#x0201C;bullshit&#x0201D;</title>
<p>What about potential risks to citizens&#x02014;both patients and doctors&#x02014;who become &#x0201C;information providers and consumers&#x0201D;? What about potential mind-manipulation&#x02014;be that intentional or unintentional&#x02014;of citizens through convincingly worded and reasoned individualized advice? With Huxley&#x00027;s <italic>Brave New World</italic> in mind, is it possible that the boundaries between human and machine will become so blurred that citizens will no longer be able to distinguish MA provided by chatbots from that given by humans? Or might they not recognize who they give their personal information to? Could weakened encryption of human-machine exchanges reduce individuals&#x00027; control over data (<xref ref-type="bibr" rid="B21">21</xref>) and open doors to unethical surveillance?</p>
<p>While made-up &#x0201C;facts&#x0201D; or hallucinations in AI (<xref ref-type="bibr" rid="B7">7</xref>) limit ChatGPT&#x00027;s results in relation to science, representatives of medicine are beginning to weigh its potential utility and benefits for numerous areas and applications. But: In view of the broad interest in ChatGPT, please bear a key point in mind: despite extensive media coverage stating the contrary, ChatGPT is not capable of human levels of thought. It is a sophisticated chatbot that is trained on vast quantities of data to offer persuasive-sounding responses. Sometimes these responses are accurate; sometimes they are not. Sometimes its rhetoric is so persuasive that gaps in logic and facts are obscured. In effect, ChatGPT includes the generation of &#x0201C;bullshit&#x0201D; (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>), i.e., speech intended to persuade but without regard for truth, and such &#x0201C;bullshit&#x0201D; can be right some of the time. The question is whether citizens should seek MA from such a fallible information source.</p></sec>
<sec id="s5">
<title>5. Critical confidentiality: information and data = key drivers of AI advances</title>
<p>The current lack of information about how personal data is &#x0201C;used&#x0201D; makes AI boxes opaque: Are citizens aware of this non-transparent use and what control is in place so that personal data is not shared and disseminated for uses beyond MA? A key driver is that the more information citizens provide to AI, the more personalized (and potentially better) MA can become. However, this could lead patients and doctors to provide ever more information at the expense of privacy and confidentiality, making citizens and their data unduly transparent; thereby, potentially opening the door to other uses of their data.</p>
<p>In a nutshell, the <italic>modus operandi</italic> of current chatbot success is that &#x0201C;Artificial intelligence could never have been so successful in recent years &#x02026; if these corporations had not collected masses of data. This information enabled them to train their AI models in the first place. This&#x02014;in addition to an increase in computing power&#x02014;is the driver of the current AI boom&#x0201D; (<xref ref-type="bibr" rid="B24">24</xref>).</p></sec>
<sec id="s6">
<title>6. Imperative questions from an ethics perspective</title>
<p>That society will not abandon the potential of LLMs is a realistic prospect. Under this assumption, the following questions (<xref ref-type="boxed-text" rid="Box1">Box</xref>) should be urgently discussed in order to consider the mindful use of AI for MA. The answers to these questions seem open, but they must be found, and quickly.</p>
<boxed-text id="Box1">
<label>Box 1</label>
<title>Imperative questions for the mindful use of AI for medical advice [MA].</title>
<p><bold>Regulation of AI</bold></p>
<list list-type="simple">
<list-item><p>&#x02022; Who &#x0201C;programs and controls&#x0201D; AI and &#x0201C;how,&#x0201D; i.e., with what interests: what biases result for MA?</p></list-item>
</list>
<p><bold>Control of personal information</bold></p>
<list list-type="simple">
<list-item><p>&#x02022; Who protects the information that AI collects from individual citizens and through doctors, and how?</p></list-item>
<list-item><p>&#x02022; How will potential use (&#x0201C;sharing&#x0201D;) of information for purposes other than the requested MA be regulated or ruled out? For instance: How can we safeguard patients&#x00027; information from commercial exploitation (e.g., if the generation of MA were to be misused as a Trojan horse for commercial advantages)?</p></list-item>
</list>
<p><bold>MA &#x00026; the role of doctors</bold></p>
<list list-type="simple">
<list-item><p>&#x02022; How can we deal with MA for which AI cannot provide explanations as to how it was arrived at? (<xref ref-type="bibr" rid="B25">25</xref>).</p></list-item>
<list-item><p>&#x02022; Medical knowledge&#x02014;which chatbots will have more of than a doctor at any given time&#x02014;does not equate to quality of MA: What are doctors&#x00027; roles in reviewing, monitoring, and controlling MA by AI?</p></list-item>
<list-item><p>&#x02022; Can doctors become biased by AI-provided diagnoses and AI-suggested treatments such that they miss true causes and more appropriate therapies of ill-health? In other words, could they become over-reliant on AI?</p></list-item>
<list-item><p>&#x02022; Could it be that doctors who do not use AI such as ChatGPT may give less than adequate information and advice and could such doctors be accused of providing substandard care? (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>).</p></list-item>
<list-item><p>&#x02022; What knowledge and how much time do doctors need to invest to understand MA via AI and when can they use or endorse AI recommendations and with how much confidence?</p></list-item>
<list-item><p>&#x02022; Human decisions may be badly influenced by information provided by chatbots: What are doctors&#x00027; roles in scrutinizing and maintaining control over MA via AI?</p></list-item>
</list>
<p><bold>Liability</bold></p>
<list list-type="simple">
<list-item><p>&#x02022; Who is liable for MA via AI as an available resource (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B28">28</xref>)?</p></list-item>
<list-item><p>&#x02022; Who is liable when doctors use (or ignore) MA via AI? (<xref ref-type="bibr" rid="B25">25</xref>)</p></list-item>
</list>
<p><bold>Regarding all of the above</bold></p>
<list list-type="simple">
<list-item><p>&#x02022; Who should set which boundaries and how and when?</p></list-item>
</list>
</boxed-text>
<p>To exemplify the complexities above, let us briefly look at liability. When and how doctors who use medical AI could be held liable under current law is explored step-by-step elsewhere (<xref ref-type="bibr" rid="B25">25</xref>). Because AI is new to medical practice in general and to medical advice in particular, and with the lack of case law on liability when using AI, physicians would be entering <italic>terra incognita</italic>. To offer orientation, Price et al. (<xref ref-type="bibr" rid="B25">25</xref>) took the following approach: with a view to more general principles of tort law, examples of likely or potential legal consequences of the use of AI in clinical practice were developed. Importantly, the current legal basis for liability for medical AI, in which MA can play a central role, is unlikely to remain unchanged. As a rule of thumb, whenever AI is used to replace human (clinician) judgment, this may pose safety risks to patients and may render clinicians legally liable (<xref ref-type="bibr" rid="B26">26</xref>).</p></sec>
<sec id="s7">
<title>7. Ethical guardrails to benefit from chatbots and avoid dystopian developments</title>
<p>The ever-evolving chatbots have the potential to benefit us in personalized ways, but they also have the potential to manipulate and condition us through effective words and language. As for the power of information and data, they are the fuel for the performance and ultimately the competence of chatbots. As one step toward remedying conceivable misuse of information, the publication practice that authors disclose all possible conflicts of interest should also apply to AI and the companies that develop such products. But: Shouldn&#x00027;t we collect, store, connect, and share information about citizens as little as possible&#x02014;and if at all, then anonymized and encrypted?</p>
<p>Overall, we have outlined current warnings from AI developers, sketched potential developments and associated risks of using chatbots regarding MA, and provided imperative ethical questions. As humans are unlikely to forego the use of AI, significant ethical challenges need to be addressed. Echoing the cautionary tale in the introduction, we need to guard against bias, protect trust, equality and privacy, and establish a &#x0201C;Code of Conduct for AI in Health Care&#x0201D; (<xref ref-type="bibr" rid="B11">11</xref>) and guidelines for MA.</p>
<p>Of course, we should do all we ethically can to benefit from chatbot advice, provided it is medically sound. It is equally clear that we must avoid the danger of Orwellian transparency (<xref ref-type="bibr" rid="B27">27</xref>) and of conditioning (Huxley&#x00027;s &#x0201C;mind-manipulation&#x0201D;) to believe in non-sensical information about our bodies and health and in non-sensical MA. The latter would be a recipe for not having to be brave in the new AI world that lies ahead.</p></sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>TE: Conceptualization, Writing&#x02014;original draft, Writing&#x02014;review and editing. PL: Writing&#x02014;review and editing. DS: Writing&#x02014;review and editing.</p></sec>
</body>
<back>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>We acknowledge support for the Article Processing Charge from the DFG (German Research Foundation, 491454339).</p>
</sec>
<ack><p>TE acknowledges stimulating working conditions as a visiting scholar at the UC Berkeley.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Huxley</surname> <given-names>A</given-names></name></person-group>. <source>Brave New World.</source> <publisher-loc>London</publisher-loc>: <publisher-name>Chatto and Windus</publisher-name> (<year>1932</year>).</citation>
</ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>De Angelis</surname> <given-names>L</given-names></name> <name><surname>Baglivo</surname> <given-names>F</given-names></name> <name><surname>Arzilli</surname> <given-names>G</given-names></name> <name><surname>Privitera</surname> <given-names>GP</given-names></name> <name><surname>Ferragina</surname> <given-names>P</given-names></name> <name><surname>Tozzi</surname> <given-names>AE</given-names></name> <etal/></person-group>. <article-title>ChatGPT and the rise of large language models: the new AI-driven infodemic threat in public health</article-title>. <source>Front Public Health.</source> (<year>2023</year>) <volume>11</volume>:<fpage>1166120</fpage>. <pub-id pub-id-type="doi">10.3389/fpubh.2023.1166120</pub-id><pub-id pub-id-type="pmid">37181697</pub-id></citation></ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Heaven</surname> <given-names>WD</given-names></name></person-group>. <article-title>Artificial Intelligence&#x02014;The Inside Story of How ChatGPT Was Built From the People Who Made it</article-title>. (<year>2023</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.technologyreview.com/2023/03/03/1069311/inside-story-oral-history-how-chatgpt-built-openai/">https://www.technologyreview.com/2023/03/03/1069311/inside-story-oral-history-how-chatgpt-built-openai/</ext-link> (accessed 7.5.2023).</citation>
</ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brainard</surname> <given-names>J</given-names></name></person-group>. <article-title>Journals take up arms against AI-written text</article-title>. <source>Science.</source> (<year>2023</year>) <volume>379</volume>:<fpage>740</fpage>&#x02013;<lpage>1</lpage>. <pub-id pub-id-type="doi">10.1126/science.adh2762</pub-id><pub-id pub-id-type="pmid">36821673</pub-id></citation></ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chow</surname> <given-names>JCL</given-names></name> <name><surname>Sanders</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>K</given-names></name></person-group>. <article-title>Impact of ChatGPT on medical chatbots as a disruptive technology</article-title>. <source>Front Artif Intell.</source> (<year>2023</year>) <volume>6</volume>:<fpage>1166014</fpage>. <pub-id pub-id-type="doi">10.3389/frai.2023.1166014</pub-id><pub-id pub-id-type="pmid">37091303</pub-id></citation></ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dave</surname> <given-names>T</given-names></name> <name><surname>Athaluri</surname> <given-names>SA</given-names></name> <name><surname>Singh</surname> <given-names>S</given-names></name></person-group>. <article-title>ChatGPT in medicine: an overview of its applications, advantages, limitations, future prospects, ethical considerations</article-title>. <source>Front Artif Intell.</source> (<year>2023</year>) <volume>6</volume>:<fpage>1169595</fpage>. <pub-id pub-id-type="doi">10.3389/frai.2023.1169595</pub-id><pub-id pub-id-type="pmid">37215063</pub-id></citation></ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shaw</surname> <given-names>D</given-names></name> <name><surname>Morfeld</surname> <given-names>P</given-names></name> <name><surname>Erren</surname> <given-names>T</given-names></name></person-group>. <article-title>The (mis)use of ChatGPT in science and education: turing, Djerassi, &#x0201C;athletics&#x0201D; and ethics</article-title>. <source>EMBO Rep.</source> (<year>2023</year>) <volume>23</volume>:<fpage>e57501</fpage>. <pub-id pub-id-type="doi">10.15252/embr.202357501</pub-id><pub-id pub-id-type="pmid">37259767</pub-id></citation></ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stokel-Walker</surname> <given-names>C</given-names></name> <name><surname>Van Noorden</surname> <given-names>R</given-names></name></person-group>. <article-title>What ChatGPT and generative AI mean for science</article-title>. <source>Nature</source>. (<year>2023</year>) <volume>614</volume>:<fpage>214</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1038/d41586-023-00340-6</pub-id><pub-id pub-id-type="pmid">36747115</pub-id></citation></ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Thorp</surname> <given-names>HH</given-names></name></person-group>. <article-title>ChatGPT is fun, but not an author</article-title>. <source>Science</source>. (<year>2023</year>) <volume>379</volume>:<fpage>313</fpage>. <pub-id pub-id-type="doi">10.1126/science.adg7879</pub-id><pub-id pub-id-type="pmid">36701446</pub-id></citation></ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Beltrami</surname> <given-names>EJ</given-names></name> <name><surname>Grant-Kels</surname> <given-names>JM</given-names></name></person-group>. <article-title>Consulting ChatGPT: ethical dilemmas in language model artificial intelligence</article-title>. <source>J Am Acad Dermatol</source>. (<year>2023</year>) <volume>2</volume>:<fpage>52</fpage>. <pub-id pub-id-type="doi">10.1016/j.jaad.2023.02.052</pub-id><pub-id pub-id-type="pmid">36907556</pub-id></citation></ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dorr</surname> <given-names>DA</given-names></name> <name><surname>Adams</surname> <given-names>L</given-names></name> <name><surname>Embi</surname> <given-names>P</given-names></name></person-group>. (<year>2023</year>). <article-title>Harnessing the Promise of Artificial Intelligence Responsibly</article-title>. <source>JAMA</source>. <volume>329</volume>:<fpage>1347</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2023.2771</pub-id><pub-id pub-id-type="pmid">36972068</pub-id></citation></ref>
<ref id="B12">
<label>12.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kavian</surname> <given-names>JA</given-names></name> <name><surname>Wilkey</surname> <given-names>HL</given-names></name> <name><surname>Patel</surname> <given-names>PA</given-names></name> <name><surname>Boyd</surname> <given-names>JC</given-names></name></person-group>. <article-title>Harvesting the power of artificial intelligence for surgery: uses, implications, ethical considerations</article-title>. <source>Am Surg.</source> (<year>2023</year>) <volume>23</volume>:<fpage>31348231175454</fpage>. <pub-id pub-id-type="doi">10.1177/00031348231175454</pub-id><pub-id pub-id-type="pmid">37148260</pub-id></citation></ref>
<ref id="B13">
<label>13.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H</given-names></name> <name><surname>Moon</surname> <given-names>JT</given-names></name> <name><surname>Purkayastha</surname> <given-names>S</given-names></name> <name><surname>Celi</surname> <given-names>LA</given-names></name> <name><surname>Trivedi</surname> <given-names>H</given-names></name> <name><surname>Gichoya</surname> <given-names>WJ</given-names></name></person-group>. <article-title>Ethics of large language models in medicine and medical research</article-title>. <source>Lancet Digit Health</source>. (<year>2023</year>) <volume>23</volume>:<fpage>83</fpage>. <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00083-3</pub-id><pub-id pub-id-type="pmid">37120418</pub-id></citation></ref>
<ref id="B14">
<label>14.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marchandot</surname> <given-names>B</given-names></name> <name><surname>Matsushita</surname> <given-names>K</given-names></name> <name><surname>Carmona</surname> <given-names>A</given-names></name> <name><surname>Trimaille</surname> <given-names>A</given-names></name> <name><surname>Morel</surname> <given-names>O</given-names></name></person-group>. <article-title>ChatGPT: the next frontier in academic writing for cardiologists or a pandora&#x00027;s box of ethical dilemmas</article-title>. <source>Eur Heart J Open</source>. (<year>2023</year>) <volume>3</volume>:<fpage>oead007</fpage>. <pub-id pub-id-type="doi">10.1093/ehjopen/oead007</pub-id><pub-id pub-id-type="pmid">36915398</pub-id></citation></ref>
<ref id="B15">
<label>15.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lam-Po-Tang</surname> <given-names>J</given-names></name> <name><surname>McKay</surname> <given-names>D</given-names></name></person-group>. <article-title>Dr Google, MD: a survey of mental health-related internet use in a private practice sample</article-title>. <source>Australas Psychiatr.</source> (<year>2010</year>) <volume>18</volume>:<fpage>130</fpage>&#x02013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.3109/10398560903473645</pub-id><pub-id pub-id-type="pmid">20175669</pub-id></citation></ref>
<ref id="B16">
<label>16.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Hyman</surname> <given-names>I</given-names></name></person-group>. <article-title>The Risks of Consulting Dr. Google. Googling Information Can Cause Harm and Anxiety, Especially in a Pandemic</article-title>. (<year>2020</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.psychologytoday.com/us/blog/mental-mishaps/202004/the-risks-consulting-dr-google">https://www.psychologytoday.com/us/blog/mental-mishaps/202004/the-risks-consulting-dr-google</ext-link> (accessed May 23, 2023).</citation>
</ref>
<ref id="B17">
<label>17.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Bulck</surname> <given-names>L</given-names></name> <name><surname>Moons</surname> <given-names>P</given-names></name></person-group>. <article-title>What if your patient switches from Dr. Google to Dr. ChatGPT? a vignette-based survey of the trustworthiness, value and danger of ChatGPT-generated responses to health questions</article-title>. <source>Eur J Cardiovasc Nurs</source>. (<year>2023</year>) <volume>23</volume>:<fpage>zvad038</fpage>. <pub-id pub-id-type="doi">10.1093/eurjcn/zvad038</pub-id><pub-id pub-id-type="pmid">37094282</pub-id></citation></ref>
<ref id="B18">
<label>18.</label>
<citation citation-type="web"><person-group person-group-type="author"><collab>Center for AI Safety</collab></person-group>. <article-title>Statement on AI Risk</article-title>. AI experts and public figures express their concern about AI risk (<year>2023</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.safe.ai/statement-on-ai-risk">https://www.safe.ai/statement-on-ai-risk</ext-link> (accessed June 20, 2023).</citation>
</ref>
<ref id="B19">
<label>19.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Fung</surname> <given-names>B</given-names></name></person-group> (<year>2023</year>). <source>Mr. ChatGPT goes to Washington: OpenAI CEO sam altman testifies before congress on AI risks</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://edition.cnn.com/2023/05/16/tech/sam-altman-openai-congress/index.html">https://edition.cnn.com/2023/05/16/tech/sam-altman-openai-congress/index.html</ext-link> (accessed June 20, 2023).</citation>
</ref>
<ref id="B20">
<label>20.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Metz</surname> <given-names>C</given-names></name></person-group>. <source>&#x02018;The Godfather of A.I.&#x00027; Leaves Google and Warns of Danger Ahead</source>. (<year>2023</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.nytimes.com/2023/05/01/technology/ai-google-chatbot-engineer-quits-hinton.html">https://www.nytimes.com/2023/05/01/technology/ai-google-chatbot-engineer-quits-hinton.html</ext-link> (accessed June 20, 2023).</citation>
</ref>
<ref id="B21">
<label>21.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sadowski</surname> <given-names>J</given-names></name> <name><surname>Viljoen</surname> <given-names>S</given-names></name> <name><surname>Whittaker</surname> <given-names>M</given-names></name></person-group>. <article-title>Everyone should decide how their digital data are used&#x02014;not just tech companies</article-title>. <source>Nature</source>. (<year>2021</year>) <volume>595</volume>:<fpage>169</fpage>&#x02013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1038/d41586-021-01812-3</pub-id><pub-id pub-id-type="pmid">34211184</pub-id></citation></ref>
<ref id="B22">
<label>22.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Frankfurt</surname> <given-names>HG</given-names></name></person-group>. <article-title>On bullshit</article-title>. <source>Raritan Q Rev.</source> (<year>1986</year>) <volume>6</volume>:<fpage>81</fpage>&#x02013;<lpage>100</lpage>.</citation>
</ref>
<ref id="B23">
<label>23.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Frankfurt</surname> <given-names>HG</given-names></name></person-group>. <source>On Bullshit</source>. <publisher-loc>Princeton, NJ</publisher-loc>: <publisher-name>Princeton University Press</publisher-name> (<year>2005</year>).</citation>
</ref>
<ref id="B24">
<label>24.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Hoppenstedt</surname> <given-names>M</given-names></name></person-group>. <article-title>Chefin der Signal-App: Wir werden nicht auf den KI-Zug aufspringen. Interview with Meredith Whittaker</article-title>. (<year>2023</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.spiegel.de/netzwelt/netzpolitik/signal-chefin-meredith-whittaker-wir-werden-nicht-auf-den-ki-zug-aufspringen-a-0b223227-560a-41f9-b772-7d7860df3098">https://www.spiegel.de/netzwelt/netzpolitik/signal-chefin-meredith-whittaker-wir-werden-nicht-auf-den-ki-zug-aufspringen-a-0b223227-560a-41f9-b772-7d7860df3098</ext-link> (accessed June 20, 2023).</citation>
</ref>
<ref id="B25">
<label>25.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Price</surname> <given-names>WN</given-names></name> <name><surname>Gerke</surname> <given-names>S</given-names></name> <name><surname>Cohen</surname> <given-names>GI</given-names></name></person-group>. <article-title>Potential liability for physicians using artificial intelligence</article-title>. <source>JAMA</source>. (<year>2019</year>) <volume>322</volume>:<fpage>1765</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2019.15064</pub-id><pub-id pub-id-type="pmid">31584609</pub-id></citation></ref>
<ref id="B26">
<label>26.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Haupt</surname> <given-names>CE</given-names></name> <name><surname>Marks</surname> <given-names>M</given-names></name></person-group>. <article-title>AI-generated medical advice-GPT and beyond</article-title>. <source>JAMA</source>. (<year>2023</year>) <volume>329</volume>:<fpage>1349</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2023.5321</pub-id><pub-id pub-id-type="pmid">36972070</pub-id></citation></ref>
<ref id="B27">
<label>27.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Orwell</surname> <given-names>G</given-names></name></person-group>. <source>Nineteen Eighty-Four</source>. <publisher-loc>London</publisher-loc>: <publisher-name>Martin Secker and Warburg Ltd</publisher-name> (<year>1949</year>).</citation>
</ref>
<ref id="B28">
<label>28.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Lima</surname> <given-names>C</given-names></name></person-group>. <article-title>AI chatbots Won&#x00027;t Enjoy tech&#x00027;s Legal Shield, Section 230 Authors Say</article-title>. (<year>2023</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.washingtonpost.com/politics/2023/03/17/ai-chatbots-wont-enjoy-techs-legal-shield-section-230-authors-say/">https://www.washingtonpost.com/politics/2023/03/17/ai-chatbots-wont-enjoy-techs-legal-shield-section-230-authors-say/</ext-link> (accessed June 20, 2023).</citation>
</ref>
</ref-list> 
</back>
</article> 
