<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v25i1e47184</article-id>
      <article-id pub-id-type="pmid">37314848</article-id>
      <article-id pub-id-type="doi">10.2196/47184</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Investigating the Impact of User Trust on the Adoption and Use of ChatGPT: Survey Analysis</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Urena</surname>
            <given-names>Estefania</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chaturvedi</surname>
            <given-names>Akhil</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Joseph</surname>
            <given-names>Amanda</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Choudhury</surname>
            <given-names>Avishek</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Industrial and Management Systems Engineering</institution>
            <institution>Benjamin M. Statler College of Engineering and Mineral Resources</institution>
            <institution>West Virginia University</institution>
            <addr-line>321 Engineering Sciences Building</addr-line>
            <addr-line>1306 Evansdale Drive</addr-line>
            <addr-line>Morgantown, WV, 26506</addr-line>
            <country>United States</country>
            <phone>1 304 293 9431</phone>
            <email>avishek.choudhury@mail.wvu.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5342-0709</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Shamszare</surname>
            <given-names>Hamid</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0007-7786-3452</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Industrial and Management Systems Engineering</institution>
        <institution>Benjamin M. Statler College of Engineering and Mineral Resources</institution>
        <institution>West Virginia University</institution>
        <addr-line>Morgantown, WV</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Avishek Choudhury <email>avishek.choudhury@mail.wvu.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>14</day>
        <month>6</month>
        <year>2023</year>
      </pub-date>
      <volume>25</volume>
      <elocation-id>e47184</elocation-id>
      <history>
        <date date-type="received">
          <day>10</day>
          <month>3</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>19</day>
          <month>4</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>19</day>
          <month>4</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>25</day>
          <month>5</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Avishek Choudhury, Hamid Shamszare. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 14.06.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2023/1/e47184" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>ChatGPT (Chat Generative Pre-trained Transformer) has gained popularity for its ability to generate human-like responses. It is essential to note that overreliance or blind trust in ChatGPT, especially in high-stakes decision-making contexts, can have severe consequences. Similarly, lacking trust in the technology can lead to underuse, resulting in missed opportunities.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study investigated the impact of users’ trust in ChatGPT on their intent and actual use of the technology. Four hypotheses were tested: (1) users’ intent to use ChatGPT increases with their trust in the technology; (2) the actual use of ChatGPT increases with users’ intent to use the technology; (3) the actual use of ChatGPT increases with users’ trust in the technology; and (4) users’ intent to use ChatGPT can partially mediate the effect of trust in the technology on its actual use.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>This study distributed a web-based survey to adults in the United States who actively use ChatGPT (version 3.5) at least once a month from February 2023 through March 2023. The survey responses were used to develop 2 latent constructs: <italic>Trust</italic> and <italic>Intent to Use</italic>, with <italic>Actual Use</italic> being the outcome variable. The study used partial least squares structural equation modeling to evaluate and test the structural model and hypotheses.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>In the study, 607 respondents completed the survey. The primary uses of ChatGPT were for information gathering (n=219, 36.1%), entertainment (n=203, 33.4%), and problem-solving (n=135, 22.2%), with a smaller number using it for health-related queries (n=44, 7.2%) and other activities (n=6, 1%). Our model explained 50.5% and 9.8% of the variance in <italic>Intent to Use</italic> and <italic>Actual Use</italic>, respectively, with path coefficients of 0.711 and 0.221 for <italic>Trust</italic> on <italic>Intent to Use</italic> and <italic>Actual Use</italic>, respectively. The bootstrapped results rejected all 4 null hypotheses, with <italic>Trust</italic> having a significant direct effect on both <italic>Intent to Use</italic> (β=0.711, 95% CI 0.656-0.764) and <italic>Actual Use</italic> (β=0.302, 95% CI 0.229-0.374). The indirect effect of <italic>Trust</italic> on <italic>Actual Use</italic>, partially mediated by <italic>Intent to Use</italic>, was also significant (β=0.113, 95% CI 0.001-0.227).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Our results suggest that trust is critical to users’ adoption of ChatGPT. It remains crucial to highlight that ChatGPT was not initially designed for health care applications. Therefore, an overreliance on it for health-related advice could potentially lead to misinformation and subsequent health risks. Efforts must be focused on improving ChatGPT’s ability to distinguish between queries that it can safely handle and those that should be redirected to human experts (health care professionals). Although risks are associated with excessive trust in artificial intelligence–driven chatbots such as ChatGPT, the potential risks can be reduced by advocating for shared accountability and fostering collaboration between developers, subject matter experts, and human factors researchers.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT</kwd>
        <kwd>trust in AI</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>technology adoption</kwd>
        <kwd>behavioral intention</kwd>
        <kwd>chatbot</kwd>
        <kwd>human factors</kwd>
        <kwd>trust</kwd>
        <kwd>adoption</kwd>
        <kwd>intent</kwd>
        <kwd>survey</kwd>
        <kwd>shared accountability</kwd>
        <kwd>AI policy</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Artificial Intelligence (AI) has been a subject of research and intrigue for scientists, engineers, and thinkers since the emergence of computing machines. The genesis of AI can be traced back to the 1950s, marking the commencement of an extensive voyage that would ultimately lead to the development of intricate, human-like machines capable of independent thinking, learning, and reasoning [<xref ref-type="bibr" rid="ref1">1</xref>]. Initially, AI was perceived as a solution to all problems—a technology that could mechanize every task and supplant human labor. Early research focused on building rule-based systems that could make decisions based on predetermined logical rules. Nevertheless, these systems had limited usefulness as they were rigid and could not learn from data or adapt to novel situations [<xref ref-type="bibr" rid="ref2">2</xref>]. In the 1960s and 1970s, the emphasis of AI research shifted toward developing expert systems that could reason and make decisions based on extensive domain-specific knowledge [<xref ref-type="bibr" rid="ref3">3</xref>]. These systems were widely used in various fields, such as medicine, finance, and engineering, and were seen as a major advancement in AI research [<xref ref-type="bibr" rid="ref4">4</xref>]. However, the limitations of expert systems became apparent in the 1980s and 1990s, as they could not handle the complexity and ambiguity of real-world problems [<xref ref-type="bibr" rid="ref5">5</xref>]. This led to the development of machine learning algorithms that could learn from data and make decisions based on statistical patterns. With the advent of the internet and the availability of massive amounts of data, deep learning algorithms emerged, which are capable of learning complex patterns in images, speech, and text.</p>
        <p>In recent years, AI has been widely adopted in various fields, including health care, finance, transportation, and entertainment. AI-powered technologies such as self-driving cars, virtual assistants, and personalized recommendations have become integral to our daily lives. One of the most substantial breakthroughs in AI research has been the emergence of large-scale language models that are built on Generative Pre-trained Transformers such as ChatGPT (Chat Generative Pre-trained Transformer; OpenAI) [<xref ref-type="bibr" rid="ref6">6</xref>]. These models are trained on vast amounts of textual data and can generate human-like responses to natural language queries. ChatGPT has revolutionized the field of natural language processing and has paved the way for a new generation of AI-powered language applications. ChatGPT is a cutting-edge language model that OpenAI developed in 2019. It is based on a transformer architecture—a deep learning model that has demonstrated remarkable efficacy in processing sequential data, particularly natural language. ChatGPT was trained on a colossal corpus of text data, which included various sources such as books, articles, and websites.</p>
        <p>ChatGPT has garnered substantial traction among computer users, largely due to its impressive ability to generate responses that resemble those of the human language [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. Many users appreciate the convenience and efficiency of this technology, particularly in various applications such as chatbots, virtual assistants, and customer service agents [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. However, along with its burgeoning popularity, ChatGPT has prompted concerns about the broader implications of its use [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. Among these concerns is the potential for its exploitation for malicious purposes, such as social engineering attacks or other forms of fraud [<xref ref-type="bibr" rid="ref20">20</xref>]. Another issue relates to the possibility of the technology exacerbating preexisting societal biases, as the model’s training data may have inadvertently reflected these biases and cause ChatGPT to produce biased responses [<xref ref-type="bibr" rid="ref21">21</xref>]. Moreover, ChatGPT’s ability to produce highly convincing fake text has sparked unease regarding its potential misuse in disinformation campaigns, deep fakes, and other malicious activities [<xref ref-type="bibr" rid="ref22">22</xref>]. These concerns have catalyzed efforts by researchers and policy makers to identify and address the risks associated with this technology, including developing techniques to detect and prevent malicious use and ensuring that the training data used for ChatGPT and similar models are diverse, representative, and free of any biases [<xref ref-type="bibr" rid="ref22">22</xref>]. Therefore, it is crucial to remain vigilant and proactively address the possible risks arising from its use [<xref ref-type="bibr" rid="ref23">23</xref>].</p>
        <p>The consequences of overreliance or exhibiting blind trust in ChatGPT, particularly in high-stakes decision-making contexts, cannot be overstated. Although impressive in its capabilities, the technology is not impervious to errors, especially if it has been trained on biased or incomplete data. Given its nature of continuously learning from internet texts, failure to adequately verify and validate ChatGPT’s responses can result in incorrect or incomplete decisions, which can have substantial and far-reaching implications in health care, finance, and law [<xref ref-type="bibr" rid="ref24">24</xref>]. Conversely, a complete lack of trust in ChatGPT can lead to the underuse of this technology. Such distrust can lead to hesitancy to use the technology for decision-making, leading to missed opportunities and slower decision-making processes.</p>
        <p>Excessive or lack of trust in ChatGPT can have deleterious effects. Striking a balance between trust and validation is essential to ensure the responsible and efficacious use of ChatGPT to maximize its benefits and mitigate its associated risks. Therefore, this study captured users’ trust in ChatGPT and explored its impact on user intent to use the technology. Additionally, it explored its direct and indirect effects on the actual use of ChatGPT. As illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>, we explored the following 4 hypotheses:</p>
        <list list-type="bullet">
          <list-item>
            <p>H1: User’s intent to use ChatGPT increases with their trust in the technology.</p>
          </list-item>
          <list-item>
            <p>H2: The actual use of ChatGPT increases with users’ intent to use the technology.</p>
          </list-item>
          <list-item>
            <p>H3: The actual use of ChatGPT increases with users’ trust in the technology.</p>
          </list-item>
          <list-item>
            <p>H4: Users’ intent to use ChatGPT can partially mediate the effect of trust in the technology on its actual use.</p>
          </list-item>
        </list>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>The conceptual structural framework. H1 through H4 indicate the hypotheses. The dashed line connecting trust and actual use indicates the indirect effect, whereas solid lines indicate the direct paths.</p>
          </caption>
          <graphic xlink:href="jmir_v25i1e47184_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Hypothesis Development</title>
        <p>In this study, we define <italic>Trust</italic> in ChatGPT as a user’s willingness to take chances based on the recommendations made by this technology. This implies that the user believes that the technology has the capacity to execute a particular task accurately while keeping in mind the possibility of negative outcomes. The <italic>Intent to Use</italic> [<xref ref-type="bibr" rid="ref25">25</xref>] ChatGPT refers to the degree to which an end user perceives the technology as useful and user-friendly and their willingness to adopt and use it for decision-making purposes. <italic>Actual Use</italic> of ChatGPT refers to the extent to which end users have used the technology for decision-making purposes in their respective fields. The extant literature attests to a positive correlation between users’ trust in technology and their inclination to use it, as evidenced by many studies [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. Notably, one investigation probing patients’ and clinicians’ perceptions of chatbots found a substantial nexus between users’ trust in AI-based health care chatbot services and their intention to use them [<xref ref-type="bibr" rid="ref30">30</xref>]. Similarly, a study examining virtual assistants in the health care domain revealed a positive correlation between users’ trust in the technology and their willingness to use it for managing their health [<xref ref-type="bibr" rid="ref32">32</xref>]. Furthermore, a study conducted in the marketing realm concluded that chatbots augment customers’ trust and purchase intention [<xref ref-type="bibr" rid="ref29">29</xref>]. Against this backdrop, we posited that the degree of users’ intent to use ChatGPT will increase concomitantly with their trust in the technology, thereby underscoring a positive association between the 2 variables. 
We articulated this hypothesis as <italic>H1: users’ intent to use ChatGPT increases with their trust in the technology.</italic></p>
        <p>Successful technology implementation depends on users’ intention to use it and their actual use. Despite users’ intentions to use technology, they may not put it into practice for several reasons, such as the lack of time, resources, technical skills, or negative experiences with the technology [<xref ref-type="bibr" rid="ref33">33</xref>]. Prior research has established a positive correlation between intent to use and actual use of technology, indicating that users who intend to use the technology are more likely to actually use it [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. For instance, studies on adopting robots as assistive social agents found that users’ intent to use them strongly predicted their actual use [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. In addition, research on adopting conversational agents in the form of chatbots for disease diagnosis showed that users’ intention to use the chatbot influenced their actual use of the chatbot [<xref ref-type="bibr" rid="ref34">34</xref>]. Thus, we hypothesized that users’ intent to use ChatGPT will positively influence their actual use of the technology. We articulated this hypothesis as <italic>H2: the actual use of ChatGPT increases with users’ intent to use the technology.</italic></p>
        <p>Trust can also influence the actual use of ChatGPT. A survey study involving 359 participants revealed that users’ intentions to continue using chatbot services were influenced mainly by their trust in the chatbot [<xref ref-type="bibr" rid="ref35">35</xref>]. A health care study using interviews revealed that trust is vital in determining whether individuals will use chatbots for disease diagnosis [<xref ref-type="bibr" rid="ref34">34</xref>]. Specifically, the level of trust in chatbots as conversational agents was a decisive factor in the interviewees’ decision to use the technology. This finding supports the notion that trust positively impacts the actual use of technology, highlighting its critical role in adopting and implementing new technological solutions. Therefore, we hypothesized that trust in ChatGPT will impact the actual use of the technology. We articulated this hypothesis as <italic>H3: the actual use of ChatGPT increases with users’ trust in the technology.</italic></p>
        <p>We also explored the following hypothesis: <italic>H4: users’ intent to use ChatGPT can partially mediate the effect of trust in the technology on its actual use.</italic> If users trust ChatGPT, they may be more likely to form positive attitudes toward using the technology and develop an intention to use it. This intention, in turn, may lead to the actual use of the technology. Therefore, users’ intent to use ChatGPT could be a pathway through which trust in the technology can partially mediate its effect on actual use. A study on technology acceptance for assistive social robots among older adult users found that the intention to use plays a mediating role in the relationship between trust and actual use [<xref ref-type="bibr" rid="ref31">31</xref>]. This suggests that trust alone may not be sufficient to predict the actual use of assistive social robots among older adult users, as the intention to use plays an important role in this relationship. By considering this potential mediating effect, researchers can gain a more comprehensive understanding of the factors influencing users’ adoption of ChatGPT.</p>
      </sec>
    </sec>
    <sec sec-type="method">
      <title>Methods</title>
      <sec>
        <title>Ethics Approval</title>
        <p>The study obtained ethical approval from West Virginia University, Morgantown (protocol 2302725983).</p>
      </sec>
      <sec>
        <title>Semistructured Survey</title>
        <p>We distributed a web-based semistructured survey to adults in the United States who actively use ChatGPT (version 3.5) at least once a month. We collected the data from February 2023 through March 2023. The survey was designed on Qualtrics (Qualtrics LLC) and was distributed by Centiment (Centiment LLC), an audience-paneling service. We leveraged Centiment’s service as they reach a broader and more representative audience via their network and social media. They also use fingerprinting technology that combines IP address, device type, screen size, and cookies to ensure that only unique panelists enter the survey.</p>
        <p>We conducted a soft launch of the survey and collected 40 responses. A soft launch is a small-scale test of a survey before it is distributed to a larger audience. A soft launch aims to identify any potential issues with the survey, such as unclear or confusing questions, technical glitches, or other problems that may affect the quality of the data collected. The survey was then distributed to a larger audience.</p>
        <p><xref ref-type="table" rid="table1">Table 1</xref> shows the descriptive statistics of the survey questions used in this study. We developed 2 latent constructs based on the question (predictors): <italic>Trust</italic> and <italic>Intent to Use</italic>. Participant responses to all the questions were captured using a 4-point Likert scale ranging from 1=<italic>strongly disagree</italic> to 4=<italic>strongly agree</italic>. The <italic>Actual Use</italic> factor, the outcome variable, was captured using a single-item question capturing the frequency of use ranging from 1=<italic>once a month</italic> to 4=<italic>almost every day</italic>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Descriptive statistics of study variables (N=607).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="770"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Survey items</td>
                <td>Value, mean (SD)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>Trust</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT<sup>a</sup> is competent in providing the information and guidance I need</td>
                <td>3.20 (0.83)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT is reliable in providing consistent and dependable information</td>
                <td>3.16 (0.80)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT is transparent</td>
                <td>3.12 (0.86)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT is trustworthy in the sense that it is dependable and credible</td>
                <td>3.17 (0.84)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT will not cause harm, manipulate its responses, create negative consequences for me</td>
                <td>3.10 (0.88)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT will act with integrity and be honest with me</td>
                <td>3.19 (0.82)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT is secure and protects my privacy and confidential information</td>
                <td>3.27 (0.81)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Intent to Use</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>I am willing to use ChatGPT for healthcare related queries</td>
                <td>3.10 (0.86)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>I am willing to take decisions based on the recommendations provided by ChatGPT</td>
                <td>3.13 (0.82)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>I am willing to use ChatGPT in future</td>
                <td>3.38 (0.76)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Actual Use</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>How frequently do you use ChatGPT</td>
                <td>3.33 (1.10)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>ChatGPT: Chat Generative Pre-trained Transformer.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>All the analyses were done in R (R Foundation for Statistical Computing) [<xref ref-type="bibr" rid="ref36">36</xref>] using the <italic>seminr</italic> package [<xref ref-type="bibr" rid="ref37">37</xref>]. We evaluated and validated the latent constructs’ convergent and discriminant validity. The convergent validity and reliability were assessed using 3 criteria [<xref ref-type="bibr" rid="ref38">38</xref>]: factor loadings (&#62;0.50), composite reliability (&#62;0.70), and average variance extracted (&#62;0.50). The discriminant validity was assessed using the Heterotrait-Monotrait ratio (&#60;0.90) [<xref ref-type="bibr" rid="ref39">39</xref>]. After validating the latent construct (measurement model), we leveraged the partial least squares structural equation modeling (PLS-SEM) to test the structural model and hypotheses. The PLS-SEM method is a well-established method for multivariate analysis [<xref ref-type="bibr" rid="ref40">40</xref>]. It allows for estimating complex models with several constructs, indicator variables, and structural paths without imposing distributional assumptions on the data [<xref ref-type="bibr" rid="ref41">41</xref>]. PLS-SEM is also suitable for small sample sizes when models comprise many constructs and items [<xref ref-type="bibr" rid="ref42">42</xref>]. Thus, PLS-SEM is a good method for exploratory research as it offers the flexibility needed for the interplay between theory and data [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>In all, 607 respondents completed the survey, of which 182 (30%) used ChatGPT at least once a month, 158 (26%) used it once per week, 149 (24.5%) used it more than once per week, and 118 (19.4%) used it almost every day. Most respondents had at minimum a high school diploma (n=204, 33.6%) or a bachelor’s degree (n=262, 43.2%). Most of the respondents used ChatGPT for information gathering (n=219, 36.1%), entertainment (n=203, 33.4%), and problem-solving (n=135, 22.2%). We also noted users who used the technology for health-related queries (n=44, 7.2%) and other activities (n=6, 1%), such as generating ideas, grammar checks, and writing blog content. Participants acknowledged the ease of use, usefulness, and accessibility as the 3 most important factors encouraging them to use ChatGPT. Other factors were in the following order: trustworthiness, algorithm quality, privacy, brand value, and transparency.</p>
      <p><xref ref-type="table" rid="table2">Table 2</xref> depicts that the effect of <italic>Trust</italic> on <italic>Intent to Use</italic> was stronger than its effect on <italic>Actual Use</italic>, with path coefficients of 0.711 and 0.221, respectively. The model explained 50.5% and 9.8% of the variance in Intent to Use and Actual Use, respectively. Reliability estimates indicated high levels of internal consistency for all 3 latent variables, with Cronbach α and rho values exceeding the recommended threshold of 0.7. The average variance extracted for <italic>Trust</italic> and <italic>Intent to Use</italic> also exceeded the recommended threshold of 0.5, indicating that these variables are well-defined and reliable. <xref ref-type="table" rid="table3">Table 3</xref> shows the Heterotrait-Monotrait ratios for the paths between <italic>Trust</italic> and <italic>Intent to Use</italic>, <italic>Trust</italic> and <italic>Actual Use</italic>, and Intent to Use and Actual Use. The results suggest that the Heterotrait-Monotrait ratios are below the recommended threshold of 0.9, indicating discriminant validity in the model.</p>
      <p>According to our bootstrapped PLS-SEM results, we found support for all 4 hypotheses. <xref rid="figure2" ref-type="fig">Figure 2</xref> illustrates the conceptual framework that connects trust in ChatGPT, users’ intent to use ChatGPT, and its actual use. Factors T1 through T7 indicate the 7 observed variables forming the latent construct of <italic>Trust</italic>, and factors U1 through U3 form the construct of <italic>Intent to Use</italic>. The thickness of the arrows in the inner model reflects the magnitude of the direct effects.</p>
      <p>H1 posited that trust in ChatGPT would have a direct effect on users’ intentions to use the technology. Our results confirmed this hypothesis (β=0.711, 95% CI 0.656-0.764), indicating a strong positive relationship.</p>
      <p>H2 suggested that users’ intent to use ChatGPT would have an effect on their actual use. This was also supported by our data (β=0.114, 95% CI 0.001-0.229), underlining the role of intent as a predictor of use.</p>
      <p>H3 proposed that trust in ChatGPT would directly influence its actual use. Our results corroborated this hypothesis (β=0.302, 95% CI 0.229-0.374), affirming that trust can directly drive actual use.</p>
      <p>Finally, H4 postulated that the effect of trust on actual use would be partially mediated by the intent to use. Our analysis also confirmed this, with the indirect effect of trust on actual use through intent to use being significant (β=0.113, 95% CI 0.003-0.227).</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Model fit and reliability measures.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="30"/>
          <col width="590"/>
          <col width="210"/>
          <col width="170"/>
          <thead>
            <tr valign="top">
              <td colspan="2">
                <break/>
              </td>
              <td>Intent to Use</td>
              <td>Actual Use</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td colspan="4">
                <bold>Model fit</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>
                <italic>R<sup>2</sup></italic>
              </td>
              <td>0.505</td>
              <td>0.098</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Adjusted <italic>R</italic><sup>2</sup></td>
              <td>0.504</td>
              <td>0.095</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Trust</td>
              <td>0.711</td>
              <td>0.221</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Intent to Use</td>
              <td>N/A<sup>a</sup></td>
              <td>0.114</td>
            </tr>
            <tr valign="top">
              <td colspan="4">
                <bold>Reliability measures</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Cronbach α</td>
              <td>.876</td>
              <td>N/A</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Rho C</td>
              <td>0.904</td>
              <td>N/A</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>AVE<sup>b</sup></td>
              <td>0.575</td>
              <td>N/A</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Rho A</td>
              <td>0.880</td>
              <td>N/A</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table2fn1">
            <p><sup>a</sup>N/A: not applicable.</p>
          </fn>
          <fn id="table2fn2">
            <p><sup>b</sup>AVE: average variance extracted.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Discriminant validity measures.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="350"/>
          <col width="220"/>
          <col width="280"/>
          <col width="150"/>
          <thead>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Original estimate</td>
              <td>Bootstrap, mean (SD)</td>
              <td>95% CI</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Trust → Intent to Use</td>
              <td>0.896</td>
              <td>0.897 (0.035)</td>
              <td>0.827-0.962</td>
            </tr>
            <tr valign="top">
              <td>Trust → Actual Use</td>
              <td>0.320</td>
              <td>0.320 (0.040)</td>
              <td>0.241-0.397</td>
            </tr>
            <tr valign="top">
              <td>Intent to Use → Actual Use</td>
              <td>0.320</td>
              <td>0.321 (0.044)</td>
              <td>0.233-0.406</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Conceptual framework illustrating the significant paths connecting trust in ChatGPT (Chat Generative Pre-trained Transformer), users' intent to use ChatGPT, and its actual use (AU). T1 through T7: factors for trust; U1 through U3: factors for intent to use. *<italic>P</italic>&#60;.05 and ***<italic>P</italic>&#60;.001.</p>
        </caption>
        <graphic xlink:href="jmir_v25i1e47184_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This is the first study exploring the role of trust in ChatGPT’s adoption from a human factors viewpoint. This study contributes to the extant literature by shedding light on the importance of trust as a determinant of both the intention to use and the actual use of chatbot technologies. Furthermore, the study highlights the mediating role of intention to use in the relationship between trust and actual use. These insights are particularly relevant for organizations and developers seeking to design and market chatbot technologies that users are more likely to adopt and use. Moreover, the results show how users engage with chatbot technologies, including information gathering, entertainment, problem-solving, and health-related queries. This highlights the potential of chatbot technologies to meet various needs and suggests that developers may consider designing chatbots with diverse functionalities to enhance user satisfaction and engagement.</p>
        <p>Our findings complement and build upon the insights from the other studies by providing a nuanced understanding of the role of trust in chatbot adoption. Our study found that trust has a significant direct effect on both intentions to use (β=0.711) and actual use (β=0.302) of the technology. Moreover, the indirect effect of trust on actual use, partially mediated by intent to use, was also significant. This aligns with the prior study [<xref ref-type="bibr" rid="ref44">44</xref>], which explored the antecedents and consequences of chatbot initial trust. They revealed that compatibility, perceived ease of use, and social influence significantly boost users’ initial trust toward chatbots, enhancing the intention to use chatbots and encouraging engagement. Another study [<xref ref-type="bibr" rid="ref45">45</xref>] focused on the impact of anthropomorphism on user response to chatbots from the perspective of trust and relationship norms. Their findings complement our study by highlighting the role of anthropomorphism in trust formation, ultimately influencing chatbot use. Following the technology acceptance model and diffusion of innovations theory, a prior study [<xref ref-type="bibr" rid="ref28">28</xref>] examined the intention of users to use chatbots on smartphones for shopping. The study found that attitude toward chatbots was considerably influenced by perceived usefulness, the ease of use, enjoyment, price consciousness, perceived risk, and personal innovativeness. On the other hand, the intention to use was directly influenced only by trust, personal innovativeness, and attitude. Therefore, the study supports our findings by emphasizing the role of trust in the intention to use chatbots and adding other factors such as personal innovativeness and attitude. 
Similarly, a study [<xref ref-type="bibr" rid="ref29">29</xref>] reported that credibility, competence, anthropomorphism, social presence, and informativeness influence user trust in chatbots, affecting purchase intention—thus, emphasizing the importance of trust and its antecedents in determining the use of chatbots.</p>
      </sec>
      <sec>
        <title>Theoretical Contribution</title>
        <p>Our study makes several important theoretical contributions to understanding trust and its role in adopting and using AI-based chatbots (ChatGPT). By examining the direct and indirect effects of trust on intentions to use and actual use of the technology, the study confirms the importance of trust in the adoption process. It extends the existing literature by highlighting the underlying mechanisms through which trust influences actual use. This new understanding contributes to developing a more comprehensive theoretical framework for studying chatbot adoption.</p>
        <p>Our findings emphasize the critical role of trust in adopting and using chatbots. By demonstrating that trust has a significant direct effect on intentions to use and actual use, the study reinforces the centrality of trust in technology adoption research. This is consistent with the findings of prior literature, which also underscore the importance of trust in various aspects of chatbot adoption, such as initial trust [<xref ref-type="bibr" rid="ref44">44</xref>], response to anthropomorphic attributes [<xref ref-type="bibr" rid="ref45">45</xref>], and purchase intention [<xref ref-type="bibr" rid="ref29">29</xref>].</p>
        <p>Our study extends the existing literature by uncovering the mediating role of intention to use in the relationship between trust and actual use. By showing that the indirect effect of trust on actual use is partially mediated by intention to use, the study provides valuable insights into the mechanisms through which trust influences actual use. This novel contribution enhances our understanding of the complex interplay between trust and behavioral outcomes, laying the groundwork for future research on the dynamics of trust in technology adoption.</p>
      </sec>
      <sec>
        <title>Policy Implications</title>
        <p>Our study’s findings can significantly inform the decision-making processes for policy makers and public administrators as they face the challenges of implementing AI-driven solutions. By emphasizing the importance of trust, our study lays the groundwork for addressing potential pitfalls and governance challenges, ultimately promoting the successful integration of chatbots.</p>
        <p>First, establishing trust in AI-powered conversational agents should be a priority for policy makers and technology developers. This can be achieved through transparent disclosure of the agents’ operational processes, information sources, and guiding algorithms. Disclosures should be easily accessible, user-friendly, and presented in clear language. Additionally, conversational agents should include explicit disclaimers to minimize the risk of misleading or erroneous responses.</p>
        <p>Second, developers and policy makers should design conversational agents prioritizing user needs and preferences. Incorporating features that allow users to tailor the agent’s responses to their specific requirements, such as tone, vocabulary, and response time, will enhance user satisfaction. Furthermore, agents should prioritize providing accurate and relevant information while minimizing the potential for algorithmic bias, which could result in discriminatory or inaccurate responses.</p>
        <p>Third, policy makers should encourage shared accountability to promote the responsible development and deployment of chatbots such as ChatGPT. We define shared accountability as a collaborative approach to ensuring the responsible development and deployment of AI-based technologies, involving stakeholders who share responsibility for ensuring the technology’s accuracy, safety, and ethical use. This approach fosters a culture of transparency and responsibility, enabling stakeholders to identify and address potential issues and optimize the technology for the benefit of all users.</p>
        <p>By promoting shared accountability, policy makers can help create a culture of responsibility and transparency that motivates all stakeholders to optimize the technology. For example, developers and data-quality teams will be motivated to ensure that the AI is accurate and reliable. At the same time, users will be encouraged to provide feedback and report any issues or concerns. This sense of accountability and responsibility can make a substantial difference in ensuring that the technology is developed and deployed in a responsible and ethical manner. Furthermore, shared accountability can help to address concerns around biases and other ethical considerations in AI development. By involving diverse stakeholders in the development process, policy makers can ensure that the technology is designed to meet the needs and expectations of a broad range of users while minimizing the risk of unintentional harm or bias.</p>
        <p>Lastly, policy makers should establish policies and regulations promoting the responsible development and deployment of conversational agents [<xref ref-type="bibr" rid="ref46">46</xref>]. These policies should mandate adherence to ethical and legal guidelines related to privacy, data security, and bias. Policy makers should also provide guidance on appropriate use cases for conversational agents, such as information retrieval and customer service. Implementing such policies and regulations will ensure that conversational agents are developed and deployed to maximize benefits while minimizing potential risks and misuse.</p>
      </sec>
      <sec>
        <title>Practical Implications</title>
        <p>Our study also contributes to the human factors and health sciences literature by examining the role of trust in adopting AI-driven chatbots such as ChatGPT for health-related purposes. Our findings align with and extend the current understanding of other studies by identifying key factors influencing user adoption, such as trustworthiness, algorithm quality, privacy, transparency, and brand value [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref50">50</xref>]. From a human factors perspective, our study emphasizes the importance of designing chatbot technologies that cater to user needs and preferences while addressing potential concerns and risks.</p>
        <p>Moreover, given the increasing use of AI-powered chatbots for various activities, it is important to note that many respondents used the technology for health-related queries. This implies that health providers can leverage chatbots to provide health information and support to patients [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. However, to ensure user safety and the accuracy of health information provided, health providers must collaborate with technology providers to develop and integrate reliable and trustworthy health-related information sources into the chatbots [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Given the complexity and sensitivity of health-related issues, users must exercise caution when seeking health advice from an AI chatbot such as ChatGPT. Users should be aware of the limitations of AI technology in the medical field and should not use ChatGPT as a replacement for professional medical advice. To mitigate these risks, it may be useful for ChatGPT developers to provide clear disclaimers and warnings regarding the limitations of the technology in the medical field and simultaneously work toward integrating reliable medical databases to provide more accurate and trustworthy health advice.</p>
        <p>Although risks are associated with excessive trust in AI-driven chatbots such as ChatGPT, it is important to recognize that these technologies continually evolve as they process new data from the internet. However, biased or false information across the web can potentially influence ChatGPT’s responses, reinforcing misinformation or perpetuating skewed perspectives. To address this concern, a proactive approach should be gradually adopted to develop mechanisms that filter out false or biased information from the chatbot’s training model.</p>
        <p>Since data floating on the internet can be manipulated, systematic efforts should be made to design and implement robust algorithms that identify and remove unreliable or unbalanced data, ensuring that ChatGPT is trained on diverse and accurate information. This can help prevent the chatbot from placing excessive weight on certain polarities of data, which may result from skewed information on the internet. By refining the chatbot’s training model and incorporating more reliable data sources, the performance of ChatGPT can be continually improved to provide more accurate and unbiased responses.</p>
        <p>In addition to these technological improvements, collaboration between developers, subject matter experts, and human factors researchers can further ensure that AI-driven chatbots such as ChatGPT are designed and deployed with a comprehensive understanding of user needs and potential challenges. By addressing the risks associated with excessive trust and actively improving the chatbot’s performance, the development and application of AI-driven technologies such as ChatGPT can continue advancing, promoting positive outcomes and responsible use in various domains.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Our study has limitations, including using a cross-sectional survey and self-report measures, which may introduce biases. The limited geographic scope of the sample, focused on US respondents, may affect the generalizability of our findings to other cultural contexts. Future research should use longitudinal data; explore trust in chatbot adoption across different cultural contexts; and control for potential confounding factors such as participants’ familiarity with AI technology, prior experiences with chatbots, and demographic factors. Future research should use various methods, such as tracking actual chatbot use and conducting qualitative interviews, to assess trust and user behavior. Increasing data collection frequency and ensuring participants’ anonymity can also help mitigate biases. Future research can better understand trust’s role in chatbot adoption by addressing these limitations and enabling developers and organizations to design technologies that meet users’ needs and expectations.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>Our study provides novel insights into the factors driving the adoption of chatbot technologies such as ChatGPT. Our results suggest that trust is critical to users’ adoption of ChatGPT and that a small proportion of users use it for health-related queries. Even as ChatGPT evolves, it remains crucial to highlight that this tool, while powerful, was not initially designed with a specific focus on health care applications. Therefore, an overreliance on it for health-related advice or diagnoses could potentially lead to misinformation and subsequent health risks.</p>
        <p>Efforts must also be focused on improving the system’s ability to distinguish between queries that it can safely handle and those that should be redirected to a human health care professional.</p>
        <p>Companies and policy makers should prioritize building trust and transparency in developing and deploying chatbots. Although risks are associated with excessive trust in AI-driven chatbots such as ChatGPT, it is important to recognize that the potential risks can be reduced by advocating for shared accountability and fostering collaboration between developers, subject matter experts (such as health care professionals), and human factors researchers.</p>
        <p>A systematic collaborative approach can ensure that AI-driven chatbots are designed and deployed with a comprehensive understanding of user needs and potential challenges. By addressing the risks associated with excessive trust and actively improving the chatbot’s performance, the development and application of AI-driven technologies such as ChatGPT can continue advancing, promoting positive outcomes and responsible use in various domains.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">ChatGPT</term>
          <def>
            <p>Chat Generative Pre-trained Transformer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">PLS-SEM</term>
          <def>
            <p>partial least squares structural equation modeling</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahuja</surname>
              <given-names>AS</given-names>
            </name>
          </person-group>
          <article-title>The impact of artificial intelligence in medicine on the future role of the physician</article-title>
          <source>PeerJ</source>
          <year>2019</year>
          <month>10</month>
          <day>4</day>
          <volume>7</volume>
          <fpage>e7702</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31592346"/>
          </comment>
          <pub-id pub-id-type="doi">10.7717/peerj.7702</pub-id>
          <pub-id pub-id-type="medline">31592346</pub-id>
          <pub-id pub-id-type="pii">7702</pub-id>
          <pub-id pub-id-type="pmcid">PMC6779111</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russell</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Norvig</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <source>Artificial Intelligence: A Modern Approach</source>
          <year>2010</year>
          <publisher-loc>Upper Saddle River, NJ</publisher-loc>
          <publisher-name>Pearson Education, Inc</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bobrow</surname>
              <given-names>DG</given-names>
            </name>
            <name name-style="western">
              <surname>Hayes</surname>
              <given-names>PJ</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence — where are we?</article-title>
          <source>Artif Intell</source>
          <year>1985</year>
          <month>03</month>
          <volume>25</volume>
          <issue>3</issue>
          <fpage>375</fpage>
          <lpage>415</lpage>
          <pub-id pub-id-type="doi">10.1016/0004-3702(85)90077-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brock</surname>
              <given-names>DC</given-names>
            </name>
          </person-group>
          <article-title>Learning from artificial intelligence’s previous awakenings: the history of expert systems</article-title>
          <source>AI Magazine</source>
          <year>2018</year>
          <month>9</month>
          <day>28</day>
          <volume>39</volume>
          <issue>3</issue>
          <fpage>3</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1609/aimag.v39i3.2809</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Whalen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Expert systems versus systems for experts: computer-aided dispatch as a support system in real-world environments</article-title>
          <source>Cambridge Series on Human Computer Interaction</source>
          <year>1995</year>
          <fpage>161</fpage>
          <lpage>83</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/profile/Jack-Whalen/publication/263232352_Expert_Systems_versus_Systems_for_Experts/links/0f31753a314f45605e000000/Expert-Systems-versus-Systems-for-Experts.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="web">
          <article-title>Introducing ChatGPT</article-title>
          <source>OpenAI</source>
          <access-date>2023-05-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/blog/chatgpt">https://openai.com/blog/chatgpt</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>SB</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: the future of discharge summaries?</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <month>03</month>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>e107</fpage>
          <lpage>e108</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2589-7500(23)00021-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00021-3</pub-id>
          <pub-id pub-id-type="medline">36754724</pub-id>
          <pub-id pub-id-type="pii">S2589-7500(23)00021-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gilson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Safranek</surname>
              <given-names>CW</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Socrates</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Chartash</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>How does ChatGPT perform on the United States Medical Licensing Examination? the implications of large language models for medical education and knowledge assessment</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>03</month>
          <day>08</day>
          <volume>9</volume>
          <fpage>e45312</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e45312/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/45312</pub-id>
          <pub-id pub-id-type="medline">36753318</pub-id>
          <pub-id pub-id-type="pii">v9i1e45312</pub-id>
          <pub-id pub-id-type="pmcid">PMC9947764</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>TJ</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and other artificial intelligence applications speed up scientific writing</article-title>
          <source>J Chin Med Assoc</source>
          <year>2023</year>
          <month>04</month>
          <day>01</day>
          <volume>86</volume>
          <issue>4</issue>
          <fpage>351</fpage>
          <lpage>353</lpage>
          <pub-id pub-id-type="doi">10.1097/JCMA.0000000000000900</pub-id>
          <pub-id pub-id-type="medline">36791246</pub-id>
          <pub-id pub-id-type="pii">02118582-990000000-00174</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aljanabi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: future directions and open possibilities</article-title>
          <source>Mesopotamian Journal of CyberSecurity</source>
          <year>2023</year>
          <month>1</month>
          <day>31</day>
          <volume>2023</volume>
          <fpage>16</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.58496/mjcs/2023/003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kung</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Cheatham</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Medenilla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sillos</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>de Leon</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Elepaño</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Madriaga</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aggabao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Diaz-Candido</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Maningo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <month>03</month>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>e0000198</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812645"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000198</pub-id>
          <pub-id pub-id-type="medline">36812645</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-22-00371</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Dis</surname>
              <given-names>EAM</given-names>
            </name>
            <name name-style="western">
              <surname>Bollen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zuidema</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>van Rooij</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bockting</surname>
              <given-names>CL</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: five priorities for research</article-title>
          <source>Nature</source>
          <year>2023</year>
          <month>03</month>
          <day>03</day>
          <volume>614</volume>
          <issue>7947</issue>
          <fpage>224</fpage>
          <lpage>226</lpage>
          <pub-id pub-id-type="doi">10.1038/d41586-023-00288-7</pub-id>
          <pub-id pub-id-type="medline">36737653</pub-id>
          <pub-id pub-id-type="pii">10.1038/d41586-023-00288-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Biswas</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and the future of medical writing</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <month>04</month>
          <day>01</day>
          <volume>307</volume>
          <issue>2</issue>
          <fpage>e223312</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.223312</pub-id>
          <pub-id pub-id-type="medline">36728748</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mbakwe</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Lourentzou</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Mechanic</surname>
              <given-names>OJ</given-names>
            </name>
            <name name-style="western">
              <surname>Dagan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT passing USMLE shines a spotlight on the flaws of medical education</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <month>03</month>
          <day>9</day>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>e0000205</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812618"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000205</pub-id>
          <pub-id pub-id-type="medline">36812618</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-23-00027</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931307</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lund</surname>
              <given-names>BD</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Chatting about ChatGPT: how may AI and GPT impact academia and libraries?</article-title>
          <source>Library Hi Tech News</source>
          <year>2023</year>
          <month>02</month>
          <day>14</day>
          <volume>40</volume>
          <issue>3</issue>
          <fpage>26</fpage>
          <lpage>29</lpage>
          <pub-id pub-id-type="doi">10.1108/lhtn-01-2023-0009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rudolph</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: bullshit spewer or the end of traditional assessments in higher education?</article-title>
          <source>Journal of Applied Learning and Teaching</source>
          <year>2023</year>
          <month>1</month>
          <day>24</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.37074/jalt.2023.6.1.9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kitamura</surname>
              <given-names>FC</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT is shaping the future of medical writing but still requires human judgment</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <month>04</month>
          <day>01</day>
          <volume>307</volume>
          <issue>2</issue>
          <fpage>e230171</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.230171</pub-id>
          <pub-id pub-id-type="medline">36728749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kwak</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>An</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Is ChatGPT better than human annotators? potential and limitations of ChatGPT in explaining implicit hate speech</article-title>
          <year>2023</year>
          <month>4</month>
          <day>30</day>
          <conf-name>WWW '23: The ACM Web Conference 2023</conf-name>
          <conf-date>April 30 to May 4, 2023</conf-date>
          <conf-loc>Austin, TX</conf-loc>
          <fpage>294</fpage>
          <lpage>297</lpage>
          <pub-id pub-id-type="doi">10.1145/3543873.3587368</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Castelvecchi</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Are ChatGPT and AlphaCode going to replace programmers?</article-title>
          <source>Nature</source>
          <year>2022</year>
          <month>12</month>
          <day>08</day>
          <pub-id pub-id-type="doi">10.1038/d41586-022-04383-z</pub-id>
          <pub-id pub-id-type="medline">36481949</pub-id>
          <pub-id pub-id-type="pii">10.1038/d41586-022-04383-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Angelis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Baglivo</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Arzilli</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Privitera</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Ferragina</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tozzi</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Rizzo</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and the rise of large language models: the new AI-driven infodemic threat in public health</article-title>
          <source>Front Public Health</source>
          <year>2023</year>
          <month>4</month>
          <day>25</day>
          <volume>11</volume>
          <fpage>1166120</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37181697"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpubh.2023.1166120</pub-id>
          <pub-id pub-id-type="medline">37181697</pub-id>
          <pub-id pub-id-type="pmcid">PMC10166793</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>FY</given-names>
            </name>
            <name name-style="western">
              <surname>Miao</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>What does ChatGPT say: the DAO from algorithmic intelligence to linguistic intelligence</article-title>
          <source>IEEE/CAA J Autom Sinica</source>
          <year>2023</year>
          <month>3</month>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>575</fpage>
          <lpage>579</lpage>
          <pub-id pub-id-type="doi">10.1109/jas.2023.123486</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hope</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gerada</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and antimicrobial advice: the end of the consulting infection doctor?</article-title>
          <source>Lancet Infect Dis</source>
          <year>2023</year>
          <month>04</month>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>405</fpage>
          <lpage>406</lpage>
          <pub-id pub-id-type="doi">10.1016/S1473-3099(23)00113-5</pub-id>
          <pub-id pub-id-type="medline">36822213</pub-id>
          <pub-id pub-id-type="pii">S1473-3099(23)00113-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thorp</surname>
              <given-names>HH</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT is fun, but not an author</article-title>
          <source>Science</source>
          <year>2023</year>
          <month>01</month>
          <day>27</day>
          <volume>379</volume>
          <issue>6630</issue>
          <fpage>313</fpage>
          <pub-id pub-id-type="doi">10.1126/science.adg7879</pub-id>
          <pub-id pub-id-type="medline">36701446</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liebrenz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schleifer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Buadze</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bhugra</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Generating scholarly content with ChatGPT: ethical challenges for medical publishing</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <month>03</month>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>e105</fpage>
          <lpage>e106</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris.unibe.ch/id/eprint/178562"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00019-5</pub-id>
          <pub-id pub-id-type="medline">36754725</pub-id>
          <pub-id pub-id-type="pii">S2589-7500(23)00019-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turja</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Aaltonen</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Taipale</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Oksanen</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Robot acceptance model for care (RAM-care): a principled approach to the intention to use care robots</article-title>
          <source>Information &#38; Management</source>
          <year>2020</year>
          <month>07</month>
          <volume>57</volume>
          <issue>5</issue>
          <fpage>103220</fpage>
          <pub-id pub-id-type="doi">10.1016/j.im.2019.103220</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Jo</surname>
              <given-names>HI</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>BG</given-names>
            </name>
          </person-group>
          <article-title>The study on the factors influencing on the behavioral intention of chatbot service for the financial sector: focusing on the UTAUT model. Article in Korean</article-title>
          <source>Journal of Digital Contents Society</source>
          <year>2019</year>
          <month>01</month>
          <day>31</day>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>41</fpage>
          <lpage>50</lpage>
          <pub-id pub-id-type="doi">10.9728/dcs.2019.20.1.41</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kuberkar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Singhal</surname>
              <given-names>TK</given-names>
            </name>
          </person-group>
          <article-title>Factors influencing adoption intention of AI powered chatbot for public transport services within a smart city</article-title>
          <source>International Journal on Emerging Technologies</source>
          <year>2020</year>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>948</fpage>
          <lpage>958</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchtrend.net/ijet/pdf/Factors%20Influencing%20Adoption%20Intention%20of%20AI%20Powered%20Chatbot%20for%20Public%20Transport%20Services%20within%20a%20Smart%20City%20Tarun%20Kumar%20Singhal%20947.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kasilingam</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Understanding the attitude and intention to use smartphone chatbots for shopping</article-title>
          <source>Technology in Society</source>
          <year>2020</year>
          <month>08</month>
          <volume>62</volume>
          <issue>12</issue>
          <fpage>101280</fpage>
          <pub-id pub-id-type="doi">10.1016/j.techsoc.2020.101280</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chiang</surname>
              <given-names>MC</given-names>
            </name>
          </person-group>
          <article-title>Trust me, if you can: a study on the factors that influence consumers’ purchase intention triggered by chatbots based on brain image evidence and self-reported assessments</article-title>
          <source>Behaviour &#38; Information Technology</source>
          <year>2020</year>
          <month>03</month>
          <day>24</day>
          <volume>40</volume>
          <issue>11</issue>
          <fpage>1177</fpage>
          <lpage>1194</lpage>
          <pub-id pub-id-type="doi">10.1080/0144929x.2020.1743362</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patil</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kulkarni</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Can we trust health and wellness chatbot going mobile? empirical research using TAM and HBM</article-title>
          <year>2022</year>
          <month>8</month>
          <day>29</day>
          <conf-name>2022 IEEE Region 10 Symposium (TENSYMP)</conf-name>
          <conf-date>July 1-3, 2022</conf-date>
          <conf-loc>Mumbai, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/tensymp54529.2022.9864368</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heerink</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kröse</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Evers</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Wielinga</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Assessing acceptance of assistive social agent technology by older adults: the Almere model</article-title>
          <source>Int J of Soc Robotics</source>
          <year>2010</year>
          <month>9</month>
          <day>4</day>
          <volume>2</volume>
          <issue>4</issue>
          <fpage>361</fpage>
          <lpage>375</lpage>
          <pub-id pub-id-type="doi">10.1007/s12369-010-0068-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Bussel</surname>
              <given-names>Martien J P</given-names>
            </name>
            <name name-style="western">
              <surname>Odekerken-Schröder</surname>
              <given-names>Gaby J</given-names>
            </name>
            <name name-style="western">
              <surname>Ou</surname>
              <given-names>Carol</given-names>
            </name>
            <name name-style="western">
              <surname>Swart</surname>
              <given-names>Rachelle R</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobs</surname>
              <given-names>Maria J G</given-names>
            </name>
          </person-group>
          <article-title>Analyzing the determinants to accept a virtual assistant and use cases among cancer patients: a mixed methods study</article-title>
          <source>BMC Health Serv Res</source>
          <year>2022</year>
          <month>07</month>
          <day>09</day>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>890</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmchealthservres.biomedcentral.com/articles/10.1186/s12913-022-08189-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12913-022-08189-7</pub-id>
          <pub-id pub-id-type="medline">35804356</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12913-022-08189-7</pub-id>
          <pub-id pub-id-type="pmcid">PMC9270807</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Filieri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Raguseo</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gorton</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Mobile apps for healthy living: factors influencing continuance intention for health apps</article-title>
          <source>Technological Forecasting and Social Change</source>
          <year>2021</year>
          <month>05</month>
          <volume>166</volume>
          <fpage>120644</fpage>
          <pub-id pub-id-type="doi">10.1016/j.techfore.2021.120644</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laumer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Maier</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gubler</surname>
              <given-names>FT</given-names>
            </name>
          </person-group>
          <article-title>Chatbot acceptance in healthcare: explaining user adoption of conversational agents for disease diagnosis</article-title>
          <year>2019</year>
          <conf-name>27th European Conference on Information Systems (ECIS)</conf-name>
          <conf-date>June 8-14, 2019</conf-date>
          <conf-loc>Stockholm &#38; Uppsala, Sweden</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aisel.aisnet.org/ecis2019_rp/88/"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Chiu</surname>
              <given-names>YTH</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>HD</given-names>
            </name>
          </person-group>
          <article-title>Determinants of continuance intention towards banks’ chatbot services in Vietnam: a necessity for sustainable development</article-title>
          <source>Sustainability</source>
          <year>2021</year>
          <month>07</month>
          <day>08</day>
          <volume>13</volume>
          <issue>14</issue>
          <fpage>7625</fpage>
          <pub-id pub-id-type="doi">10.3390/su13147625</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ihaka</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gentleman</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>R: a language for data analysis and graphics</article-title>
          <source>Journal of Computational and Graphical Statistics</source>
          <year>1996</year>
          <month>09</month>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>299</fpage>
          <lpage>314</lpage>
          <pub-id pub-id-type="doi">10.1080/10618600.1996.10474713</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hair</surname>
              <given-names>JF</given-names>
              <suffix>Jr</suffix>
            </name>
            <name name-style="western">
              <surname>Hult</surname>
              <given-names>GTM</given-names>
            </name>
            <name name-style="western">
              <surname>Ringle</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Sarstedt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Danks</surname>
              <given-names>NP</given-names>
            </name>
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The SEMinR Package</article-title>
          <source>Partial Least Squares Structural Equation Modeling (PLS-SEM) Using R: A Workbook</source>
          <year>2021</year>
          <month>11</month>
          <day>4</day>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>49</fpage>
          <lpage>74</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Latif</surname>
              <given-names>KF</given-names>
            </name>
            <name name-style="western">
              <surname>Nazeer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shahzad</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ullah</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Imranullah</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sahibzada</surname>
              <given-names>UF</given-names>
            </name>
          </person-group>
          <article-title>Impact of entrepreneurial leadership on project success: mediating role of knowledge management processes</article-title>
          <source>Leadership &#38; Organization Development Journal</source>
          <year>2020</year>
          <month>04</month>
          <day>4</day>
          <volume>41</volume>
          <issue>2</issue>
          <fpage>237</fpage>
          <lpage>256</lpage>
          <pub-id pub-id-type="doi">10.1108/lodj-07-2019-0323</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ab Hamid</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Sami</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Mohmad Sidek</surname>
              <given-names>MH</given-names>
            </name>
          </person-group>
          <article-title>Discriminant validity assessment: use of Fornell &#38; Larcker criterion versus HTMT criterion</article-title>
          <source>J Phys Conf Ser</source>
          <year>2017</year>
          <month>09</month>
          <day>20</day>
          <volume>890</volume>
          <conf-name>1st International Conference on Applied &#38; Industrial Mathematics and Statistics 2017 (ICoAIMS 2017)</conf-name>
          <conf-date>August 8-10, 2017</conf-date>
          <conf-loc>Kuantan, Pahang, Malaysia</conf-loc>
          <fpage>012163</fpage>
          <pub-id pub-id-type="doi">10.1088/1742-6596/890/1/012163</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hair</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Ringle</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Sarstedt</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>PLS-SEM: indeed a silver bullet</article-title>
          <source>Journal of Marketing Theory and Practice</source>
          <year>2014</year>
          <month>12</month>
          <day>08</day>
          <volume>19</volume>
          <issue>2</issue>
          <fpage>139</fpage>
          <lpage>152</lpage>
          <pub-id pub-id-type="doi">10.2753/mtp1069-6679190202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hair</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Risher</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Sarstedt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ringle</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>When to use and how to report the results of PLS-SEM</article-title>
          <source>European Business Review</source>
          <year>2019</year>
          <month>01</month>
          <day>14</day>
          <volume>31</volume>
          <issue>1</issue>
          <fpage>2</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1108/ebr-11-2018-0203</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hair</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Hult</surname>
              <given-names>GTM</given-names>
            </name>
            <name name-style="western">
              <surname>Ringle</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Sarstedt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Thiele</surname>
              <given-names>KO</given-names>
            </name>
          </person-group>
          <article-title>Mirror, mirror on the wall: a comparative evaluation of composite-based structural equation modeling methods</article-title>
          <source>J Acad Mark Sci</source>
          <year>2017</year>
          <month>2</month>
          <day>16</day>
          <volume>45</volume>
          <issue>5</issue>
          <fpage>616</fpage>
          <lpage>632</lpage>
          <pub-id pub-id-type="doi">10.1007/s11747-017-0517-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nitzl</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>The use of partial least squares structural equation modelling (PLS-SEM) in management accounting research: directions for future theory development</article-title>
          <source>Journal of Accounting Literature</source>
          <year>2016</year>
          <month>12</month>
          <volume>37</volume>
          <fpage>19</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1016/j.acclit.2016.09.003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mostafa</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Kasamani</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Antecedents and consequences of chatbot initial trust</article-title>
          <source>Eur J Mark</source>
          <year>2021</year>
          <month>10</month>
          <day>20</day>
          <volume>56</volume>
          <issue>6</issue>
          <fpage>1748</fpage>
          <lpage>1771</lpage>
          <pub-id pub-id-type="doi">10.1108/ejm-02-2020-0084</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mou</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Human vs. AI: understanding the impact of anthropomorphism on consumer response to chatbots from the perspective of trust and relationship norms</article-title>
          <source>Information Processing &#38; Management</source>
          <year>2022</year>
          <month>5</month>
          <volume>59</volume>
          <issue>3</issue>
          <fpage>102940</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ipm.2022.102940</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helberger</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Diakopoulos</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and the AI Act</article-title>
          <source>Internet Policy Review</source>
          <year>2023</year>
          <month>2</month>
          <day>16</day>
          <volume>12</volume>
          <issue>1</issue>
          <pub-id pub-id-type="doi">10.14763/2023.1.1682</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cascella</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Montomoli</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bellini</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bignami</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Evaluating the feasibility of ChatGPT in healthcare: an analysis of multiple clinical and research scenarios</article-title>
          <source>J Med Syst</source>
          <year>2023</year>
          <month>03</month>
          <day>04</day>
          <volume>47</volume>
          <issue>1</issue>
          <fpage>33</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36869927"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10916-023-01925-4</pub-id>
          <pub-id pub-id-type="medline">36869927</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-023-01925-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC9985086</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Asch</surname>
              <given-names>DA</given-names>
            </name>
          </person-group>
          <article-title>An interview with ChatGPT about health care</article-title>
          <source>NEJM Catalyst</source>
          <year>2023</year>
          <month>4</month>
          <day>4</day>
          <volume>4</volume>
          <issue>2</issue>
          <pub-id pub-id-type="doi">10.1056/CAT.23.0043</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vaishya</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vaish</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: is this version good for healthcare and research?</article-title>
          <source>Diabetes Metab Syndr</source>
          <year>2023</year>
          <month>04</month>
          <volume>17</volume>
          <issue>4</issue>
          <fpage>102744</fpage>
          <pub-id pub-id-type="doi">10.1016/j.dsx.2023.102744</pub-id>
          <pub-id pub-id-type="medline">36989584</pub-id>
          <pub-id pub-id-type="pii">S1871-4021(23)00040-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Transforming maritime health with ChatGPT-powered healthcare services for mariners</article-title>
          <source>Ann Biomed Eng</source>
          <year>2023</year>
          <month>06</month>
          <volume>51</volume>
          <issue>6</issue>
          <fpage>1123</fpage>
          <lpage>1125</lpage>
          <pub-id pub-id-type="doi">10.1007/s10439-023-03195-0</pub-id>
          <pub-id pub-id-type="medline">37040060</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10439-023-03195-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fijačko</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gosak</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Štiglic</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>John Douma</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Can ChatGPT pass the life support exams without entering the American Heart Association course?</article-title>
          <source>Resuscitation</source>
          <year>2023</year>
          <month>04</month>
          <volume>185</volume>
          <fpage>109732</fpage>
          <pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.109732</pub-id>
          <pub-id pub-id-type="medline">36775020</pub-id>
          <pub-id pub-id-type="pii">S0300-9572(23)00045-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>D'Amico</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>TG</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Langer</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>I asked a ChatGPT to write an editorial about how we can incorporate chatbots into neurosurgical research and patient care…</article-title>
          <source>Neurosurgery</source>
          <year>2023</year>
          <month>04</month>
          <day>01</day>
          <volume>92</volume>
          <issue>4</issue>
          <fpage>663</fpage>
          <lpage>664</lpage>
          <pub-id pub-id-type="doi">10.1227/neu.0000000000002414</pub-id>
          <pub-id pub-id-type="medline">36757199</pub-id>
          <pub-id pub-id-type="pii">00006123-202304000-00002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kamineni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Evaluating ChatGPT as an adjunct for radiologic decision-making</article-title>
          <source>medRxiv</source>
          <comment>Preprint posted online on February 7, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1101/2023.02.02.23285399"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="medline">36798292</pub-id>
          <pub-id pub-id-type="pii">2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="pmcid">PMC9934725</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
