<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e71418</article-id><article-id pub-id-type="doi">10.2196/71418</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Perceptions and Attitudes of Chinese Oncologists Toward Endorsing AI-Driven Chatbots for Health Information Seeking Among Patients with Cancer: Phenomenological Qualitative Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Zeng</surname><given-names>Lijuan</given-names></name><degrees>BSCN</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Qiaoqi</given-names></name><degrees>MMSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zuo</surname><given-names>Yan</given-names></name><degrees>MSN</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Ying</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Li</surname><given-names>Zhaojun</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff6">6</xref></contrib></contrib-group><aff id="aff1"><institution>Division of Abdominal Tumor Multimodality Treatment, Cancer Center, West China Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Gynecology and Obstetrics Nursing, West China Second University Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff3"><institution>West China School of Nursing, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff4"><institution>Key Laboratory of Birth Defects and Related Disease of Women and Children, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff5"><institution>Division of Internal Medicine, Institute of Integrated Traditional Chinese and Western Medicine, West China Hospital, Sichuan University</institution><addr-line>Chengdu</addr-line><country>China</country></aff><aff id="aff6"><institution>Department of Radiation Oncology, Hainan Affiliated Hospital of Hainan Medical University (Hainan General Hospital)</institution><addr-line>#19 Xiuhua Road, Xiuying District</addr-line><addr-line>Haikou</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Khan Rony</surname><given-names>Moustaq Karim</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>An</surname><given-names>Ning</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Zhaojun Li, MD, Department of Radiation Oncology, Hainan Affiliated Hospital of Hainan Medical University (Hainan General Hospital), #19 Xiuhua Road, Xiuying District, Haikou, 570311, China, 86 18898963083; <email>lzjradiotherapy@163.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>23</day><month>7</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e71418</elocation-id><history><date date-type="received"><day>17</day><month>01</month><year>2025</year></date><date date-type="rev-recd"><day>12</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>12</day><month>06</month><year>2025</year></date></history><copyright-statement>&#x00A9; Lijuan Zeng, Qiaoqi Li, Qiaoqi Li, Ying Zhang, Zhaojun Li. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 23.7.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e71418"/><abstract><sec><title>Background</title><p>Chatbots driven by large language model artificial intelligence (AI) have emerged as potential tools to enhance health information access for patients with cancer. However, their integration into patient education raises concerns among oncologists. Limited literature has examined the perceptions and attitudes of oncologists in terms of endorsing AI-driven chatbots for health information.</p></sec><sec><title>Objective</title><p>This study aims to explore the perceptions and attitudes of Chinese oncologists toward endorsing AI-driven chatbots to patients with cancer.</p></sec><sec sec-type="methods"><title>Methods</title><p>In this phenomenological qualitative study, we purposively sampled oncologists from 4 hospitals in Southwest and East China and conducted semistructured interviews with 24 participants between November 19, 2024, and December 21, 2024. The data saturation principle was observed to determine the end point of data collection. Data were analyzed using the Colaizzi method.</p></sec><sec sec-type="results"><title>Results</title><p>The participants were aged 42.0 (range 29&#x2010;53) years on average, including 9 (37%) female and 15 (62%) male participants. The participants had an average of 8.8 (range 1&#x2010;25) years in oncology. Of the participants, 7 (29%) had recommended AI chatbots to patients. Three key themes were revealed from analysis of interview transcriptions, including perceived benefits, significant concerns, and impacts on doctor-patient dynamics. Benefits included enhanced accessibility and potential support for chronic condition management. Concerns centered on liability, misinformation, lack of personalization, privacy and data security risks, and patient readiness and education. Oncologists stressed a dual impact of AI chatbots on doctor-patient dynamics, recognizing the potential for improved communication and risks of trust erosion due to overreliance on AI.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>While recognizing the potential of AI-driven chatbots to enhance accessibility of health information and chronic disease management, Chinese oncologists report significant concerns, including liability, misinformation, lack of personalization, privacy and data security risks, and patient readiness. Addressing the challenges requires comprehensive solutions, such as clear policies and guidelines, rigorous testing and validation, institutional endorsement, and robust patient and provider education. Future efforts should focus on resolving the barriers while leveraging the strengths of AI technology to support patient-centered care in a safe, effective, and ethical manner.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>attitude</kwd><kwd>chatbot</kwd><kwd>health information seeking</kwd><kwd>large language model</kwd><kwd>liability</kwd><kwd>misinformation</kwd><kwd>oncologist</kwd><kwd>perception</kwd><kwd>patient education</kwd><kwd>qualitative study</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Chatbots driven by large language model (LLM) artificial intelligence (AI) have emerged as innovative tools in health care [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. They leverage advanced natural language processing to interact with users in real-time. They simulate human conversation to provide information, answer queries, and offer support based on a vast repository of data [<xref ref-type="bibr" rid="ref4">4</xref>]. Equipped with LLMs, they are capable of understanding complex questions and delivering contextually relevant responses. This makes them valuable assets for health information dissemination [<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>The integration of AI-driven chatbots in health care has been growing rapidly, given their potential to bridge gaps in patient education and support. The tools can assist individuals in understanding medical conditions, exploring treatment options, and navigating health systems. Their ability to operate 24/7 offers a unique advantage in addressing patients&#x2019; nonurgent concerns and reducing reliance on in-person consultations for routine information [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. Additionally, they hold promise in alleviating the burden on health care providers by streamlining communication and enhancing patient engagement [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Globally, widely recognized AI-driven chatbots, such as ChatGPT, have gained prominence for their versatility in responding to health-related inquiries [<xref ref-type="bibr" rid="ref10">10</xref>]. In China, domestic tools such as Kimichat have been developed to cater to local linguistic and cultural contexts.</p><p>Accessible and reliable health information is crucial for patients with cancer. It enables them to make informed decisions about their treatment, manage symptoms, and improve their overall quality of life. In cancer care, patients often face complex medical decisions that require a clear understanding of their condition, treatment options, potential side effects, and prognosis [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Reliable information can also help patients and their families cope with the emotional and psychological challenges associated with a cancer diagnosis and foster a sense of control and preparedness [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. However, patients with cancer frequently encounter significant challenges in accessing relevant information. One major issue is the prevalence of misinformation, particularly from unverified web-based sources. Patients who turn to search engines or social media platforms for answers may encounter inaccurate, incomplete, or overly generalized information that can mislead them or exacerbate their anxiety [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Additionally, limited access to health care resources further complicates information-seeking efforts. Oftentimes, face-to-face consultations with oncology specialists may be infrequent or unavailable. Patients may struggle to attain timely answers, which leaves gaps in their understanding of their disease and its management. Even in urban centers, the high patient-to-doctor ratio often results in brief consultations, leaving little time for detailed explanations of medical conditions or treatments. As a result, innovative solutions, such as AI-driven chatbots, can provide reliable and easily accessible health information to support patients with cancer, which could complement traditional health care delivery.</p><p>According to current reports, LLM AI chatbots, such as ChatGPT, have significantly impacted patient education by providing accessible health information and personalized support. They have been used to interpret complex medical data, generate patient-friendly educational materials, and answer health-related queries, which has enhanced patient engagement and health literacy [<xref ref-type="bibr" rid="ref17">17</xref>]. However, evaluations of the chatbots reveal mixed results regarding accuracy and reliability. For instance, studies assessing ChatGPT&#x2019;s responses to medical queries have noted variability in accuracy, safety, relevance, and readability. Challenges such as the risk of disseminating misinformation, lack of personalization, and ethical concerns related to patient privacy have been noted [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>Oncologists are at the forefront of cancer care. They serve as clinical decision makers and trusted advisors in patient education. Their role has evolved to include guiding patients through increasingly complex treatment options and emerging digital health tools. In today&#x2019;s rapidly changing health care landscape, the endorsement of innovative technologies, such as AI-driven chatbots, is critical, as it can significantly influence patient trust and engagement. Despite their central impact on patient care and information dissemination, the unique perspectives of oncologists on these digital tools have received limited attention. This study seeks to bridge that gap by exploring Chinese oncologists&#x2019; perceptions and attitudes toward endorsing AI-driven chatbots for the health information seeking of patients with cancer. By using a phenomenological qualitative approach, our findings may capture their lived experiences, illuminate the pivotal role they play in integrating digital innovations into clinical practice, and identify both the facilitators and barriers to adoption.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Setting</title><p>The study was conducted among oncologists working in 4 major hospitals in Southwest and East China. The primary investigator (PI; first author) is from West China Hospital, which is one of the country&#x2019;s largest comprehensive hospitals and a leading regional center for cancer care. The other 3 hospitals included 2 major comprehensive hospitals and one specialized cancer hospital, all recognized for their strong oncology services. The hospitals were chosen to ensure diverse representation of oncologists&#x2019; experiences and perspectives, given a wide range of institutional settings and patient populations.</p></sec><sec id="s2-2"><title>Study Design</title><p>We used a phenomenological qualitative design to explore the perceptions and attitudes of oncologists toward endorsing AI-driven chatbots to their patients for health information seeking. The phenomenological approach was chosen for its ability to capture and interpret the subjective experiences and insights of participants and provide a deeper understanding of the study topic [<xref ref-type="bibr" rid="ref21">21</xref>]. The design was deemed appropriate for investigating the attitudes and concerns of oncologists in the context of integrating AI technologies into patient education and care.</p></sec><sec id="s2-3"><title>Sampling Strategy</title><p>A purposive sampling strategy was used, which ensured the selection of participants who had appropriate backgrounds and could provide relevant, rich, and detailed insights [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s2-4"><title>Participant Enrollment</title><p>Licensed oncologists actively practicing at the selected hospitals, with at least 2 years of clinical experience, were recruited for the study. Participants were required to have experience in providing patient education and using AI-driven chatbots, including ChatGPT or similar Chinese chatbots. Those who expressed interest in participating, after being given detailed study information, were enrolled.</p><p>Notably, we set a minimum requirement of 2 years of clinical experience to ensure that participants had adequate exposure to the complexities of real-world clinical practice, as well as exposure to the AI chatbots emerging in the last 2 years. The threshold was meant to ensure that the collected data are grounded in experienced perspectives, thereby enhancing the credibility and validity of the findings by relying on the judgments of practitioners who have encountered realistic clinical scenarios and patient interactions.</p><p>To recruit participants, invitations were extended via phone calls through the PI&#x2019;s professional network. Given the PI&#x2019;s senior standing in oncology and long-standing relationships with a wide range of practitioners across different hospital settings in Southwest and East China, the purposive sampling strategy enabled the inclusion of oncologists with diverse demographic backgrounds, clinical experiences, and levels of exposure to AI-driven chatbots. This targeted approach ensured a comprehensive representation of perspectives, thereby mitigating potential recruitment bias related to network-based sampling. A 20&#x2010;30 minute semistructured interview was conducted either immediately or at a later time convenient for the participant.</p></sec><sec id="s2-5"><title>Data Collection</title><p>Data were collected through semistructured phone interviews. An interview guide was developed and pilot-tested with 3 oncologists to refine the questions and flow (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The pilot interviews and transcripts were excluded from the final analysis. Each interview phone call was audio-recorded with permission. Notes were also taken to capture key observations. Data collection continued until data saturation was achieved, as determined by the point where no new analytic information emerged in 3 consecutive interviews [<xref ref-type="bibr" rid="ref23">23</xref>]. Participants&#x2019; demographic data were collected before interviews using a standard demographic information form, which was compiled and desensitized for subsequent analysis.</p></sec><sec id="s2-6"><title>Data Analysis</title><p>Data were analyzed using the Colaizzi method, a rigorous approach that systematically guides researchers through extracting and organizing significant statements, formulating meanings, and clustering themes [<xref ref-type="bibr" rid="ref24">24</xref>]. We chose the Colaizzi method over other phenomenological techniques, such as Giorgi or van Manen approaches, for several reasons. First, unlike the other 2 methods, the practice of returning to participants for validation of the identified meanings enhances the credibility and confirmability of our findings. This was especially important as our findings might be used to support sensitive clinical decisions. Second, the Colaizzi method is particularly suited for capturing the in-depth details of lived experiences. This aligns with our objective of exploring the complex perceptions and attitudes of oncologists. It could ensure a comprehensive interpretation of the data while maintaining comprehensive transparency throughout the analysis, which strengthens the study&#x2019;s methodological rigor. The process involved multiple steps. The researchers first read the transcripts at least 3 times to gain familiarity and then extracted significant statements and formulated meanings. The meanings were clustered into themes and subthemes, which were returned to participants for validation. Any discrepancies were discussed until consensus was achieved.</p></sec><sec id="s2-7"><title>Study Rigor</title><p>To ensure the trustworthiness of the study, we implemented several strategies, including member checking for ensuring credibility, where participants reviewed and validated the findings; an audit trail documenting key decisions in the study process for dependability; regular reflexivity discussions among the research team to acknowledge and mitigate potential biases due to their personal and professional backgrounds and assumptions; and a reflexivity statement for outlining the researchers&#x2019; positions, potential influences on the study, and steps taken to minimize bias throughout the research process (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>) [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref27">27</xref>]. Although we did not formally assess intercoder reliability or conduct external validation of the coding process, multiple researchers independently coded the data and subsequently discussed discrepancies until consensus was reached.</p></sec><sec id="s2-8"><title>Ethical Considerations</title><p>The study was ethically approved by the Ethics Committee of West China Hospital, Sichuan University (HXLL0751). Verbal informed consent was obtained from each participant at the beginning of their interview, including explicit permission for the interview to be recorded. The consent process was documented as part of the audio recording. The study was conducted in accordance with the principles of the Declaration of Helsinki and relevant regulatory codes and guidelines for human subject protection. No compensation of any form was provided to participants for their participation.</p><p>All identifiers were anonymized. Participants were assigned unique identifiers (eg, P1 and P2). Audio recordings and transcripts were stored securely on a password-protected flash drive, which was kept by the PI and was accessible only to the research team. Consent forms and other sensitive documents were stored in a locked drawer in the PI&#x2019;s office. Participants were informed about their right to withdraw from the study at any time without any repercussions.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>A total of 29 candidate oncologists were contacted, with 5 (17%) of them declining participation due to unavailability for interview. Eventually, 24 oncologists were interviewed between November 19, 2024, and December 21, 2024, by which time data saturation was achieved. The interviews lasted 21.7 (range 16&#x2010;25) minutes on average. Notably, although the interviews were relatively short given the phenomenological nature of this study, the depth and richness of the data were not compromised as the busy schedules and direct communication style of Chinese oncologists enabled them to convey focused, meaningful insights efficiently. The participants were aged 42.0 (range 29&#x2010;53) years on average, including 9 (37%) female and 15 male (62%) participants. The participants had an average of 8.8 (range 1&#x2010;25) years in oncology. Of the participants, 7 (29%) had recommended AI chatbots to patients. <xref ref-type="table" rid="table1">Table 1</xref> summarizes the demographic characteristics of participants.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Summary of participant demographic characteristics.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Characteristics</td><td align="left" valign="bottom" colspan="2">Value (N=24), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Sex</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top" colspan="2">9 (37)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top" colspan="2">15 (62)</td></tr><tr><td align="left" valign="top" colspan="2">Age (range in years)</td><td align="left" valign="top" colspan="2"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;35</td><td align="left" valign="top" colspan="2">6 (25)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>35&#x2010;45</td><td align="left" valign="top" colspan="2">9 (37)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;45</td><td align="left" valign="top" colspan="2">9 (37)</td></tr><tr><td align="left" valign="top" colspan="2">Prior exposure to AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> chatbots</td><td align="left" valign="top" colspan="2"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Chinese chatbots</td><td align="left" valign="top" colspan="2">7 (29)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ChatGPT and Chinese chatbots</td><td align="left" valign="top" colspan="2">17 (71)</td></tr><tr><td align="left" valign="top" colspan="2">Ever recommended an AI chatbot to a patient</td><td align="left" valign="top" colspan="2"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top" colspan="2">7 (29)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top" colspan="2">17 (71)</td></tr><tr><td align="left" valign="top" colspan="2">Expressed reluctance to use an AI chatbot in clinical practice</td><td align="left" valign="top" colspan="2"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top" colspan="2">19 (79)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top" colspan="2">5 (21)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Key Themes</title><p>Three overarching themes with 7 subthemes emerged from the interviews, including the perceived benefits of AI chatbots, 5 significant concerns such as liability, misinformation, lack of personalization, patient privacy, and patient readiness, and the impact of chatbots on the doctor-patient relationship.</p><sec id="s3-2-1"><title>Theme 1: Perceived Benefits of AI Chatbots</title><p>Participants cited 2 main potential benefits of AI chatbots in oncology care regarding their ability to enhance accessibility and support chronic disease management. While most participants saw them generally as supplementary to professional care, some recognized their value in improving patient education and engagement, especially for routine queries or long-term disease management.</p><sec id="s3-2-1-1"><title>Subtheme 1.1: Accessibility and Convenience</title><p>The 24/7 availability of AI chatbots was the most frequently mentioned advantage. Participants noted that the tools could help bridge gaps in access to health information. By addressing nonurgent queries, they were believed to be an effective asset to reduce the workload of health care providers while enabling patients to seek basic information independently.</p><disp-quote><p>One biggest advantage [of the AI chatbots] is that they are readily available. Patients can access them at any time, even late at night. For minor concerns, such as looking up symptoms or side effects, patients don&#x2019;t have to wait to see a doctor.</p><attrib>P2</attrib></disp-quote><disp-quote><p>It&#x2019;s not feasible for patients to call a hospital or doctor for small queries. The AI [chatbots] is particularly useful in this situation. Patients can get immediate answers to their questions.</p><attrib>P7</attrib></disp-quote></sec><sec id="s3-2-1-2"><title>Subtheme 1.2: Potential for Chronic Condition Management</title><p>Participants also recognized the potential for AI chatbots to support patients managing chronic conditions, as a result of their high availability. By providing timely answers when patients have concerns or questions in their chronic disease management, the tools could help them adhere to treatment plans more effectively.</p><disp-quote><p>Patients with cancer have many questions about their care and rehabilitation on a daily basis. If the questions are not answered timely, they might lose track of how to manage their care effectively. The AI [chatbots] can fill this gap. They can provide instant answers, for example about new symptoms, medications, side effects, or even dietary recommendations.</p><attrib>P10</attrib></disp-quote></sec></sec></sec><sec id="s3-3"><title>Theme 2: Concerns</title><p>Participants expressed significant concerns about integrating AI chatbots into real-world oncology care. The concerns primarily revolved around issues of liability, accuracy, lack of personalization, patient privacy and data security, and the readiness of patients to use the tools effectively. Some participants even emphasized that the challenges must be addressed before chatbots can be widely recommended or trusted in clinical practice.</p><sec id="s3-3-1"><title>Subtheme 2.1: Liability Issues</title><p>The question of accountability was a recurring concern among most participants, even those who had already recommended the tools to patients. Oncologists were uncertain about who would bear responsibility if patients experienced adverse outcomes after following chatbot recommendations. This led to hesitation in endorsing or recommending them in clinical practice.</p><disp-quote><p>For certain, liability is the biggest concern. If a patient follows advice given by AI and something goes wrong, who will be held responsible? As a doctor, I cannot recommend a tool to patients unless I myself am certain whether it&#x2019;s safe and reliable and know that I&#x2019;m safe from the liabilities. Too much risk at this point.</p><attrib>P6</attrib></disp-quote><disp-quote><p>Patients are not [medical] professionals. They are likely to assume that the information from a tool recommended by a doctor or hospital is trustworthy. But what if the AI [chatbot] provides wrong advice? such as incorrect source material or a programming error? It&#x2019;s not just the chatbot developers who will face troubles. We doctors, too. What if something goes wrong with the patient after they follow the AI [chatbot&#x2019;s] advice, for example, taking a wrong medication? Who will be held responsible?</p><attrib>P19</attrib></disp-quote></sec><sec id="s3-3-2"><title>Subtheme 2.2: Misinformation Risk</title><p>Another frequently raised concern was the risk of chatbots providing inaccurate or misleading information. This was particularly worrisome for patients with low health literacy, who might misinterpret chatbot responses or take them at face value without consulting a health care professional.</p><disp-quote><p>I have always been skeptical about the AI [chatbot&#x2019;s] responses. Can they be such as outdated or incorrect because of wrong source data.</p><attrib>P11</attrib></disp-quote><disp-quote><p>Sometimes, AI [chatbots] can oversimplify complex medical information. For example, they might give a generic explanation of symptoms that doesn&#x2019;t account for the patient&#x2019;s conditions. It&#x2019;s fine if the patient doesn&#x2019;t act on it, but what if the patient doesn&#x2019;t really understand it and becomes unwilling to see a doctor because they already have the &#x201C;answer.&#x201D;</p><attrib>P24</attrib></disp-quote></sec><sec id="s3-3-3"><title>Subtheme 2.3: Lack of Personalization</title><p>Several participants indicated concern about the inability of chatbots to provide personalized recommendations. They emphasized that oncology care often requires advice tailored to individual patient needs that cannot be generalized.</p><disp-quote><p>Every patient is unique, especially in oncology. I have tried the AI [chatbots] myself. Most of them don&#x2019;t functionalities to enter information on patient&#x2019;s personal conditions. The responses are generic... Yes, you can manually enter a patient&#x2019;s condition, but it takes a lot of work and you have to enter it every time in a new chat session.</p><attrib>P5</attrib></disp-quote></sec><sec id="s3-3-4"><title>Subtheme 2.4: Patient Privacy and Data Security</title><p>Patient privacy and data security concerns were another significant issue raised by participants. Oncologists were wary about how patient data would be collected, stored, and used by AI systems and the companies behind them, especially given the sensitive nature of medical data.</p><disp-quote><p>The state is very serious about protecting patient privacy and medical data security. We are completely blind when it comes to the AI [chatbots]. Even if the patient is willing to enter their medical data, how do you know what the AI or the company would do with it? What if the information is leaked? Will I be held responsible because I recommended it to the patient?</p><attrib>P5</attrib></disp-quote></sec><sec id="s3-3-5"><title>Subtheme 2.5: Patient Readiness and Education</title><p>Participants emphasized that not all patients are equipped to use AI chatbots effectively. Factors such as age, technological literacy, and familiarity with digital tools could limit the accessibility of the technologies for certain populations.</p><disp-quote><p>Some older patients don&#x2019;t even know how to use their smartphones. It could be even more difficult for them to AI.</p><attrib>P7</attrib></disp-quote><disp-quote><p>There is this possible usability issue. Some patients, for example older patients or those who are not familiar with digital products, they may not be able to use the AI [chatbots] effectively.</p><attrib>P22</attrib></disp-quote></sec></sec><sec id="s3-4"><title>Theme 3: Impact on Doctor-Patient Dynamics</title><p>Participants discussed how chatbots could influence the dynamics of doctor-patient interactions. While some believed chatbots might enhance communication by helping patients prepare better for consultations.</p><disp-quote><p>AI [chatbots] can help patients better prepare for subsequent consultations. For example, they may already have asked basic questions and gotten answers. This allows them to focus on more complex issues during the consultation.</p><attrib>P12</attrib></disp-quote><p>On the other hand, some others expressed concern that overreliance on the tools could lead to conflicts or misunderstandings.</p><disp-quote><p>There is a risk. Some patients might trust the chatbot more than the doctor. If the AI&#x2019;s advice contradicts the doctor&#x2019;s, this could create unnecessary tension, even undermine trust in us doctors.</p><attrib>P16</attrib></disp-quote></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>In this qualitative study of Chinese oncologists, we investigated their perceptions and attitudes toward endorsing AI-driven chatbots to patients with cancer. Three key themes were revealed from analysis of interview transcriptions, including perceived benefits, significant concerns, and impacts on doctor-patient dynamics. Benefits included enhanced accessibility and potential support for chronic condition management. Concerns centered on liability, misinformation, lack of personalization, privacy and data security risks, and patient readiness and education. Oncologists stressed a dual impact of AI chatbots on doctor-patient dynamics, recognizing potential for improved communication and risks of trust erosion due to overreliance on AI.</p></sec><sec id="s4-2"><title>Comparison to Prior Work</title><p>According to our findings, fewer than a third of participants had recommended the tools to patients, despite all having used chatbots themselves. This hesitation reflects a cautious approach among oncologists, shaped by a balance between recognizing the potential benefits of chatbots, such as enhancing accessibility and supporting chronic disease management, and grappling with significant concerns, including liability, misinformation, lack of personalization, and privacy and data security concerns. The finding is consistent with prior reports on Chinese oncologists&#x2019; perceptions and attitudes [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. There seems to be a gap between personal experience with AI tools and professional endorsement. This suggests significant challenges in integrating such technologies into real-world oncology care.</p><p>In addition to their high availability, a widely recognized strength of LLM AIs in current literature [<xref ref-type="bibr" rid="ref30">30</xref>], participants emphasized the potential of AI chatbots to support chronic disease management, particularly in cancer care. Their 24/7 accessibility enables patients to address concerns promptly, which is essential for maintaining adherence to complex treatment plans. Participants noted that timely responses from chatbots could prevent patients from feeling neglected or uncertain. This may enhance their ability to manage their care independently. The finding is consistent with existing studies that LLM AIs improve aspects of chronic disease management [<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref34">34</xref>]. AI chatbots are therefore promising as supplementary tools in oncology care, with the potential to improve patient engagement, treatment adherence, and overall outcomes.</p><p>As the main findings of this study, participants expressed substantial concerns about recommending AI chatbots to patients in real-world practice, including liability, misinformation, lack of personalization, patient privacy and data security, and the readiness of patients. The issue of liability was particularly prominent. In particular, many participants questioned who would be held accountable if patients experience adverse outcomes after following chatbot advice. Even those who had previously recommended chatbots were hesitant, citing liability concerns as a major barrier. Liability concerns are a common challenge when integrating new technologies into formal or professional processes.</p><p>In health care, previous studies have documented similar apprehensions with the adoption of telemedicine platforms and wearable health devices. For instance, in telemedicine implementation, questions arose regarding who would be held responsible if a remote consultation resulted in a misdiagnosis due to technological limitations [<xref ref-type="bibr" rid="ref35">35</xref>]. Similarly, the use of wearable health monitors, such as fitness trackers or medical-grade devices such as continuous glucose monitors, has raised concerns about the accuracy of the data and the potential consequences of acting on incorrect or incomplete information [<xref ref-type="bibr" rid="ref36">36</xref>]. Therefore, liability concerns are not unique to AI chatbots but are a recurring theme when technology intersects with professional accountability.</p><p>To address this issue, solutions at multiple levels are necessary. At the policy level, clear regulations and guidelines must be established to define the appropriate use of AI chatbots in clinical practice and delineate accountability in cases of adverse outcomes. Such policies should include standards for chatbot development, data validation, and evidence-based responses to ensure their reliability. Additionally, the involvement of professional regulatory bodies to formally approve and monitor the use of these tools can provide an added layer of accountability and confidence for both patients and providers.</p><p>At the institutional level, hospitals and health care organizations must play a pivotal role in facilitating the safe integration of AI chatbots. Formal endorsement by institutions, including selecting validated tools, development of hospital-endorsed chatbot guidelines, provision of AI literacy training for care providers and patients, and integrating them into existing clinical workflows, can help establish trust. Additionally, providing training for health care providers on how to endorse and monitor chatbot use and ensuring that patients are aware of the chatbot&#x2019;s capabilities and limitations are essential steps. Such institutional involvement would shift some of the accountability burden from individual practitioners to a broader, system-level responsibility, thereby reducing hesitancy among health care providers.</p><p>Misinformation is a frequently reported concern with LLM AI chatbots, including issues related to accuracy, the quality of training datasets, the reliability of the source data, and so-called &#x201C;hallucinations&#x201D; [<xref ref-type="bibr" rid="ref37">37</xref>]. LLMs might rely on vast and unfiltered internet data to generate responses, which can contain inaccuracies, biases, and outdated information [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Hallucinations are where the chatbot generates plausible-sounding but factually incorrect or irrelevant responses [<xref ref-type="bibr" rid="ref40">40</xref>]. Additionally, the inherent variance in responses to similar prompts could further undermine their reliability [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. These are realistic risks for patients seeking consistent and accurate information.</p><p>To mitigate the concerns, it is critical to test and validate AI chatbots rigorously before recommending them to patients. Health care institutions should select chatbots based on performance in controlled validation studies, focusing on accuracy, consistency, and their ability to provide evidence-based responses. If existing LLMs fail to meet clinical needs, institutions could consider customizing specialized LLM agents trained on validated medical datasets and updated guidelines specific to oncology. Regular updates and testing are also essential to ensure that chatbots remain aligned with current medical knowledge and best practices.</p><p>User feedback is another valuable tool for addressing misinformation. Implementing processes to gather and analyze feedback from both patients and health care providers can help identify and rectify inaccuracies. Improving the prompting skills can also enhance the accuracy of chatbot responses [<xref ref-type="bibr" rid="ref43">43</xref>]. Training users to frame their queries effectively, such as including relevant context and specific details, can reduce ambiguities and improve the relevance of the chatbot&#x2019;s answers. Providing tutorial materials or conducting workshops on effective questioning techniques would be a practical way to enhance user interactions with chatbots. Similarly, physicians should be equipped to guide patients on how to use chatbots responsibly and effectively. Finally, clear instructions on the limitations of AI chatbots should accompany their implementation. Patients must understand that chatbots are supplementary tools, not substitutes for professional medical advice.</p><p>The concern over the lack of personalization in AI chatbots may not be as substantial as perceived by participants. Improved prompting skills, where users provide specific details about their health conditions or concerns, can significantly enhance the relevance and accuracy of responses. For example, entering contextual information such as medications, symptoms, or treatment history can allow chatbots to tailor recommendations more effectively. Training both patients and health care providers to use precise and structured prompts can help bridge the gap in personalization.</p><p>Moreover, some chatbots are already equipped with functionalities to integrate personal health data to deliver more customized responses. For instance, iFlyhealth, a Chinese AI chatbot that allows users to input their medical records, health checkup reports, and other personal information, demonstrates the potential to provide contextually relevant guidance [<xref ref-type="bibr" rid="ref44">44</xref>]. The functionalities enable chatbots to adapt their advice to the unique needs of individual users, especially in oncology care, where personalized care is critical.</p><p>However, the integration of personal health data introduces a parallel concern: patient privacy and data security. Participants in this study expressed concerns about the risk of sensitive health information being mishandled or accessed without consent. For chatbots to achieve meaningful personalization without compromising privacy, robust data security measures must be in place, such as end-to-end encryption, secure storage systems, and strict access controls. Additionally, transparent communication with users about how their data will be collected, stored, and used is essential to build trust and mitigate privacy concerns [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>]. Balancing the benefits of personalization with the need for stringent privacy protections is a critical challenge for the adoption of AI chatbots in oncology care. While personalization enhances the utility, addressing privacy concerns is pivotal in ensuring their acceptance and widespread use. Future research should explore ways to achieve this balance, including the development of secure, locally hosted AI models with minimized data exposure.</p><p>Patient readiness and education are important considerations when adopting novel technologies such as AI chatbots [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. However, this should not be overstressed to the detriment of broader implementation. While some patients, particularly older adults or those less familiar with technology, may face challenges in using chatbots, the principle of adoption should focus on serving the majority. Most patients are likely to adapt quickly. Those who are less ready should not be a cause to hinder the rollout of the tools. Instead, targeted efforts, such as simplified chatbot designs, caregiver assistance, or personalized training, can address their needs.</p><p>The integration of AI-driven chatbots in oncology care presents a dual impact on doctor-patient dynamics. On one hand, the tools offer enhanced accessibility and convenience, potentially improving patient preparation for consultations by addressing routine inquiries and freeing up time for more complex discussions. This can streamline communication and enhance the efficiency of health care delivery. On the other hand, there are concerns that patients might overrely on chatbots, potentially leading to trust issues if their advice contradicts that of health care providers. In the Chinese context, cultural factors may further influence how patients perceive and interact with the technologies. This finding aligns with prior reports [<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. Clear communication about the role and limitations of chatbots is essential, in addition to developing guidelines to ensure that such tools complement rather than replace human interaction. A balanced approach is crucial to harness the benefits of AI while preserving the integrity of the doctor-patient relationship in oncology care.</p></sec><sec id="s4-3"><title>Limitations and Future Directions</title><p>This study has several limitations. First, conducting semistructured interviews via phone calls inherently limits the collection of nonverbal cues, which may hinder a full understanding of the emotional context behind responses. Although the interviewer initiated a brief ice-breaking conversation to foster rapport, the absence of visual feedback remains a constraint. Second, the requirement of a minimum of 2 years of clinical experience ensured that participants had sufficient professional and technical exposure. However, it may have excluded newer practitioners who could offer innovative perspectives on the integration of digital health technologies. Future research should consider broadening the participant criteria to enhance representativeness. Third, we did not assess intercoder reliability or externally validate the coding process, but used independent coding and consensus among researchers. A more stringent process may further enhance the rigor of future studies. Fourth, as a qualitative study focused on a specific group of Chinese oncologists from selected hospitals, the findings should be interpreted and adopted with caution in other clinical settings or cultural contexts. For example, the dynamics of patient challenges to an oncologist&#x2019;s judgment or issues of physician liability in endorsing an AI chatbot may differ across regions. Finally, as participants were known professionally to the interviewer, this familiarity might have inhibited candid discussion of controversial opinions and confined recruitment to those willing to engage in interviews, thereby narrowing the range of perspectives captured. These limitations should be taken into account when interpreting and applying the study&#x2019;s findings. Follow-up qualitative and further quantitative studies may evaluate the long-term evolution of oncologists&#x2019; perceptions and attitudes, as well as the impact of AI-driven chatbots on patient outcomes and care efficiency.</p></sec><sec id="s4-4"><title>Implications for Practice and Policy Making</title><p>Clinicians should be guided by clear, evidence-based protocols and institutional policies that address key concerns such as liability, accuracy, and data privacy. Training both health care providers and patients on the appropriate use of these tools is critical, particularly to ensure that chatbots are leveraged as supportive adjuncts rather than replacements for professional advice. Moreover, a collaborative framework involving regulatory bodies can foster the development of robust validation processes and real-time monitoring systems, ultimately ensuring that chatbot apps enhance, rather than compromise, the integrity of the doctor-patient relationship and patient safety in oncology practice.</p></sec><sec id="s4-5"><title>Conclusions</title><p>While recognizing the potential of AI-driven chatbots to enhance accessibility of health information and chronic disease management, Chinese oncologists report significant concerns, including liability, misinformation, lack of personalization, privacy and data security risks, and patient readiness. Addressing the challenges requires comprehensive solutions, such as clear policies and guidelines, rigorous testing and validation, institutional endorsement, and robust patient and provider education. Future efforts should focus on resolving the barriers while leveraging the strengths of AI technology to support patient-centered care in a safe, effective, and ethical manner.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available due to institutional privacy policy but are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb3">PI</term><def><p>primary investigator</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cung</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sosa</surname><given-names>B</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>HS</given-names> </name><etal/></person-group><article-title>The performance of artificial intelligence chatbot large language models to address skeletal biology and bone health queries</article-title><source>J Bone Miner Res</source><year>2024</year><month>03</month><day>22</day><volume>39</volume><issue>2</issue><fpage>106</fpage><lpage>115</lpage><pub-id pub-id-type="doi">10.1093/jbmr/zjad007</pub-id><pub-id pub-id-type="medline">38477743</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schukow</surname><given-names>C</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Landgrebe</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Application of ChatGPT in routine diagnostic pathology: promises, pitfalls, and potential future directions</article-title><source>Adv Anat Pathol</source><year>2024</year><month>01</month><day>1</day><volume>31</volume><issue>1</issue><fpage>15</fpage><lpage>21</lpage><pub-id pub-id-type="doi">10.1097/PAP.0000000000000406</pub-id><pub-id pub-id-type="medline">37501529</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kurniawan</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Handiyani</surname><given-names>H</given-names> </name><name name-style="western"><surname>Nuraini</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hariyati</surname><given-names>RTS</given-names> </name><name name-style="western"><surname>Sutrisno</surname><given-names>S</given-names> </name></person-group><article-title>A systematic review of artificial intelligence-powered (AI-powered) chatbot intervention for managing chronic illness</article-title><source>Ann Med</source><year>2024</year><month>12</month><volume>56</volume><issue>1</issue><fpage>2302980</fpage><pub-id pub-id-type="doi">10.1080/07853890.2024.2302980</pub-id><pub-id pub-id-type="medline">38466897</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mhatre</surname><given-names>A</given-names> </name><name name-style="western"><surname>R. Warhade</surname><given-names>S</given-names> </name><name name-style="western"><surname>Pawar</surname><given-names>O</given-names> </name><name name-style="western"><surname>Kokate</surname><given-names>S</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>S</given-names> </name><name name-style="western"><surname>M</surname><given-names>E</given-names> </name></person-group><article-title>Leveraging LLM: implementing an advanced AI chatbot for healthcare</article-title><source>Int J Innovative Sci Res Technol</source><year>2024</year><fpage>3144</fpage><lpage>3151</lpage><pub-id pub-id-type="doi">10.38124/ijisrt/IJISRT24MAY1964</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Athota</surname><given-names>L</given-names> </name><name name-style="western"><surname>Shukla</surname><given-names>VK</given-names> </name><name name-style="western"><surname>Pandey</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rana</surname><given-names>A</given-names> </name></person-group><article-title>Chatbot for healthcare system using artificial intelligence</article-title><conf-name>2020 8th International Conference on Reliability, Infocom Technologies and Optimization (Trends and Future Directions) (ICRITO)</conf-name><conf-date>Jun 4-5, 2020</conf-date><conf-loc>Noida, India</conf-loc><pub-id pub-id-type="doi">10.1109/ICRITO48877.2020.9197833</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tustumi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Andreollo</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Aguilar-Nascimento</surname><given-names>J de</given-names> </name></person-group><article-title>Future of the language models in healthcare: the role of Chatgpt</article-title><source>Arq Bras Cir Dig</source><year>2023</year><volume>36</volume><fpage>e1727</fpage><pub-id pub-id-type="doi">10.1590/0102-672020230002e1727</pub-id><pub-id pub-id-type="medline">37162073</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Shinde</surname><given-names>NV</given-names> </name><name name-style="western"><surname>Akhade</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bagad</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bhavsar</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wagh</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Kamble</surname><given-names>A</given-names> </name></person-group><article-title>Healthcare chatbot system using artificial intelligence</article-title><conf-name>2021 5th International Conference on Trends in Electronics and Informatics (ICOEI)</conf-name><conf-date>Jun 3-5, 2021</conf-date><conf-loc>Tirunelveli, India</conf-loc><pub-id pub-id-type="doi">10.1109/ICOEI51242.2021.9452902</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Wen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Norel</surname><given-names>R</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Stappenbeck</surname><given-names>T</given-names> </name><name name-style="western"><surname>Zulkernine</surname><given-names>F</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>H</given-names> </name></person-group><article-title>Leveraging large language models for patient engagement</article-title><conf-name>The Power of Conversational AI in Digital Health 2024 IEEE International Conference on Digital Health (ICDH)</conf-name><conf-date>Jul 7-13, 2024</conf-date><conf-loc>Shenzhen, China</conf-loc><pub-id pub-id-type="doi">10.1109/ICDH62654.2024.00027</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Jeli&#x0107;</surname><given-names>G</given-names> </name><name name-style="western"><surname>Tartalja</surname><given-names>DM</given-names> </name></person-group><article-title>How can AI-powered solutions improve communication in healthcare?</article-title><conf-name>7th International Scientific Conference ITEMA Recent Advances in Information Technology, Tourism, Economics, Management and Agriculture</conf-name><conf-date>Oct 26, 2023</conf-date><conf-loc>Croatia</conf-loc><pub-id pub-id-type="doi">10.31410/ITEMA.S.P.2023.81</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Hamidi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>K</given-names> </name></person-group><article-title>Evaluation of AI chatbots for patient-specific EHR questions</article-title><source>arXiv</source><comment>Preprint posted online on  Jun 5, 2023</comment><pub-id pub-id-type="doi">10.48550/arXiv.2306.02549</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ferraris</surname><given-names>G</given-names> </name><name name-style="western"><surname>Monzani</surname><given-names>D</given-names> </name><name name-style="western"><surname>Coppini</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Barriers to and facilitators of online health information-seeking behaviours among cancer patients: a systematic review</article-title><source>Digital Health</source><year>2023</year><volume>9</volume><fpage>20552076231210663</fpage><pub-id pub-id-type="doi">10.1177/20552076231210663</pub-id><pub-id pub-id-type="medline">38107979</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Grace</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Schweers</surname><given-names>L</given-names> </name><name name-style="western"><surname>Anazodo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Freyer</surname><given-names>DR</given-names> </name></person-group><article-title>Evaluating and providing quality health information for adolescents and young adults with cancer</article-title><source>Pediatr Blood Cancer</source><year>2019</year><month>10</month><volume>66</volume><issue>10</issue><fpage>e27931</fpage><pub-id pub-id-type="doi">10.1002/pbc.27931</pub-id><pub-id pub-id-type="medline">31322817</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghorbani</surname><given-names>F</given-names> </name><name name-style="western"><surname>Zare</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nabavi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Vashani</surname><given-names>H</given-names> </name><name name-style="western"><surname>Bari</surname><given-names>A</given-names> </name></person-group><article-title>Effect of education and telephone counseling on caregiver strain and unmet needs in family caregivers and self-care behaviors in patients with cancer: a randomized clinical trial</article-title><source>Evidence Based Care</source><year>2020</year><volume>10</volume><fpage>51</fpage><lpage>60</lpage><pub-id pub-id-type="doi">10.22038/EBCJ.2020.45647.2244</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mart&#x00ED;nez-Miranda</surname><given-names>P</given-names> </name><name name-style="western"><surname>Casuso-Holgado</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Jes&#x00FA;s Jim&#x00E9;nez-Rejano</surname><given-names>J</given-names> </name></person-group><article-title>Effect of patient education on quality-of-life, pain and fatigue in breast cancer survivors: a systematic review and meta-analysis</article-title><source>Clin Rehabil</source><year>2021</year><month>12</month><volume>35</volume><issue>12</issue><fpage>1722</fpage><lpage>1742</lpage><pub-id pub-id-type="doi">10.1177/02692155211031081</pub-id><pub-id pub-id-type="medline">34266300</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Teplinsky</surname><given-names>E</given-names> </name><name name-style="western"><surname>Ponce</surname><given-names>SB</given-names> </name><name name-style="western"><surname>Drake</surname><given-names>EK</given-names> </name><etal/></person-group><article-title>Online medical misinformation in cancer: distinguishing fact from fiction</article-title><source>JCO Oncol Pract</source><year>2022</year><month>08</month><volume>18</volume><issue>8</issue><fpage>584</fpage><lpage>589</lpage><pub-id pub-id-type="doi">10.1200/OP.21.00764</pub-id><pub-id pub-id-type="medline">35357887</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loeb</surname><given-names>S</given-names> </name><name name-style="western"><surname>Langford</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Bragg</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Sherman</surname><given-names>R</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>JM</given-names> </name></person-group><article-title>Cancer misinformation on social media</article-title><source>CA Cancer J Clin</source><year>2024</year><volume>74</volume><issue>5</issue><fpage>453</fpage><lpage>464</lpage><pub-id pub-id-type="doi">10.3322/caac.21857</pub-id><pub-id pub-id-type="medline">38896503</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sallam</surname><given-names>M</given-names> </name></person-group><article-title>ChatGPT utility in healthcare education, research, and practice: systematic review on the promising perspectives and valid concerns</article-title><source>Healthcare (Basel)</source><year>2023</year><month>03</month><day>19</day><volume>11</volume><issue>6</issue><fpage>887</fpage><pub-id pub-id-type="doi">10.3390/healthcare11060887</pub-id><pub-id pub-id-type="medline">36981544</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Toiv</surname><given-names>A</given-names> </name><name name-style="western"><surname>Saleh</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ishak</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Digesting digital health: a study of appropriateness and readability of ChatGPT-generated gastroenterological information</article-title><source>Clin Transl Gastroenterol</source><year>2024</year><month>11</month><day>1</day><volume>15</volume><issue>11</issue><fpage>e00765</fpage><pub-id pub-id-type="doi">10.14309/ctg.0000000000000765</pub-id><pub-id pub-id-type="medline">39212302</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wei</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>B</given-names> </name><name name-style="western"><surname>Jin</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>X</given-names> </name></person-group><article-title>Evaluation of ChatGPT-generated medical responses: a systematic review and meta-analysis</article-title><source>J Biomed Inform</source><year>2024</year><month>03</month><volume>151</volume><fpage>104620</fpage><pub-id pub-id-type="doi">10.1016/j.jbi.2024.104620</pub-id><pub-id pub-id-type="medline">38462064</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>KU</given-names> </name><name name-style="western"><surname>Lipsitz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dominici</surname><given-names>LS</given-names> </name><etal/></person-group><article-title>Generative artificial intelligence as a source of breast cancer information for patients: proceed with caution</article-title><source>Cancer</source><year>2025</year><month>01</month><day>1</day><volume>131</volume><issue>1</issue><fpage>e35521</fpage><pub-id pub-id-type="doi">10.1002/cncr.35521</pub-id><pub-id pub-id-type="medline">39211977</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frechette</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bitzas</surname><given-names>V</given-names> </name><name name-style="western"><surname>Aubry</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kilpatrick</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lavoie-Tremblay</surname><given-names>M</given-names> </name></person-group><article-title>Capturing lived experience: methodological considerations for interpretive phenomenological inquiry</article-title><source>Int J Qual Methods</source><year>2020</year><month>01</month><day>1</day><volume>19</volume><pub-id pub-id-type="doi">10.1177/1609406920907254</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Campbell</surname><given-names>S</given-names> </name><name name-style="western"><surname>Greenwood</surname><given-names>M</given-names> </name><name name-style="western"><surname>Prior</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Purposive sampling: complex or simple? Research case examples</article-title><source>J Res Nurs</source><year>2020</year><month>12</month><volume>25</volume><issue>8</issue><fpage>652</fpage><lpage>661</lpage><pub-id pub-id-type="doi">10.1177/1744987120927206</pub-id><pub-id pub-id-type="medline">34394687</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alam</surname><given-names>M</given-names> </name></person-group><article-title>A systematic qualitative case study: questions, data collection, NVivo analysis and saturation</article-title><source>QROM</source><year>2020</year><month>08</month><day>20</day><volume>16</volume><issue>1</issue><fpage>1</fpage><lpage>31</lpage><pub-id pub-id-type="doi">10.1108/QROM-09-2019-1825</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Praveena</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Sasikumar</surname><given-names>S</given-names> </name></person-group><article-title>Application of Colaizzi&#x2019;s method of data analysis in phenomenological research</article-title><source>Medico Legal Update</source><year>2021</year><pub-id pub-id-type="doi">10.37506/mlu.v21i2.2800</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Johnson</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Adkins</surname><given-names>D</given-names> </name><name name-style="western"><surname>Chauvin</surname><given-names>S</given-names> </name></person-group><article-title>A review of the quality indicators of rigor in qualitative research</article-title><source>Am J Pharm Educ</source><year>2020</year><month>01</month><volume>84</volume><issue>1</issue><fpage>7120</fpage><pub-id pub-id-type="doi">10.5688/ajpe7120</pub-id><pub-id pub-id-type="medline">32292186</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Harley</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cornelissen</surname><given-names>J</given-names> </name></person-group><article-title>Rigor with or without templates? The pursuit of methodological rigor in qualitative research</article-title><source>Organ Res Methods</source><year>2022</year><month>04</month><volume>25</volume><issue>2</issue><fpage>239</fpage><lpage>261</lpage><pub-id pub-id-type="doi">10.1177/1094428120937786</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dyar</surname><given-names>KL</given-names> </name></person-group><article-title>Qualitative inquiry in nursing: creating rigor</article-title><source>Nurs Forum</source><year>2022</year><month>01</month><volume>57</volume><issue>1</issue><fpage>187</fpage><lpage>200</lpage><pub-id pub-id-type="doi">10.1111/nuf.12661</pub-id><pub-id pub-id-type="medline">34655435</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>M</given-names> </name><name name-style="western"><surname>Xiong</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dickson</surname><given-names>C</given-names> </name></person-group><article-title>Chinese oncologists&#x2019; perspectives on integrating AI into clinical practice: cross-sectional survey study</article-title><source>JMIR Form Res</source><year>2024</year><month>06</month><day>5</day><volume>8</volume><fpage>e53918</fpage><pub-id pub-id-type="doi">10.2196/53918</pub-id><pub-id pub-id-type="medline">38838307</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>M</given-names> </name><name name-style="western"><surname>Xiong</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>B</given-names> </name></person-group><article-title>Attitudes and perceptions of Chinese oncologists towards artificial intelligence in healthcare: a cross-sectional survey</article-title><source>Front Digital Health</source><year>2024</year><volume>6</volume><fpage>1371302</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2024.1371302</pub-id><pub-id pub-id-type="medline">39290363</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khan</surname><given-names>N</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Koubaa</surname><given-names>A</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Salleh</surname><given-names>R bin</given-names> </name></person-group><article-title>Global insights and the impact of generative AI-ChatGPT on multidisciplinary: a systematic review and bibliometric analysis</article-title><source>Conn Sci</source><year>2024</year><month>12</month><day>31</day><volume>36</volume><issue>1</issue><pub-id pub-id-type="doi">10.1080/09540091.2024.2353630</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dave</surname><given-names>P</given-names> </name></person-group><article-title>Using AI to increase medication adherence</article-title><source>Asian J Dental Health Sci</source><year>2024</year><volume>4</volume><issue>2</issue><fpage>38</fpage><lpage>43</lpage><pub-id pub-id-type="doi">10.22270/ajdhs.v4i2.80</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Babel</surname><given-names>A</given-names> </name><name name-style="western"><surname>Taneja</surname><given-names>R</given-names> </name><name name-style="western"><surname>Malvestiti</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Monaco</surname><given-names>A</given-names> </name><name name-style="western"><surname>Donde</surname><given-names>S</given-names> </name></person-group><article-title>Artificial intelligence solutions to increase medication adherence in patients with non-communicable diseases</article-title><source>Front Digital Health</source><year>2021</year><volume>3</volume><fpage>669869</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2021.669869</pub-id><pub-id pub-id-type="medline">34713142</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pavlopoulos</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rachiotis</surname><given-names>T</given-names> </name><name name-style="western"><surname>Maglogiannis</surname><given-names>I</given-names> </name></person-group><article-title>An overview of tools and technologies for anxiety and depression management using AI</article-title><source>Appl Sci (Basel)</source><year>2024</year><volume>14</volume><issue>19</issue><fpage>9068</fpage><pub-id pub-id-type="doi">10.3390/app14199068</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>A</given-names> </name></person-group><article-title>Empowering patients with AI-driven personalized medicine: a paradigm shift in chronic disease management</article-title><source>IJAR</source><year>2024</year><volume>12</volume><issue>8</issue><fpage>1031</fpage><lpage>1038</lpage><pub-id pub-id-type="doi">10.21474/IJAR01/19340</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nittari</surname><given-names>G</given-names> </name><name name-style="western"><surname>Khuman</surname><given-names>R</given-names> </name><name name-style="western"><surname>Baldoni</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Telemedicine practice: review of the current ethical and legal challenges</article-title><source>Telemed e-Health</source><year>2020</year><month>12</month><volume>26</volume><issue>12</issue><fpage>1427</fpage><lpage>1437</lpage><pub-id pub-id-type="doi">10.1089/tmj.2019.0158</pub-id><pub-id pub-id-type="medline">32049608</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shajari</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kuruvinashetti</surname><given-names>K</given-names> </name><name name-style="western"><surname>Komeili</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sundararaj</surname><given-names>U</given-names> </name></person-group><article-title>The emergence of AI-based wearable sensors for digital health technology: a review</article-title><source>Sensors (Basel)</source><year>2023</year><month>11</month><day>29</day><volume>23</volume><issue>23</issue><fpage>9498</fpage><pub-id pub-id-type="doi">10.3390/s23239498</pub-id><pub-id pub-id-type="medline">38067871</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Williamson</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Prybutok</surname><given-names>V</given-names> </name></person-group><article-title>The era of artificial intelligence deception: unraveling the complexities of false realities and emerging threats of misinformation</article-title><source>Information</source><year>2024</year><volume>15</volume><issue>6</issue><fpage>299</fpage><pub-id pub-id-type="doi">10.3390/info15060299</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Mousavi</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Alghisi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Riccardi</surname><given-names>G</given-names> </name></person-group><article-title>DyKnow: dynamically verifying time-sensitive factual knowledge in llms</article-title><year>2024</year><conf-name>Findings of the Association for Computational Linguistics</conf-name><conf-date>Aug 11-16, 2024</conf-date><conf-loc>Bangkok, Thailand</conf-loc><pub-id pub-id-type="doi">10.18653/v1/2024.findings-emnlp.471</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Perelkiewicz</surname><given-names>M</given-names> </name><name name-style="western"><surname>Poswiata</surname><given-names>R</given-names> </name></person-group><article-title>A review of the challenges with massive web-mined corpora used in large language models pre-training</article-title><source>arXiv</source><comment>Preprint posted online on  Jul 10, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2407.07630</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Agarwal</surname><given-names>V</given-names> </name><name name-style="western"><surname>Jin</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chandra</surname><given-names>M</given-names> </name><name name-style="western"><surname>Choudhury</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kumar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sastry</surname><given-names>N</given-names> </name></person-group><article-title>MedHalu: hallucinations in responses to healthcare queries by large language models</article-title><source>arXiv</source><comment>Preprint posted online on  Sep 29, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2409.19492</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Cao</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cai</surname><given-names>D</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zou</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lam</surname><given-names>W</given-names> </name></person-group><article-title>On the worst prompt performance of large language models</article-title><source>arXiv</source><comment>Preprint posted online on  Jun 8, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2406.10248</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Bonagiri</surname><given-names>V</given-names> </name><name name-style="western"><surname>Vennam</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gaur</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kumaraguru</surname><given-names>P</given-names> </name></person-group><article-title>Measuring moral inconsistencies in large language models</article-title><source>arXiv</source><comment>Preprint posted online on  Jan 26, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2402.01719</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Balaji</surname><given-names>DK</given-names> </name><name name-style="western"><surname>Lokesha</surname><given-names>A</given-names> </name><name name-style="western"><surname>G</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Optimizing prompt length and specificity for enhanced AI chatbot responses</article-title><source>ijcsrr</source><year>2024</year><volume>07</volume><issue>9</issue><pub-id pub-id-type="doi">10.47191/ijcsrr/V7-i9-61</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="web"><article-title>About us</article-title><source>Xunfei Healthcare</source><access-date>2024-12-10</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.iflyhealth.com/en/about.html">https://www.iflyhealth.com/en/about.html</ext-link></comment></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>YL</given-names> </name><name name-style="western"><surname>Por</surname><given-names>LY</given-names> </name><name name-style="western"><surname>Ku</surname><given-names>CS</given-names> </name></person-group><article-title>A systematic literature review of information security in chatbots</article-title><source>Appl Sci (Basel)</source><year>2023</year><volume>13</volume><issue>11</issue><fpage>6355</fpage><pub-id pub-id-type="doi">10.3390/app13116355</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>May</surname><given-names>R</given-names> </name><name name-style="western"><surname>Denecke</surname><given-names>K</given-names> </name></person-group><article-title>Security, privacy, and healthcare-related conversational agents: a scoping review</article-title><source>Inform Health Soc Care</source><year>2022</year><month>04</month><day>3</day><volume>47</volume><issue>2</issue><fpage>194</fpage><lpage>210</lpage><pub-id pub-id-type="doi">10.1080/17538157.2021.1983578</pub-id><pub-id pub-id-type="medline">34617857</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hasal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nowakov&#x00E1;</surname><given-names>J</given-names> </name><name name-style="western"><surname>Saghair</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Abdulla</surname><given-names>H</given-names> </name><name name-style="western"><surname>Sn&#x00E1;&#x0161;el</surname><given-names>V</given-names> </name><name name-style="western"><surname>Ogiela</surname><given-names>L</given-names> </name></person-group><article-title>Chatbots: security, privacy, data protection, and social aspects</article-title><source>Concurr Comput</source><year>2021</year><month>10</month><day>10</day><volume>33</volume><issue>19</issue><pub-id pub-id-type="doi">10.1002/cpe.6426</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smrke</surname><given-names>U</given-names> </name><name name-style="western"><surname>&#x0160;pes</surname><given-names>T</given-names> </name><name name-style="western"><surname>Mlakar</surname><given-names>I</given-names> </name><name name-style="western"><surname>Musil</surname><given-names>B</given-names> </name><name name-style="western"><surname>Plohl</surname><given-names>N</given-names> </name></person-group><article-title>Technophobia mediates the associations between age, education level, and readiness to adopt new (health) technology among aging adults</article-title><source>J Appl Gerontol</source><year>2025</year><month>03</month><volume>44</volume><issue>3</issue><fpage>497</fpage><lpage>507</lpage><pub-id pub-id-type="doi">10.1177/07334648241274260</pub-id><pub-id pub-id-type="medline">39177432</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jagde</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Shrivastava</surname><given-names>R</given-names> </name><name name-style="western"><surname>Feine</surname><given-names>J</given-names> </name><name name-style="western"><surname>Emami</surname><given-names>E</given-names> </name></person-group><article-title>Patients&#x2019; E-readiness to use E-health technologies for oral health</article-title><source>PLoS One</source><year>2021</year><volume>16</volume><issue>7</issue><fpage>e0253922</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0253922</pub-id><pub-id pub-id-type="medline">34252096</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hryciw</surname><given-names>BN</given-names> </name><name name-style="western"><surname>Fortin</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ghossein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kyeremanteng</surname><given-names>K</given-names> </name></person-group><article-title>Doctor-patient interactions in the age of AI: navigating innovation and expertise</article-title><source>Front Med (Lausanne)</source><year>2023</year><volume>10</volume><fpage>1241508</fpage><pub-id pub-id-type="doi">10.3389/fmed.2023.1241508</pub-id><pub-id pub-id-type="medline">37711734</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name><name name-style="western"><surname>Chaudhry</surname><given-names>Z</given-names> </name></person-group><article-title>Large language models and user trust: consequence of self-referential learning loop and the deskilling of health care professionals</article-title><source>J Med Internet Res</source><year>2024</year><month>04</month><day>25</day><volume>26</volume><fpage>e56764</fpage><pub-id pub-id-type="doi">10.2196/56764</pub-id><pub-id pub-id-type="medline">38662419</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sezgin</surname><given-names>E</given-names> </name></person-group><article-title>Artificial intelligence in healthcare: complementing, not replacing, doctors and healthcare providers</article-title><source>Digital Health</source><year>2023</year><volume>9</volume><fpage>20552076231186520</fpage><pub-id pub-id-type="doi">10.1177/20552076231186520</pub-id><pub-id pub-id-type="medline">37426593</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Interview guide.</p><media xlink:href="jmir_v27i1e71418_app1.pdf" xlink:title="PDF File, 212 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Reflexivity statement.</p><media xlink:href="jmir_v27i1e71418_app2.pdf" xlink:title="PDF File, 127 KB"/></supplementary-material></app-group></back></article>