<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e74428</article-id><article-id pub-id-type="doi">10.2196/74428</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Generative Artificial Intelligence in Primary Care: Qualitative Study of UK General Practitioners&#x2019; Views</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Blease</surname><given-names>Charlotte</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Garcia Sanchez</surname><given-names>Carolina</given-names></name><degrees>MMSc, MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Locher</surname><given-names>Cosima</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>McMillan</surname><given-names>Brian</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gaab</surname><given-names>Jens</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Torous</surname><given-names>John</given-names></name><degrees>MD, MBI</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib></contrib-group><aff id="aff1"><institution>Participatory eHealth &#x0026; Health data Research Group, Department of Women's and Children's Health, Uppsala University</institution><addr-line>MTC-huset, Dag Hammarskj&#x00F6;lds v&#x00E4;g 14B, 1 tr</addr-line><addr-line>Uppsala</addr-line><country>Sweden</country></aff><aff id="aff2"><institution>Clinical Psychology and Psychosomatics, Faculty of Psychology, University of Basel</institution><addr-line>Basel</addr-line><country>Switzerland</country></aff><aff id="aff3"><institution>Centre for Primary Care and Health Services Research, University of Manchester</institution><addr-line>Manchester</addr-line><country>United Kingdom</country></aff><aff id="aff4"><institution>Department of Psychiatry, Beth Israel Deaconess Medical Center, Harvard Medical School</institution><addr-line>Boston</addr-line><addr-line>MA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Mavragani</surname><given-names>Amaryllis</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Khosla</surname><given-names>Archit</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sivarajkumar</surname><given-names>Sonish</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to  Charlotte 
Blease, PhD, Participatory eHealth &#x0026; Health data Research Group, Department of Women's and Children's Health, Uppsala University, MTC-huset, Dag Hammarskj&#x00F6;lds v&#x00E4;g 14B, 1 tr, Uppsala, 752 37, Sweden, 46 184710000; <email>charlotte.blease@uu.se</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>6</day><month>8</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e74428</elocation-id><history><date date-type="received"><day>25</day><month>03</month><year>2025</year></date><date date-type="rev-recd"><day>24</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>25</day><month>06</month><year>2025</year></date></history><copyright-statement>&#x00A9; Charlotte Blease, Carolina Garcia Sanchez, Cosima Locher, Brian McMillan, Jens Gaab, John Torous. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 6.8.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e74428"/><abstract><sec><title>Background</title><p>The potential for generative artificial intelligence (GenAI) to assist with clinical tasks is the subject of ongoing debate within biomedical informatics and related fields.</p></sec><sec><title>Objective</title><p>This study aimed to explore general practitioners&#x2019; (GPs&#x2019;) opinions about GenAI on primary care.</p></sec><sec sec-type="methods"><title>Methods</title><p>In January 2025, we conducted a web-based survey of 1005 UK GPs&#x2019; experiences and opinions of GenAI in clinical practice. This study involved a qualitative inductive descriptive analysis of a written response (&#x201C;comments&#x201D;) to an open-ended question in the survey. After analysis, the interpretation of themes was also informed by the technology acceptance model.</p></sec><sec sec-type="results"><title>Results</title><p>Out of 1005 respondents, 611 GPs (61%) provided written comments in response to the free text question, totaling 7990 words. Comments were classified into 3 major themes and 8 subthemes in relation to GenAI in clinical practice. The major themes were (1) unfamiliarity, (2) ambivalence and anxiety, and (3) role in clinical tasks. &#x201C;Unfamiliarity&#x201D; encompassed a lack of experience and knowledge, and the need for training on GenAI. &#x201C;Ambivalence and anxiety&#x201D; included mixed expectations among GPs in relation to these tools, beliefs about diminished human connection, and skepticism about AI accountability. Finally, commenting on the role of GenAI in clinical tasks, GPs believed it would help with documentation. 
However, respondents questioned AI&#x2019;s clinical judgment and raised concerns about operational uncertainty concerning these tools. Female GPs were more likely to leave comments than male GPs, with female GPs making up 53% (324/611) of commenters compared with 41.1% (162/394) of noncommenters. Chi-square tests confirmed this difference (&#x03C7;&#x00B2;&#x2082;=14.6, <italic>P</italic>=.001). In addition, doctors who left comments were significantly more likely to have used GenAI in clinical practice compared with those who did not. Among commenters, 71.7% (438/611) had not used GenAI. However, noncommenters were even less likely to have used it, with 80.7% (318/394) reporting no use. A chi-square test confirmed this difference (&#x03C7;&#x00B2;&#x2081;=10.0, <italic>P</italic>=.002).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This study provides timely insights into UK GPs&#x2019; perspectives on the role, impact, and limitations of GenAI in primary care. However, the study has limitations. The qualitative data analyzed originates from a self-selected subset of respondents who chose to provide free-text comments, and these participants were more likely to have used GenAI tools in clinical practice. However, the substantial number of comments offers valuable insights into the diverse views held by GPs regarding GenAI. Furthermore, the majority of our respondents reported limited experience and training with these tools; however, many GPs perceived potential benefits of GenAI and ambient AI for documentation. Notably, 2 years after the widespread introduction of GenAI, GPs&#x2019; persistent lack of understanding and training remains a critical concern. 
More extensive qualitative work would provide a more in-depth understanding of GPs&#x2019; views.</p></sec></abstract><kwd-group><kwd>generative AI</kwd><kwd>general practice</kwd><kwd>primary care</kwd><kwd>large language models</kwd><kwd>education</kwd><kwd>training</kwd><kwd>online survey questionnaire</kwd><kwd>qualitative research.</kwd><kwd>artificial intelligence</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Since the launch of ChatGPT 3.5 (OpenAI) in November 2022, interest in large language model (LLM)&#x2013;powered chatbots has burgeoned, with increasing attention given to their potential applications within clinical practice. These models, built on Generative Pre-trained Transformer architectures, undergo extensive pretraining on large datasets before fine-tuning for specific tasks. By leveraging probabilistic language generation, they produce contextually relevant responses and engage in dynamic, conversational interactions while retaining contextual memory, distinguishing them from traditional search engines.</p><p>The potential clinical utility of these models is now recognized. Emerging research highlights their capacity to streamline workflows by supporting automated medical documentation through &#x201C;ambient&#x201D; or &#x201C;listening&#x201D; artificial intelligence (AI) [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref4">4</xref>], enhancing the accessibility and empathy of patient-facing clinical notes [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>], and assisting with differential diagnosis formulation [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. However, significant challenges persist. 
LLM-powered tools are prone to factual inaccuracies, or &#x201C;hallucinations,&#x201D; and may perpetuate biases related to race, gender, and disability, contributing to algorithmic discrimination in health care [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. In addition, the widespread availability of consumer tools, combined with their consummate capacity to engage users in &#x201C;conversations,&#x201D; raises significant concerns regarding patient privacy [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>Despite growing enthusiasm, most studies have evaluated generative artificial intelligence (GenAI) in controlled environments rather than examining real-world clinical adoption. Limited research has explored how physicians, particularly those in frontline health care roles, integrate GenAI into daily practice. Addressing this gap, a 2023 survey of 138 psychiatrists affiliated with the American Psychiatric Association reported that 43% (59/138) had used ChatGPT-3.5, and 33% (45/138) had used GPT-4.0 (OpenAI) for clinical queries [<xref ref-type="bibr" rid="ref14">14</xref>]. Similar findings emerged from a broader survey conducted in November 2023 among 938 UK public sector professionals, including National Health Service (NHS) staff members (225/938, 24%) and emergency service workers (141/938, 15%) [<xref ref-type="bibr" rid="ref15">15</xref>]. In this cohort, 45% reported awareness of colleagues using GenAI, and 22% confirmed personal use. In February 2024, we conducted a nationwide survey of 1006 UK general practitioners (GPs) to assess their adoption of GenAI into clinical practice [<xref ref-type="bibr" rid="ref16">16</xref>]. Among respondents, 20% (205/1006) had adopted GenAI tools, predominantly for documentation (47/160, 29%) and differential diagnosis support (45/160, 28%). 
Although interest in GenAI in health care is growing, empirical data on clinician adoption, particularly from surveys and qualitative studies, remain scarce.</p></sec><sec id="s1-2"><title>Objectives</title><p>Amid ongoing debates about AI&#x2019;s impact on the future of medical professions, the perspectives of practicing physicians have received limited attention [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. To address this gap, we conducted an online survey examining UK GPs&#x2019; experiences and opinions on the potential impact of GenAI in primary care. Recognizing the value of qualitative insights, we incorporated an open-ended survey question to explore GPs&#x2019; perspectives in greater depth. We aimed to provide a preliminary investigation into their views on how GenAI shapes primary care.</p><p>While the analysis followed an inductive, descriptive approach, we drew on the technology acceptance model (TAM) as a conceptual lens to help interpret the identified themes [<xref ref-type="bibr" rid="ref17">17</xref>]. TAM is a widely used framework for understanding users&#x2019; adoption of new technologies. It posits that acceptance is primarily influenced by design features of technology and their perceived usefulness and perceived ease of use, which shape attitudes and behavioral intentions. In health care, TAM has been applied to explore clinicians&#x2019; responses to digital tools, thereby offering a helpful lens for interpreting GPs&#x2019; varied perceptions of GenAI. To illuminate these themes, we applied TAM to our analysis; its constructs can help clarify how GPs responded to GenAI, particularly considering their limited exposure and varying levels of enthusiasm or concern.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Main Survey</title><p>A complete methodological description of the survey is available elsewhere [<xref ref-type="bibr" rid="ref18">18</xref>]. 
In summary, we conducted an online survey among random GPs registered with the Doctors website, the largest professional network for UK doctors affiliated with the General Medical Council (GMC). At the time of the study, the Doctors website had 254,741 members, representing approximately 65% of the 390,000 registered doctors in the United Kingdom. Eligible participants also included those who had opted in to receive survey invitations via email. The survey was part of a recurring monthly omnibus survey, which maintains a fixed sample size of 1000 participants. Respondents were required to complete all closed-ended questions to submit their responses; however, response to a single open-ended, free-text question was optional. Invitations to a random sample of GPs who are members of the Doctors website were distributed via email notifications or home page advertisements on the Doctors website, depending on user preferences. The survey was open from January 7, 2025, to January 26, 2025. During the period in which the survey was administered, 25,569 GPs were active on the platform. The online survey was divided into 4 sections, which explored participants&#x2019; experiences with, and opinions about, the use of GenAI in primary care. Before launch, the survey underwent pretesting and a pilot phase involving 5 UK-based GPs to assess usability and clarity. The final version of the survey (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) was designed to take 3&#x2010;5 minutes to complete. The study adhered to the CHERRIES (Checklist for Reporting Results of Internet E-Surveys) guidelines [<xref ref-type="bibr" rid="ref1">1</xref>] (<xref ref-type="supplementary-material" rid="app5">Checklist 1</xref>).</p></sec><sec id="s2-2"><title>Ethical Considerations</title><p>The study received ethical approval from the Faculty of Psychology, University of Basel, Switzerland (approval #030-24-1). 
Participants were assured of anonymity, and informed consent (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>) was obtained before participation. The survey was hosted on the Doctors website&#x2019;s secure platform, and all responses were encrypted and anonymized before data analysis. Email addresses and personal identifiers were removed before data transfer to the research team. The study complied with the European Union&#x2019;s General Data Protection Regulation (GDPR). As an incentive, participants received a &#x00A3;7.50 (US $8.80, &#x20AC;8.83) shopping voucher upon survey completion.</p></sec><sec id="s2-3"><title>Qualitative Component</title><p>To maximize the response rate for the qualitative component, as noted, we included a single, optional, open-ended question that allowed participants to respond in more detail to the topic of the survey. Specifically, we requested: &#x201C;Please add any comments about the topic or the survey. Please add 1&#x2010;2 brief comments.<italic>&#x201D;</italic></p><p>Descriptive qualitative data analysis was used, and we applied inductive thematic coding to the data [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Due to the limitations of the dataset&#x2014;the brevity of comments and responses as sentence fragments&#x2014;a full thematic analysis was not feasible [<xref ref-type="bibr" rid="ref21">21</xref>]. Responses were cleaned (comments such as &#x201C;none,&#x201D; &#x201C;n/a,&#x201D; and &#x201C;no comments&#x201D; were deleted), and remaining responses were imported into QCAmap (coUnity Software Development GmbH) for analysis. To familiarize themselves with the data, CB and JT conducted multiple readings of the comment transcripts. Following this, an inductive coding approach was used, where descriptive labels (&#x201C;codes&#x201D;) were assigned to each comment. 
Comments containing multiple meanings were assigned multiple codes accordingly. To ensure consistency, comments and their corresponding codes were reviewed and compared to identify patterns, similarities, and differences. CB and JT collaboratively discussed coding decisions, leading to further refinements where necessary. Finally, despite the limitations with the dataset, first-order codes were organized into second-order categories based on shared meaning, providing a structured summary of the responses.</p><p>Although the coding process was conducted inductively, without the use of a predefined theoretical framework, we subsequently drew on constructs from the TAM to support interpretation of the findings, namely, perceived usefulness and perceived ease of use of GenAI [<xref ref-type="bibr" rid="ref22">22</xref>]. This post hoc application of TAM helped us to consider how emergent themes might reflect underlying factors known to influence technology adoption, including perceived usefulness and ease of use.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>A total of 1141 unique visitors accessed the first page of the survey, and this number was used as the reference point for calculating view and participation rates. The view rate was considered 100% (1141/1141&#x00D7;100), reflecting direct exposure from both email and website recruitment channels. Of these, 1067 respondents provided consent to participate by selecting &#x201C;yes&#x201D; to the initial consent question, resulting in a participation rate of 94% (1067/1141&#x00D7;100). Among those who consented, 1005 respondents completed the full survey, yielding a completion rate of 94% (1005/1067 &#x00D7; 100). 
For clarity, 1141 represents the total number of unique visitors to the survey, 1067 the number of participants who consented, and 1005 the number who completed all survey items.</p><p>Out of 1005 respondents, 611 GPs (61%) provided written comments in response to the free-text question, totaling 7990 words. These comments were generally brief, ranging from short phrases to 1-3 sentences. As outlined in the quantitative survey, respondents were representative of UK GPs in terms of age and gender and from all regions of the United Kingdom [<xref ref-type="bibr" rid="ref18">18</xref>]. GPs who submitted comments were not significantly different from those who did not in terms of role, age, practice size, or whether they think practice could be affected by GenAI (<xref ref-type="table" rid="table1">Table 1</xref>). GPs who submitted comments differed significantly from those who did not, particularly by sex. Female GPs were more likely to leave comments than male GPs, with female GPs making up 53% (324/611) of commenters compared with 41.1% (162/394) of noncommenters. Chi-square tests confirmed this difference (&#x03C7;&#x00B2;&#x2082;=14.6, <italic>P</italic>=.001). In addition, doctors who left comments were significantly more likely to have used GenAI in clinical practice compared with those who did not. Among commenters, 71.7% (438/611) had not used GenAI. However, noncommenters were even less likely to have used it, with 80.7% (318/394) reporting no use. A chi-square test confirmed this difference (<italic>&#x03C7;&#x00B2;</italic>&#x2081;=10.0, <italic>P</italic>=.002). 
More detailed information about these variables and their categories is available in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>; deidentified, raw qualitative data are available in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Demographic and practice characteristics of UK general practitioners who submitted free-text comments versus those who did not in a 2025 national survey on generative artificial intelligence in primary care.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2" colspan="2">Variable</td><td align="left" valign="bottom" rowspan="2">Submitted comments (n=611), n (%)</td><td align="left" valign="bottom" rowspan="2">Did not submit comments (n=394), n (%)</td><td align="left" valign="bottom" colspan="2">Comparison</td></tr><tr><td align="left" valign="bottom">Chi-square (<italic>df</italic>)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Role: GP<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> partner or principal</td><td align="left" valign="top">246 (40.3)</td><td align="left" valign="top">193 (49)</td><td align="left" valign="top">7.9 (3)</td><td align="left" valign="top">.05</td></tr><tr><td align="left" valign="top" colspan="2">Sex: female</td><td align="left" valign="top">324 (53)</td><td align="left" valign="top">162 (41.1)</td><td align="left" valign="top">14.6 (2)</td><td align="left" valign="top">.001</td></tr><tr><td align="left" valign="top" colspan="2">Age&#x003E;46 y</td><td align="left" valign="top">334 (54.7)</td><td align="left" valign="top">212 (53.8)</td><td align="left" valign="top">0.04 (1)</td><td align="left" valign="top">.84</td></tr><tr><td align="left" valign="top" colspan="2">Practice place: large town or city (eg, 
Nottingham and Cardiff)</td><td align="left" valign="top">87 (14.2)</td><td align="left" valign="top">67 (17)</td><td align="left" valign="top">14.2 (5)</td><td align="left" valign="top">.01</td></tr><tr><td align="left" valign="top" colspan="2">Practice size&#x2265;10,001</td><td align="left" valign="top">330 (54)</td><td align="left" valign="top">196 (49.7)</td><td align="left" valign="top">1.5 (1)</td><td align="left" valign="top">.21</td></tr><tr><td align="left" valign="top" colspan="2">Used GenAI<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> to assist clinical practice: no</td><td align="left" valign="top">438 (71.7)</td><td align="left" valign="top">318 (80.7)</td><td align="left" valign="top">10.0 (1)</td><td align="left" valign="top">.002</td></tr><tr><td align="left" valign="top" colspan="2">Practice could be affected by GenAI: decrease my risk of having legal action taken against me</td><td align="left" valign="top">51 (8.3)</td><td align="left" valign="top">40 (10.2)</td><td align="left" valign="top">1.04 (3)</td><td align="left" valign="top">.79</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>GP: general practitioner.</p></fn><fn id="table1fn2"><p><sup>b</sup>GenAI: generative artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>Through an iterative thematic analysis of the comments, 3 key themes were identified regarding GPs&#x2019; perspectives on GenAI in clinical practice: (1) unfamiliarity, (2) ambivalence and anxiety, and (3) role in clinical tasks. These themes were further divided into 8 subthemes, each detailed in the following sections with illustrative quotes (<xref ref-type="other" rid="box1">Textbox 1</xref>), where parenthetical numbers indicate individual participant identifiers. We emphasize that while <xref ref-type="other" rid="box1">Textbox 1</xref> provides a summary of the major themes and subthemes, it is not intended as a hierarchical or conceptual model. 
Instead, it reflects a descriptive categorization based on the surface-level nature of the brief, diverse comments. For example, the subtheme &#x201C;mixed expectations&#x201D; captures a wide range of conflicting attitudes&#x2014;from skepticism and fear to optimism&#x2014;highlighting the spectrum of GP opinions rather than a single, discrete idea.</p><boxed-text id="box1"><title> Summary of major themes and subthemes from UK general practitioners&#x2019; free-text comments about generative artificial intelligence. This textbox presents a descriptive classification; themes are not hierarchically ordered, and subtheme granularity varies, reflecting the range and specificity of the source data.</title><p><bold>Unfamiliarity</bold></p><list list-type="bullet"><list-item><p>Lack of experience and knowledge</p></list-item><list-item><p>Need for training</p></list-item></list><p><bold>Ambivalence and anxiety</bold></p><list list-type="bullet"><list-item><p>Mixed expectations</p></list-item><list-item><p>Diminished human connection</p></list-item><list-item><p>Skepticism about artificial intelligence accountability</p></list-item></list><p><bold>Role in clinical tasks</bold></p><list list-type="bullet"><list-item><p>Help with documentation</p></list-item><list-item><p>Questioning artificial intelligence&#x2019;s clinical judgment</p></list-item><list-item><p>Operational uncertainty</p></list-item></list></boxed-text></sec><sec id="s3-2"><title>Unfamiliarity</title><sec id="s3-2-1"><title>Lack of Experience and Knowledge</title><p>Many participants noted that they had &#x201C;no experience&#x201D; with GenAI or &#x201C;haven&#x2019;t used it.&#x201D; Relatedly, multiple GPs stated that they had:</p><disp-quote><p>Not enough experience of this area to comment.</p><attrib>#98, sex: prefer not to say, 46&#x2010;55 y, have not used</attrib></disp-quote><p>This theme aligns with the broader survey finding that 75% of respondents had not used GenAI tools to assist with tasks 
in clinical practice [<xref ref-type="bibr" rid="ref18">18</xref>]. However, some GPs noted limited familiarity with these tools; for example:</p><disp-quote><p>Only started using recently&#x2026;</p><attrib>#58, male, 46&#x2010;55 y, used</attrib></disp-quote><disp-quote><p>No experience in this at all but aware of it.</p><attrib>#132, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Colleagues find it useful but I have not tried it yet.</p><attrib>#306, female, 36&#x2010;45 y, have not used</attrib></disp-quote><p>Multiple respondents also identified their lack of knowledge about GenAI, and some GPs were emphatic about their lack of understanding; for example:</p><disp-quote><p>The great unknown for me.</p><attrib>#242, female, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>Really no idea.</p><attrib>#823, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Total minefield and I have no knowledge in this area.</p><attrib>#899, female, 36&#x2010;46 y, have not used</attrib></disp-quote></sec><sec id="s3-2-2"><title>Need for Training</title><p>Reported lack of training and guidance was another common concern. 
Some GPs stated that &#x201C;GPs have no training in AI,&#x201D; and expressed their &#x201C;really limited exposure to AI in the workplace.&#x201D; Many participants connected their lack of familiarity with GenAI with the need for greater formal guidance; for example:</p><disp-quote><p>Need far more training on AI use in healthcare setting than currently available.</p><attrib>#997, male, 36&#x2010;45, No</attrib></disp-quote><disp-quote><p>I have little experience but I feel if we have some training [&#x2026;] this would be helpful in healthcare then I may be more comfortable with it.</p><attrib>#71, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>It needs to be delivered with inclusive training so less IT confident colleagues are not left behind.</p><attrib>#989, sex: prefer not to say, 36&#x2010;45 y, have not used</attrib></disp-quote><p>A number of GPs emphasized that training needed to be specific to primary care; for example:</p><disp-quote><p>Need more proactive support and resources to take on such tools to help support general practice utilize such tools.</p><attrib>#1056, male, 36&#x2010;46 y, have not used</attrib></disp-quote><disp-quote><p>I need more guidance on suitability for general practice in my location.</p><attrib>#259, male, 35 y or younger, have not used</attrib></disp-quote><disp-quote><p>&#x2026;would be hesitant to use it without a significant amount to training tailored to a GP setting.</p><attrib>#772, female, 36&#x2010;45 y, have not used</attrib></disp-quote></sec></sec><sec id="s3-3"><title>Ambivalence and Anxiety</title><sec id="s3-3-1"><title>Mixed Expectations</title><p>Another major subtheme was the contrast in expectations surrounding GenAI. Many comments were brief, capturing broad sentiments about its potential impact. 
Notably, many GPs expressed deep skepticism or mistrust regarding AI&#x2019;s role and consequences in primary care; for example,</p><disp-quote><p>Not used it and frightened of it being implemented.</p><attrib>#1059, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Theory sounds good. Dangerous in reality.</p><attrib>#515, male, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>I hope I get to retirement before all this nonsense takes over.</p><attrib>#637, male, 46&#x2010;55 y, have not used</attrib></disp-quote><p>Some described GenAI as irrelevant to general practice; for example:</p><disp-quote><p>To be honest AI not necessary in our domain.</p><attrib>#87, male, 46&#x2010;55 y, used</attrib></disp-quote><disp-quote><p>There is no role for AI in my role. We need real, not artificial, intelligence to look after patients.</p><attrib>#942, male, 46&#x2010;55 y, have not used</attrib></disp-quote><p>Some GPs voiced anxiety, linking their negative expectations to &#x201C;concerns about job security&#x201D; and the existential threat of AI to their profession; for example:</p><disp-quote><p>Could be the end of medicine as we currently know it.</p><attrib>#97, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>I dread to think of human doctors being replaced by AI.</p><attrib>#623, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>I would be very surprised if AI does not completely take over the role of GPs over the next 15 years.</p><attrib>#358, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>GPs are a dying breed. We had an informal discussion over our last Christmas meal if we need to now become IT professionals or even a bricklayer.</p><attrib>#985, male, 46&#x2010;55 y, used</attrib></disp-quote><p>In contrast, many others expressed uncertainty and caution about the role of GenAI in clinical practice. 
For example:</p><disp-quote><p>Unsure of its role yet.</p><attrib>#651, male, 56 y or older, have not used</attrib></disp-quote><disp-quote><p>I am not convinced that AI will help.</p><attrib>#113, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Very early to say. Lots of unknowns.</p><attrib>#810, male, 36&#x2010;45 y, have not used</attrib></disp-quote><p>However, reflecting the divergence of opinions, some GPs were &#x201C;very excited about this development&#x201D; and more optimistic about the usefulness of GenAI. For example:</p><disp-quote><p>This is here to stay and if anything have increased presence in our working life. So we as clinicians need to embrace it!</p><attrib>#312, female, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>I can&#x2019;t believe that it is taking so long to integrate AI into healthcare. It is very obvious that this is the way forward&#x2026;</p><attrib>#42, female, 56 y or older, have not used</attrib></disp-quote><disp-quote><p>It will be transformational for healthcare.</p><attrib>#512, female, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>It&#x2019;s a force for good.</p><attrib>#877, male, 46&#x2010;55 y, used</attrib></disp-quote></sec><sec id="s3-3-2"><title>Diminished Human Connection</title><p>Many GPs worried about the effects of GenAI on empathy and the risk that &#x201C;it will take away the human touch,&#x201D; undermine the face-to-face interaction, and that &#x201C;it will dehumanize medicine.&#x201D; For example:</p><disp-quote><p>I feel AI will not read patients and miss the social cues and hidden agendas patients have.</p><attrib>#130, female, 36&#x2010;45 y, have not used</attrib></disp-quote><p>Some respondents assumed that patients would view GenAI outputs as inferior when it comes to humanistic aspects of care; for example:</p><disp-quote><p>Removes any element of compassion/empathy. No patient really wants to be seen by a robot. 
Would you?</p><attrib>#906, female, 35 y or younger, have not used</attrib></disp-quote><disp-quote><p>It may be a useful assistant but cannot replace the genuine interest, empathy and intuition of an experienced clinician.</p><attrib>#53, female, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Not much empathy in the responses, sounds fake.</p><attrib>#569, female, 46&#x2010;55 y, have not used</attrib></disp-quote><p>Some GPs, however, challenged this assumption and proposed that the adoption of GenAI tools might benefit communication and patient interactions; for example:</p><disp-quote><p>A recent study showed that ChatGPT was more empathetic than doctors in dealing with patients [&#x2026;] I&#x2019;m all up for technology.</p><attrib>#280, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>I think the gains from interpersonal interaction are being underestimated.</p><attrib>#479, male, 56 y or older, have not used</attrib></disp-quote><disp-quote><p>It&#x2019;s good for patient communication.</p><attrib>#279, female, 36&#x2010;45 y, used</attrib></disp-quote></sec><sec id="s3-3-3"><title>Skepticism About AI Accountability</title><p>Comments reflecting GenAI anxieties also centered on the broader theme of AI access to sensitive patient information and accountability. However, again, many responses were brief and broadly framed. 
Some GPs specifically voiced concerns about patient data security, for example:</p><disp-quote><p>Still concerns over third parties having access to patient data.</p><attrib>#475, male, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Significant concerns regarding privacy and data protection.</p><attrib>#435, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>We use very little due to GDPR concerns, regulatory concerns, and do not use it in direct patient care currently&#x2026;</p><attrib>#841, female, 36&#x2010;45 y, used</attrib></disp-quote><p>Other GPs expressed attenuated optimism, describing the need for specific safeguards before AI could be adopted; for example:</p><disp-quote><p>I&#x2019;d like to use AI for note taking during consultations but difficult to implement in terms of privacy, confidentiality, security.</p><attrib>#211, male, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Seems like a really positive way forward. Need to be sure about ethics re info sharing.</p><attrib>#686, female, 46&#x2010;55 y, used</attrib></disp-quote><p>Other GPs raised concerns about legalities, particularly in relation to who, or what, would be accountable if things went wrong in the adoption of these tools; for example:</p><disp-quote><p>I have concerns about who will regulate the provision of AI tools, and what impact their use will have on liability for medical errors.</p><attrib>#594, male, 56 y or older, have not used</attrib></disp-quote><disp-quote><p>Very suspicious of AI, it depends totally on quality of information inputted. Who is medicolegally responsible?</p><attrib>#629, male, 46&#x2010;55 y, have not used</attrib></disp-quote></sec></sec><sec id="s3-4"><title>Role in Clinical Tasks</title><sec id="s3-4-1"><title>Helps With Documentation</title><p>A dominant subtheme, when it came to the role of GenAI, was in administration and clinical documentation. 
These comments elaborate on quantitative findings indicating that documentation was the most common use case among GenAI adopters [<xref ref-type="bibr" rid="ref18">18</xref>]. Drawing on TAM, these comments reflect a high level of perceived usefulness for GenAI tools in these administrative tasks, particularly where GPs cited time-saving benefits, improved workflow, and reduced cognitive burden. Many GPs expressed buoyancy about the capacity for these tools to &#x201C;help efficiency&#x201D; in clinical notes; for example:</p><disp-quote><p>Completely changed my enjoyment of job &#x2013; means I can focus on pt [patient] rather than busily typing away.</p><attrib>#311, female, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Useful to speed up documentation.</p><attrib>#54, female, 36&#x2010;45 y, used</attrib></disp-quote><p>Notably, many GPs specifically mentioned &#x201C;Heidi&#x201D; as an ambient clinical AI integrated into NHS trusts, which automates medical scribing, documentation, and patient data management with the aim of reducing administrative burdens for GPs. Respondents explicitly mentioned their positive experiences with this tool:</p><disp-quote><p>Heidi Health has been incredible for generating excellent documentation for consultations that improve communication and reduce my workload as GP. Very accurate with medical jargon used.</p><attrib>#1017, male, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Using Heidi as an ambient AI has made a massive improvement on my working efficiency and quality of notes.</p><attrib>#252, male, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Heidi is fabulous. 
It saves me an hour a day but also a lot of brain ache...</p><attrib>#819, male, 46&#x2010;55 y, used</attrib></disp-quote><p>However, some GPs expressed more reserved views about the role of AI in documentation, acknowledging the benefits while maintaining a cautious perspective:</p><disp-quote><p>Saves time transcribing consultation and generating referral letters. Not perfect but helps.</p><attrib>#1043, female, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>Find it good for note taking and I guess about 90% accurate.</p><attrib>#310, male, 46&#x2010;55 y, used</attrib></disp-quote><disp-quote><p>I find that using Heidi intermittently will significantly reduce my documentation time. You do need to read the transcription prior to use in the notes [&#x2026;] but otherwise I find it very useful especially with more complex histories or mental health patients who may take longer consulting times.</p><attrib>#160, female, 36&#x2010;45 y, used</attrib></disp-quote><p>Finally, a number of GPs specifically mentioned the role of GenAI in drafting patient-facing letters and improving communication, including for bridging language barriers; for example:</p><disp-quote><p>Good for doing letters and complaint responses.</p><attrib>#758, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>Have found it helpful to re-phrase content sent to patients to make it easier to understand or remove jargon.</p><attrib>#967, male, 46&#x2010;55 y, used</attrib></disp-quote><disp-quote><p>It helped me with communication and good language with the patients as English is not my first language.</p><attrib>#57, female, 35 y or younger, have not used</attrib></disp-quote></sec><sec id="s3-4-2"><title>Questioning AI&#x2019;s Clinical Judgment</title><p>Despite GPs&#x2019; optimism about the role of GenAI in documentation, most GPs were cynical or expressed reservations about the role of these tools in replacing doctors&#x2019; clinical judgment. 
Notably, however, it was unclear if these judgments encompassed user experiences; for example:</p><disp-quote><p>AI cannot replace the spidey sense of a GP to know that something is wrong.</p><attrib>#32, male, 36&#x2010;45 y, used</attrib></disp-quote><disp-quote><p>It has the potential to aid in management of patients but there is a long way to go for its diagnostic accuracy.</p><attrib>#1034, female, 36&#x2010;45 y, have not used</attrib></disp-quote><disp-quote><p>There are times in my profession I used my &#x201C;GUT&#x201D; and it has been pretty good to me so far. People do not fit into boxes so I think AI will work on probabilities.</p><attrib>#933, male, 36&#x2010;45 y, have not used</attrib></disp-quote></sec><sec id="s3-4-3"><title>Operational Uncertainty</title><p>While less frequently mentioned, some participants stressed that AI must be &#x201C;very carefully implemented&#x201D; and require &#x201C;clinical oversight,&#x201D; highlighting the need for clearer divisions of labor between AI and doctors. One GP highlighted the following concern relating to doctors&#x2019; overreliance on AI:</p><disp-quote><p>I use ChatGPT frequently and it has enormous potential and is very useful but it needs sense checking and it could cause complacency in busy overstretched healthcare professionals. When you start using it, it appears to have a solution to many problems and it also converses with you in a way that feels authentically human. These are impressive qualities but it should only be seen and used to augment rather than replace effort.</p><attrib>#922, male, 46&#x2010;55 y, used</attrib></disp-quote><p>Taking a different angle on operational uncertainties, a minority voiced concerns about technopolitical shifts and the broader trajectory of AI&#x2019;s impact. For example:</p><disp-quote><p>Too many billionaires controlling our work as it is. 
Stuff AI doing turning us into robots.</p><attrib>#324, male, 46&#x2010;55 y, have not used</attrib></disp-quote><disp-quote><p>I believe AI is a Pandora&#x2019;s box that has already been opened. We lack the safeguards or infrastructure to control its possible (and potentially catastrophic) effects.</p><attrib>#209, male, 46&#x2010;55 y, have not used</attrib></disp-quote></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This qualitative study offers timely insights into how GPs in the United Kingdom perceive the role of GenAI in primary care. While the analysis captures broad patterns and recurring sentiments, it is based on brief, free-text responses and should be understood as a high-level exploration rather than an in-depth account of underlying motivations or complex experiences.</p><p>Comments were classified into three major themes in relation to GenAI: (1) unfamiliarity, (2) ambivalence and anxiety, and (3) role in clinical tasks. Many GPs reported a lack of experience and familiarity with GenAI, and a need for training tailored to primary care. They also expressed a spectrum of views on GenAI in clinical practice, ranging from optimism to deep skepticism. While some saw GenAI as a promising tool for reducing administrative burden (particularly in documentation), others voiced concerns about its accuracy, potential to erode human connection, and unclear medico-legal implications. Given that GenAI users were more likely to provide comments, it is possible that certain benefits, particularly those related to documentation and workflow, may be somewhat overrepresented in the thematic analysis. In addition, widespread uncertainty about AI&#x2019;s role in primary care, alongside ethical and regulatory concerns, underscored GPs&#x2019; reported need for structured guidance. 
This uncertainty, particularly around usefulness and ease of use, aligns with constructs from TAM [<xref ref-type="bibr" rid="ref17">17</xref>], which posits that these perceptions are central to whether users adopt or reject new technologies.</p><p>This qualitative study supports other research indicating that GPs have limited formal training in AI, which directly influences their perceived ease of use&#x2014;a central construct of TAM. The comments reflecting confusion, lack of confidence, or the need for guidance reveal how uncertainty about how to interact with GenAI tools may reduce behavioral intention to adopt them [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. For example, a recent survey of 175 UK medical students found little awareness of formal AI or data science training in their curriculum, while 92% of 210 trainee doctors felt their AI education was inadequate. In our study, lack of familiarity with these tools was clear, with many GPs reporting minimal exposure to GenAI, stating they had no experience or understanding of how these technologies function; furthermore, this unfamiliarity was closely linked to GPs&#x2019; concerns about insufficient training. Online training is now available via the NHS Learning Hub. Certification as an &#x201C;Artificial Intelligence Practitioner&#x201D; is offered [<xref ref-type="bibr" rid="ref27">27</xref>], and the Medical Schools Council has called for AI training integration and proposed learning competencies [<xref ref-type="bibr" rid="ref28">28</xref>]. Our findings emphasize the critical need for AI-related training, which is both essential and highly sought after by GPs. 
From a TAM perspective, improving training could enhance both perceived ease of use and perceived usefulness, 2 key factors in shaping future adoption of GenAI tools in clinical practice [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. Our findings also highlight the importance of developing innovative, adaptive training approaches. Given the rapid evolution of AI, GPs may be hesitant to invest time in education that risks becoming quickly outdated. Our results suggest that GPs seek training that clarifies the parameters of AI use, provides insights into its strengths and limitations, and offers accessible advice on staying updated as the technology advances.</p><p>GPs in our study expressed highly divergent views on GenAI in primary care, with opinions ranging from deep skepticism and existential anxiety about their job to excitement. These findings echo previous research, which shows physicians have mixed opinions about AI [<xref ref-type="bibr" rid="ref23">23</xref>]; again, this broad spectrum of expectations may be indicative of a lack of formal education. These ambivalent or anxious attitudes, ranging from existential fears to cautious optimism, map onto TAM&#x2019;s concept of attitudes toward technology, which in turn shape behavioral intentions. Where GPs felt hopeful, behavioral intention appeared stronger; where they expressed anxiety or confusion, adoption seemed less likely [<xref ref-type="bibr" rid="ref29">29</xref>]. 
In addition, GPs&#x2019; varied interpretations of what GenAI entails may have shaped the divergence in views, contributing to both inflated concerns and enthusiastic projections.</p><p>Similar to previous research, GPs worried about the potential for AI to erode human connection and to undermine empathy, depersonalizing care [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]; this perspective is also common among leading medical commentators [<xref ref-type="bibr" rid="ref31">31</xref>]. However, these views contrast with emerging research suggesting that GenAI may enhance, rather than diminish, compassionate care. In a blinded study, ChatGPT&#x2019;s responses were rated as 10 times more empathetic than those of physicians, challenging assumptions about AI&#x2019;s limitations in patient communication [<xref ref-type="bibr" rid="ref6">6</xref>]. Other research suggests that AI can play a role in assisting clinicians to write empathetic communication with patients [<xref ref-type="bibr" rid="ref32">32</xref>] and that patients cannot discern when AI is responding [<xref ref-type="bibr" rid="ref33">33</xref>]. While more research is needed to explore patients&#x2019; opinions about using AI in clinical care, emerging studies suggest that at least some patients are turning to these tools for support [<xref ref-type="bibr" rid="ref34">34</xref>]. Few participants commented on how AI and GPs might collaborate more effectively. The concept of AI as a &#x201C;co-pilot&#x201D; is an increasingly proposed model, drawing parallels to how airline pilots work alongside autopilot systems in aviation [<xref ref-type="bibr" rid="ref35">35</xref>]. Others dispute the seamless integration of AI and doctors [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>], challenging this assumption.</p><p>Many expressed concerns about AI&#x2019;s accountability and data privacy. These are ongoing concerns. 
Implemented in February 2025, the European Union&#x2019;s AI Act, the most comprehensive global legislation on AI, designates consumer GenAI tools as &#x201C;high risk&#x201D; due to concerns over cybersecurity, accuracy, reliability, and transparency [<xref ref-type="bibr" rid="ref38">38</xref>]. Most LLMs do not claim clinical suitability, and the GMC advises physicians to apply professional judgment when using AI or emerging technologies in practice [<xref ref-type="bibr" rid="ref28">28</xref>]. Our findings suggest that this guidance is insufficient to alleviate GPs&#x2019; concerns about accountability, privacy, and when it is appropriate to use these tools. Without a firm legal grounding, GPs may take on liability for AI errors and so place themselves at risk.</p><p>Notably, however, GPs were buoyant about the role for GenAI chatbots and ambient AI, particularly the medical AI scribe &#x201C;Heidi Health,&#x201D; in assisting with documentation. This finding aligns with previous surveys suggesting that doctors perceive AI as beneficial in handling administrative tasks [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Emerging studies also suggest AI may improve workflow efficiency [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]; some preliminary studies suggest it might reduce burnout [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. However, the risk of GenAI hallucinations and errors raises concerns about the need for clinical oversight. 
These concerns may temper perceived usefulness&#x2014;an essential driver of adoption according to TAM [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]&#x2014;and reinforce GPs&#x2019; hesitation to rely on GenAI tools for clinical decision-making.</p><p>In our study, GPs questioned whether AI could ever replace the intuitive and experiential judgment of an experienced GP, emphasizing the irreplaceable nature of human clinical reasoning. While AI tools are not intended to replace clinical judgment, emerging evidence underscores their remarkable capabilities in differential diagnosis, with several studies demonstrating that GenAI can outperform physicians in certain diagnostic tasks [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. However, many GPs may be correct in remaining unsure of the added value of AI-driven diagnostic support, especially if actual clinical care is based on real-world data and decisions are made on complex information instead of simple vignettes.</p><p>Notably, few GPs highlighted how GenAI tools might enhance patient access to medical information, suggesting a limited awareness of their potential role in patient empowerment. This limited awareness may reflect early-stage exposure, where both perceived usefulness and ease of use remain unclear, further influencing low behavioral intention to integrate such tools (again, consistent with TAM) [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. In addition, despite well-documented concerns about algorithmic fairness and reliability, there was scarce mention of the risks of bias inherent in these AI-driven systems [<xref ref-type="bibr" rid="ref10">10</xref>]. 
This lack of engagement with both the opportunities and risks of GenAI suggests a broader gap in discourse and critical evaluation of AI&#x2019;s value in primary care, underscoring the need for more comprehensive discussions on its integration and ethical implications.</p></sec><sec id="s4-2"><title>Strengths and Limitations</title><p>This study represents the largest investigation to date into the perspectives of physicians on the integration of GenAI in clinical practice. Using a web-based survey may have encouraged more candid feedback, as evidenced by the strength of participants&#x2019; responses. As previously noted [<xref ref-type="bibr" rid="ref18">18</xref>], although our sample aligned with the UK GP population in terms of age and regional distribution, as reflected in the GMC Registry, our sample was not gender-representative [<xref ref-type="bibr" rid="ref44">44</xref>]. While the national GP workforce comprises more women than men (58% vs 42%), our sample had an equal gender split.</p><p>However, the study has limitations related to the use of a nonprobability sample via the Doctors website. We also acknowledge that the qualitative data analyzed originates from a self-selected subset of respondents who chose to provide free-text comments. This may introduce self-selection bias, potentially limiting the generalizability of the findings. Notably, in our study, those who used GenAI tools were more likely to have responded to the open comment section, and this likely influenced the valence of responses, potentially in a direction that was more positive toward AI. In addition, the web-based survey limited the response length and the opportunity for more nuanced insights. As noted, our thematic coding could be considered as resulting in high-level categorization of surface-level views rather than a deep interpretive analysis of participants&#x2019; underlying motivations or complex experiences. 
Finally, the survey provided a definition and examples of GenAI tools (eg, ChatGPT [OpenAI] and Gemini [Google AI]); however, respondents&#x2019; interpretations of the term may have varied based on their individual experiences and familiarity with AI technologies&#x2014;a common challenge in survey research on emerging digital tools.</p><p>While participant validation was not feasible due to the anonymous survey design, we observed strong repetition of core ideas across responses, suggesting thematic saturation despite the brevity of comments. In addition, the qualitative themes identified here are broadly consistent with the quantitative findings from the same survey (reported in a separate manuscript currently under review) [<xref ref-type="bibr" rid="ref18">18</xref>]. Although that manuscript is not yet published, this alignment provides indirect support for the trustworthiness of our interpretations.</p><p>We recommend ongoing qualitative research to explore GPs&#x2019; perspectives on GenAI. More robust, nationally representative surveys are needed to provide a clearer understanding of physicians&#x2019; experiences and concerns. To gain deeper insight into GPs&#x2019; use of GenAI tools, future research should incorporate semistructured interviews. This qualitative approach could help to uncover nuanced motivations, perceived benefits, and concerns that may not emerge from online survey data alone. In addition, integrating objective measures, such as time-motion studies or electronic health record analytics, could better assess GenAI&#x2019;s practical impacts on workload and clinical decision-making. Such comprehensive studies would provide a richer understanding of GenAI adoption, informing strategies to optimize its integration into primary care practice. 
Beyond these recommendations, we strongly emphasize the importance of extensive survey research to explore patient perspectives on the benefits, risks, and opportunities associated with these tools, including how they perceive physicians who use these tools.</p></sec><sec id="s4-3"><title>Conclusions</title><p>This study offers timely and valuable insights into GPs&#x2019; perspectives on the role, impact, and limitations of GenAI in primary care. A dominant theme among GPs was their limited experience and training with generative and ambient AI, despite recognizing its potential benefits for clinical documentation. Beyond this, GPs expressed ambivalence and concern about the broader impact of GenAI in primary care. Many were skeptical of its value and implications for their profession, while others remained uncertain or cautiously optimistic about its potential to transform health care.</p><p>Now, 2 years after the widespread introduction of GenAI, the ongoing lack of understanding and training among GPs remains a critical concern, potentially hindering effective and responsible implementation. Addressing these gaps through better, targeted education and structured integration efforts will be essential to harnessing the full potential of GenAI tools in primary care. Future research, particularly more extensive qualitative studies, is needed to provide a deeper, more nuanced understanding of GPs&#x2019; evolving attitudes about, and experiences with, GenAI.</p></sec></sec></body><back><ack><p>The authors thank Nicola Miles for support in the administration of this survey, and the general practitioners who offered feedback on a draft of the survey. This study was supported by a Forte Grant (Artificial Intelligence in Healthcare Unleashed, #2024&#x2010;00039) (CB and CGS), and by the Faculty of Psychology, University of Basel, Switzerland (JG and CL). BM is also funded by National Institute for Health and Care Research (NIHR) Advanced Fellowship (NIHR300887). 
The views expressed are those of the authors and not necessarily those of the National Health Service, the NIHR, or the Department of Health and Social Care.</p></ack><notes><sec><title>Data Availability</title><p>Deidentified raw data are available in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p></sec></notes><fn-group><fn fn-type="conflict"><p>CB is Associate Editor of <italic>JMIR Mental Health</italic>, and JT is Editor-in-Chief of <italic>JMIR Mental Health</italic>. The other authors declare no conflicts of interest.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CHERRIES</term><def><p>Checklist for Reporting Results of Internet E-Surveys</p></def></def-item><def-item><term id="abb3">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb4">GenAI</term><def><p>generative artificial intelligence</p></def></def-item><def-item><term id="abb5">GMC</term><def><p>General Medical Council</p></def></def-item><def-item><term id="abb6">GP</term><def><p>general practitioner</p></def></def-item><def-item><term id="abb7">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb8">NHS</term><def><p>National Health Service</p></def></def-item><def-item><term id="abb9">TAM</term><def><p>technology acceptance model</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doshi</surname><given-names>GK</given-names> </name><name name-style="western"><surname>Jensen</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Graziano</surname><given-names>A</given-names> </name><name name-style="western"><surname>Enenmoh</surname><given-names>C</given-names> 
</name><name name-style="western"><surname>Lindsey</surname><given-names>J</given-names> </name></person-group><article-title>Use of ambient AI scribing: Impact on physician administrative burden and patient care</article-title><source>JCO Oncol Pract</source><year>2024</year><month>10</month><volume>20</volume><issue>10_suppl</issue><fpage>418</fpage><lpage>418</lpage><pub-id pub-id-type="doi">10.1200/OP.2024.20.10_suppl.418</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haberle</surname><given-names>T</given-names> </name><name name-style="western"><surname>Cleveland</surname><given-names>C</given-names> </name><name name-style="western"><surname>Snow</surname><given-names>GL</given-names> </name><etal/></person-group><article-title>The impact of nuance DAX ambient listening AI documentation: a cohort study</article-title><source>J Am Med Inform Assoc</source><year>2024</year><month>04</month><day>3</day><volume>31</volume><issue>4</issue><fpage>975</fpage><lpage>979</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocae022</pub-id><pub-id pub-id-type="medline">38345343</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>S&#x00E1;nchez-Rosenberg</surname><given-names>G</given-names> </name><name name-style="western"><surname>Magn&#x00E9;li</surname><given-names>M</given-names> </name><name name-style="western"><surname>Barle</surname><given-names>N</given-names> </name><etal/></person-group><article-title>ChatGPT-4 generates orthopedic discharge documents faster than humans maintaining comparable quality: a pilot study of 6 cases</article-title><source>Acta Orthop</source><year>2024</year><month>03</month><day>21</day><volume>95</volume><fpage>152</fpage><lpage>156</lpage><pub-id 
pub-id-type="doi">10.2340/17453674.2024.40182</pub-id><pub-id pub-id-type="medline">38597205</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baker</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Dwyer</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kalidoss</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hynes</surname><given-names>K</given-names> </name><name name-style="western"><surname>Wolf</surname><given-names>J</given-names> </name><name name-style="western"><surname>Strelzow</surname><given-names>JA</given-names> </name></person-group><article-title>ChatGPT&#x2019;s ability to assist with clinical documentation: a randomized controlled trial</article-title><source>J Am Acad Orthop Surg</source><year>2024</year><month>02</month><day>1</day><volume>32</volume><issue>3</issue><fpage>123</fpage><lpage>129</lpage><pub-id pub-id-type="doi">10.5435/JAAOS-D-23-00474</pub-id><pub-id pub-id-type="medline">37976385</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kharko</surname><given-names>A</given-names> </name><name name-style="western"><surname>McMillan</surname><given-names>B</given-names> </name><name name-style="western"><surname>Hagstr&#x00F6;m</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Generative artificial intelligence writing open notes: a mixed methods assessment of the functionality of GPT 3.5 and GPT 4.0</article-title><source>Digit Health</source><year>2024</year><volume>10</volume><fpage>20552076241291384</fpage><pub-id pub-id-type="doi">10.1177/20552076241291384</pub-id><pub-id pub-id-type="medline">39493632</pub-id></nlm-citation></ref><ref 
id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayers</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Poliak</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title><source>JAMA Intern Med</source><year>2023</year><month>06</month><day>1</day><volume>183</volume><issue>6</issue><fpage>589</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id><pub-id pub-id-type="medline">37115527</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goh</surname><given-names>E</given-names> </name><name name-style="western"><surname>Gallo</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hom</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Large language model influence on diagnostic reasoning: a randomized clinical trial</article-title><source>JAMA Netw Open</source><year>2024</year><month>10</month><day>1</day><volume>7</volume><issue>10</issue><fpage>e2440969</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.40969</pub-id><pub-id pub-id-type="medline">39466245</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Brodeur</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Buckley</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Kanjee</surname><given-names>Z</given-names> 
</name><etal/></person-group><article-title>Superhuman performance of a large language model on the reasoning tasks of a physician</article-title><source>arXiv</source><comment>Preprint posted online in 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2412.10849</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Katz</surname><given-names>U</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>E</given-names> </name><name name-style="western"><surname>Shachar</surname><given-names>E</given-names> </name><etal/></person-group><article-title>GPT versus resident physicians &#x2014; a benchmark based on official board scores</article-title><source>NEJM AI</source><year>2024</year><month>04</month><day>25</day><volume>1</volume><issue>5</issue><pub-id pub-id-type="doi">10.1056/AIdbp2300192</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zack</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lehman</surname><given-names>E</given-names> </name><name name-style="western"><surname>Suzgun</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: a model evaluation study</article-title><source>Lancet Digit Health</source><year>2024</year><month>01</month><volume>6</volume><issue>1</issue><fpage>e12</fpage><lpage>e22</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(23)00225-X</pub-id><pub-id pub-id-type="medline">38123252</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> 
</name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>ChatGPT and mental healthcare: balancing benefits with risks of harms</article-title><source>BMJ Ment Health</source><year>2023</year><month>11</month><volume>26</volume><issue>1</issue><fpage>e300884</fpage><pub-id pub-id-type="doi">10.1136/bmjment-2023-300884</pub-id><pub-id pub-id-type="medline">37949485</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name></person-group><article-title>Open AI meets open notes: surveillance capitalism, patient privacy and online record access</article-title><source>J Med Ethics</source><year>2024</year><access-date>2024-01-13</access-date><publisher-name>Institute of Medical Ethics</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/jme-2023-109574">https://doi.org/10.1136/jme-2023-109574</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>IG</given-names> </name></person-group><article-title>What should ChatGPT mean for bioethics?</article-title><source>Am J Bioeth</source><year>2023</year><month>10</month><volume>23</volume><issue>10</issue><fpage>8</fpage><lpage>16</lpage><pub-id pub-id-type="doi">10.1080/15265161.2023.2233357</pub-id><pub-id pub-id-type="medline">37440696</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Worthen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> 
</name></person-group><article-title>Psychiatrists&#x2019; experiences and opinions of generative artificial intelligence in mental healthcare: an online mixed methods survey</article-title><source>Psychiatry Res</source><year>2024</year><month>03</month><volume>333</volume><fpage>115724</fpage><pub-id pub-id-type="doi">10.1016/j.psychres.2024.115724</pub-id><pub-id pub-id-type="medline">38244285</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bright</surname><given-names>J</given-names> </name><name name-style="western"><surname>Enock</surname><given-names>F</given-names> </name><name name-style="western"><surname>Esnaashari</surname><given-names>S</given-names> </name><name name-style="western"><surname>Francis</surname><given-names>J</given-names> </name><name name-style="western"><surname>Hashem</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Morgan</surname><given-names>D</given-names> </name></person-group><article-title>Generative AI is already widespread in the public sector: evidence from a survey of UK public sector professionals</article-title><source>Digit Gov: Res Pract</source><year>2025</year><month>03</month><day>31</day><volume>6</volume><issue>1</issue><fpage>1</fpage><lpage>13</lpage><pub-id pub-id-type="doi">10.1145/3700140</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Locher</surname><given-names>C</given-names> </name><name name-style="western"><surname>Gaab</surname><given-names>J</given-names> </name><name name-style="western"><surname>H&#x00E4;gglund</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Mandl</surname><given-names>KD</given-names> </name></person-group><article-title>Generative artificial intelligence in primary care: an online survey of UK general practitioners</article-title><source>BMJ Health Care Inform</source><year>2024</year><month>09</month><day>17</day><volume>31</volume><issue>1</issue><fpage>e101102</fpage><pub-id pub-id-type="doi">10.1136/bmjhci-2024-101102</pub-id><pub-id pub-id-type="medline">39288998</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>FD</given-names> </name></person-group><article-title>Perceived usefulness, perceived ease of use, and user acceptance of information technology</article-title><source>MIS Q</source><year>1989</year><month>09</month><volume>13</volume><issue>3</issue><fpage>319</fpage><pub-id pub-id-type="doi">10.2307/249008</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Hagstr&#x00F6;m</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sanchez</surname><given-names>CG</given-names> </name><etal/></person-group><source>General Practitioners&#x2019; Experiences with Generative Artificial Intelligence in the UK: An Online Survey</source><year>2025</year><access-date>2025-03-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.researchsquare.com/article/rs-6196250/latest">https://www.researchsquare.com/article/rs-6196250/latest</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Sandelowski</surname><given-names>M</given-names> </name></person-group><article-title>What&#x2019;s in a name? Qualitative description revisited</article-title><source>Res Nurs Health</source><year>2010</year><month>02</month><volume>33</volume><issue>1</issue><fpage>77</fpage><lpage>84</lpage><pub-id pub-id-type="doi">10.1002/nur.20362</pub-id><pub-id pub-id-type="medline">20014004</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sandelowski</surname><given-names>M</given-names> </name></person-group><article-title>Whatever happened to qualitative description?</article-title><source>Res Nurs Health</source><year>2000</year><month>08</month><volume>23</volume><issue>4</issue><fpage>334</fpage><lpage>340</lpage><pub-id pub-id-type="doi">10.1002/1098-240x(200008)23:4&#x003C;334::aid-nur9&#x003E;3.0.co;2-g</pub-id><pub-id pub-id-type="medline">10940958</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Joffe</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yardley</surname><given-names>L</given-names> </name></person-group><article-title>Content and thematic analysis</article-title><source>Research Methods for Clinical and Health Psychology</source><year>2003</year><publisher-name>SAGE Publications</publisher-name><fpage>56</fpage><lpage>68</lpage></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>FD</given-names> </name></person-group><article-title>Perceived usefulness, perceived ease of use, and user acceptance of information technology</article-title><source>Inf Seek Behav Technol 
Adopt</source><year>1989</year><volume>205</volume><issue>219</issue><pub-id pub-id-type="doi">10.2307/249008</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kaptchuk</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Mandl</surname><given-names>KD</given-names> </name><name name-style="western"><surname>Halamka</surname><given-names>JD</given-names> </name><name name-style="western"><surname>DesRoches</surname><given-names>CM</given-names> </name></person-group><article-title>Artificial intelligence and the future of primary care: exploratory qualitative study of UK general practitioners&#x2019; views</article-title><source>J Med Internet Res</source><year>2019</year><month>03</month><day>20</day><volume>21</volume><issue>3</issue><fpage>e12802</fpage><pub-id pub-id-type="doi">10.2196/12802</pub-id><pub-id pub-id-type="medline">30892270</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kharko</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Computerization of the work of general practitioners: mixed methods survey of final-year medical students in Ireland</article-title><source>JMIR Med Educ</source><year>2023</year><month>03</month><day>20</day><volume>9</volume><fpage>e42639</fpage><pub-id pub-id-type="doi">10.2196/42639</pub-id><pub-id 
pub-id-type="medline">36939809</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinto Dos Santos</surname><given-names>D</given-names> </name><name name-style="western"><surname>Giese</surname><given-names>D</given-names> </name><name name-style="western"><surname>Brodehl</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Medical students&#x2019; attitude towards artificial intelligence: a multicentre survey</article-title><source>Eur Radiol</source><year>2019</year><month>04</month><volume>29</volume><issue>4</issue><fpage>1640</fpage><lpage>1646</lpage><pub-id pub-id-type="doi">10.1007/s00330-018-5601-1</pub-id><pub-id pub-id-type="medline">29980928</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kharko</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Machine learning in medical education: a survey of the experiences and opinions of medical students in Ireland</article-title><source>BMJ Health Care Inform</source><year>2022</year><month>02</month><volume>29</volume><issue>1</issue><fpage>e100480</fpage><pub-id pub-id-type="doi">10.1136/bmjhci-2021-100480</pub-id><pub-id pub-id-type="medline">35105606</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="web"><person-group person-group-type="author"><collab>NHS</collab></person-group><source>Artif Intell</source><year>2025</year><access-date>2025-03-06</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://learninghub.nhs.uk/catalogue/ai/about">https://learninghub.nhs.uk/catalogue/ai/about</ext-link></comment></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="web"><person-group person-group-type="author"><collab>General Medical Council</collab></person-group><source>Artificial intelligence and innovative technologies</source><year>2025</year><access-date>2025-03-06</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.gmc-uk.org/professional-standards/learning-materials/artificial-intelligence-and-innovative-technologies">https://www.gmc-uk.org/professional-standards/learning-materials/artificial-intelligence-and-innovative-technologies</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>FD</given-names> </name><name name-style="western"><surname>Grani&#x0107;</surname><given-names>A</given-names> </name></person-group><source>The Technology Acceptance Model: 30 Years of TAM</source><year>2024</year><publisher-name>Springer International Publishing</publisher-name><pub-id pub-id-type="doi">10.1007/978-3-030-45274-2</pub-id><pub-id pub-id-type="other">978-3-030-45273-5</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Locher</surname><given-names>C</given-names> </name><name name-style="western"><surname>Leon-Carlyle</surname><given-names>M</given-names> </name><name name-style="western"><surname>Doraiswamy</surname><given-names>M</given-names> </name></person-group><source>Artificial Intelligence and the Future of Psychiatry: Qualitative Findings from a Global Physician 
Survey</source><year>2020</year><volume>6</volume><publisher-name>Digit Health SAGE Publications Sage UK: London, England</publisher-name><pub-id pub-id-type="doi">10.1177/2055207620968355</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>Machines and empathy in medicine</article-title><source>The Lancet</source><year>2023</year><month>10</month><volume>402</volume><issue>10411</issue><fpage>1411</fpage><pub-id pub-id-type="doi">10.1016/S0140-6736(23)02292-4</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>IW</given-names> </name><name name-style="western"><surname>Miner</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Atkins</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Althoff</surname><given-names>T</given-names> </name></person-group><article-title>Human&#x2013;AI collaboration enables more empathic conversations in text-based peer-to-peer mental health support</article-title><source>Nat Mach Intell</source><year>2023</year><volume>5</volume><issue>1</issue><fpage>46</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1038/s42256-022-00593-2</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Hatch</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Goodman</surname><given-names>ZT</given-names> </name><name name-style="western"><surname>Vowels</surname><given-names>L</given-names> 
</name><etal/></person-group><source>When ELIZA Meets Therapists: A Turing Test for the Heart and Mind</source><year>2025</year><volume>2</volume><publisher-name>PLOS Ment Health Public Library of Science</publisher-name><pub-id pub-id-type="doi">10.1371/journal.pmen.0000145</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Siddals</surname><given-names>S</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>Coxon</surname><given-names>A</given-names> </name></person-group><article-title>&#x201C;It happened to be the perfect thing&#x201D;: experiences of generative AI chatbots for mental health</article-title><source>Npj Ment Health Res</source><year>2024</year><month>10</month><day>27</day><volume>3</volume><issue>1</issue><fpage>48</fpage><pub-id pub-id-type="doi">10.1038/s44184-024-00097-4</pub-id><pub-id pub-id-type="medline">39465310</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sellen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Horvitz</surname><given-names>E</given-names> </name></person-group><article-title>The rise of the AI co-pilot: lessons for design from aviation and beyond</article-title><source>Commun ACM</source><year>2024</year><month>07</month><volume>67</volume><issue>7</issue><fpage>18</fpage><lpage>23</lpage><pub-id pub-id-type="doi">10.1145/3637865</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Kostopoulou</surname><given-names>O</given-names> </name><name name-style="western"><surname>Russo</surname><given-names>JE</given-names> 
</name><name name-style="western"><surname>Keenan</surname><given-names>G</given-names> </name><name name-style="western"><surname>Delaney</surname><given-names>BC</given-names> </name><name name-style="western"><surname>Douiri</surname><given-names>A</given-names> </name></person-group><source>Information Distortion in Physicians&#x2019; Diagnostic Judgments</source><year>2012</year><volume>32</volume><publisher-name>Med Decis Making Sage Publications Sage CA</publisher-name><fpage>831</fpage><lpage>839</lpage></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kostopoulou</surname><given-names>O</given-names> </name><name name-style="western"><surname>Porat</surname><given-names>T</given-names> </name><name name-style="western"><surname>Corrigan</surname><given-names>D</given-names> </name><name name-style="western"><surname>Mahmoud</surname><given-names>S</given-names> </name><name name-style="western"><surname>Delaney</surname><given-names>BC</given-names> </name></person-group><article-title>Diagnostic accuracy of GPs when using an early-intervention decision support system: a high-fidelity simulation</article-title><source>Br J Gen Pract</source><year>2017</year><month>03</month><volume>67</volume><issue>656</issue><fpage>e201</fpage><lpage>e208</lpage><pub-id pub-id-type="doi">10.3399/bjgp16X688417</pub-id><pub-id pub-id-type="medline">28137782</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="web"><person-group person-group-type="author"><collab>European Commission</collab></person-group><source>AI Act</source><year>2025</year><access-date>2025-03-06</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai">https://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai</ext-link></comment></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Gaab</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Computerization and the future of primary care: a survey of general practitioners in the UK</article-title><source>PLoS ONE</source><year>2018</year><volume>13</volume><issue>12</issue><fpage>e0207418</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0207418</pub-id><pub-id pub-id-type="medline">30540791</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doraiswamy</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bodner</surname><given-names>K</given-names> </name></person-group><article-title>Artificial intelligence and the future of psychiatry: Insights from a global physician survey</article-title><source>Artif Intell Med</source><year>2020</year><month>01</month><volume>102</volume><fpage>101753</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2019.101753</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garcia</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>SP</given-names> </name><name 
name-style="western"><surname>Shah</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Artificial intelligence-generated draft replies to patient inbox messages</article-title><source>JAMA Netw Open</source><year>2024</year><month>03</month><day>4</day><volume>7</volume><issue>3</issue><fpage>e243201</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.3201</pub-id><pub-id pub-id-type="medline">38506805</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tai-Seale</surname><given-names>M</given-names> </name><name name-style="western"><surname>Baxter</surname><given-names>SL</given-names> </name><name name-style="western"><surname>Vaida</surname><given-names>F</given-names> </name><etal/></person-group><article-title>AI-generated draft replies integrated into health records and physicians&#x2019; electronic communication</article-title><source>JAMA Netw Open</source><year>2024</year><month>04</month><day>1</day><volume>7</volume><issue>4</issue><fpage>e246565</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.6565</pub-id><pub-id pub-id-type="medline">38619840</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>McDuff</surname><given-names>D</given-names> </name><name name-style="western"><surname>Schaekermann</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tu</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Towards accurate differential diagnosis with large language models</article-title><source>arXiv</source><year>2023</year><access-date>2024-04-05</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="http://arxiv.org/abs/2312.00164">http://arxiv.org/abs/2312.00164</ext-link></comment></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="web"><person-group person-group-type="author"><collab>General Medical Council</collab></person-group><article-title>Key stats from the medical register</article-title><source>The Register</source><year>2024</year><access-date>2025-03-06</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://gde.gmc-uk.org/">https://gde.gmc-uk.org/</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Survey.</p><media xlink:href="jmir_v27i1e74428_app1.docx" xlink:title="DOCX File, 334 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Informed consent.</p><media xlink:href="jmir_v27i1e74428_app2.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Detailed information about the variables and their categories.</p><media xlink:href="jmir_v27i1e74428_app3.docx" xlink:title="DOCX File, 470 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Raw qualitative data.</p><media xlink:href="jmir_v27i1e74428_app4.docx" xlink:title="DOCX File, 395 KB"/></supplementary-material><supplementary-material id="app5"><label>Checklist 1</label><p>CHERRIES (Checklist for Reporting Results of Internet E-Surveys).</p><media xlink:href="jmir_v27i1e74428_app5.docx" xlink:title="DOCX File, 24 KB"/></supplementary-material></app-group></back></article>