<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e81543</article-id>
      <article-id pub-id-type="pmid">41348460</article-id>
      <article-id pub-id-type="doi">10.2196/81543</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Barriers and Facilitators to Health Care AI Adoption Among Those Living in Wales and Working in Health Care in Wales: Online Survey</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Sarvestan</surname>
            <given-names>Javad</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Oyetunji</surname>
            <given-names>Oladayo</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bievre</surname>
            <given-names>Nicolas</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Oluwole</surname>
            <given-names>Odumbo</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Pruski</surname>
            <given-names>Michal</given-names>
          </name>
          <degrees>MA, MSc, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>School of Health Science</institution>
            <institution>University of Manchester</institution>
            <addr-line>Oxford Road</addr-line>
            <addr-line>Manchester, M13 9PL</addr-line>
            <country>United Kingdom</country>
            <phone>44 1613066000</phone>
            <email>michal.pruski@postgrad.manchester.ac.uk</email>
          </address>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0001-7582-1418">https://orcid.org/0000-0001-7582-1418</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Woolley</surname>
            <given-names>Katherine E</given-names>
          </name>
          <degrees>MPH, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0003-3743-9925">https://orcid.org/0000-0003-3743-9925</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Withers</surname>
            <given-names>Kathleen L</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0001-9514-2025">https://orcid.org/0000-0001-9514-2025</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>CEDAR</institution>
        <institution>Cardiff and Vale University Health Board</institution>
        <addr-line>Cardiff</addr-line>
        <country>United Kingdom</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>School of Health Science</institution>
        <institution>University of Manchester</institution>
        <addr-line>Manchester</addr-line>
        <country>United Kingdom</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>School of Engineering</institution>
        <institution>Cardiff University</institution>
        <addr-line>Cardiff</addr-line>
        <country>United Kingdom</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>School of Medicine</institution>
        <institution>Cardiff University</institution>
        <addr-line>Cardiff</addr-line>
        <country>United Kingdom</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Michal Pruski <email>michal.pruski@postgrad.manchester.ac.uk</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>5</day>
        <month>12</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e81543</elocation-id>
      <history>
        <date date-type="received">
          <day>30</day>
          <month>7</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>21</day>
          <month>8</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>12</day>
          <month>11</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>13</day>
          <month>11</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Michal Pruski, Katherine E Woolley, Kathleen L Withers. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 05.12.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e81543" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>NHS Wales routinely collects patient-reported outcome measures, and these, together with other clinical data, offer an opportunity to design machine learning (ML) technologies that could advance the implementation of prudent health care principles (a health care strategy encouraged by the Welsh Government). However, the wide adoption of such technologies is not only dependent on the development of technically well-performing ML algorithms but also on end-user barriers and facilitators.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to identify potential end-user (patient and health care professional) barriers and facilitators to the use of ML in health care decision-making in Wales. The study’s objective was to provide actionable information for those who are developing and implementing ML technologies in health care, rather than contributing to the theoretical advance of technology implementation frameworks.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>An online survey using Microsoft Forms was conducted. It was open to anyone who was 16 years or older and lived in Wales (member of the public criterion) or was a registered health care professional working in Wales and participating in treatment or therapy decision-making (health care professional criterion). The anonymous survey was open from December 4, 2024, to March 4, 2025. The survey used single-choice, ranking, and free-text questions, which were phrased differently for both eligibility groups. Data analysis was based on the respondent-selected eligibility criterion and self-declared general attitude toward health care artificial intelligence (AI; generally supportive, opposed, or uncertain), using descriptive and inferential statistics, as well as a summary of free-text responses.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 309 respondents filled out the survey, 179 selecting the member of the public criterion and 130 selecting the health care professional criterion. Among them, 209 self-identified as having a generally supportive attitude toward health care AI, 31 as generally being opposed to health care AI, and 69 as being uncertain. Overall, respondents placed a large emphasis on the presence of evidence for the technology’s effectiveness and humans being in control of the health care process, even if this meant that care processes were not as fast as they could be with a higher degree of automation. Those with a negative attitude toward AI placed more emphasis on human autonomy than other respondent groups.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Those developing and implementing health care AI technologies should develop an unbiased evidence base for the effectiveness of their technologies, using transparent methodologies, and continue their evaluation when the technology is in place. Moreover, implementation should not decrease patient-clinician contact but automate specific tasks only and maintain a human in the loop.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>surveys and questionnaires</kwd>
        <kwd>trust</kwd>
        <kwd>public attitudes</kwd>
        <kwd>consumer health informatics</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Technology Adoption</title>
        <p>Technology adoption within health care is particularly complex, as it involves contingent innovation-decisions [<xref ref-type="bibr" rid="ref1">1</xref>], with technologies often requiring regulatory approval to permit the use of a technology, adoption of the technology by a specific organization, a decision to use the technology by a health care professional, and the consent to its use by the patient. Consequently, adoption decisions depend on technology, organizational, and personal factors [<xref ref-type="bibr" rid="ref2">2</xref>]. Furthermore, diffusion of innovation research has identified 5 adopter categories based on how soon they adopt a technology [<xref ref-type="bibr" rid="ref1">1</xref>], which are broadly likely to relate to “pro-AI” and “AI-cautious” groups identified in health informatics research [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>A wide range of theoretical frameworks have been applied to aid and assess digital health care technology implementation, with reports varying as to which framework is the most popular [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. While such frameworks use many distinct concepts, these concepts often interrelate across frameworks. For example, there is a clear overlap between concepts of “perceived ease of use” and “individual effort expectancy,” as well as “perceived usefulness” and “performance expectancy,” which are used respectively in the Technology Acceptance Model (TAM) and the Unified Theory of Acceptance and Use of Technology (UTAUT). Nevertheless, at the core of these frameworks is the ability to predict user acceptance [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
      </sec>
      <sec>
        <title>Importance of End-User Research in AI Adoption</title>
        <p>A briefing by a leading UK health think-tank noted that artificial intelligence (AI) adoption within health care needs to be driven by the public [<xref ref-type="bibr" rid="ref6">6</xref>]. Without end-user acceptance, theoretically useful digital solutions that fail to address user concerns and preferences might not realize their potential benefits. The briefing calls for the engagement of both patients and staff in the design and development of AI technologies [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
        <p>Such an approach is consistent with current UK health care research guidelines, which emphasize public involvement throughout the research lifecycle (including the methodology development phase) and highlight that public involvement increases the likelihood of research results being more useful and beneficial to patients [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. This is reflected in the National Institute for Health and Care Excellence’s (NICE) policy on public involvement in its work on clinical guidelines [<xref ref-type="bibr" rid="ref9">9</xref>].</p>
        <p>Importantly, many AI implementation questions cannot be answered from a purely technical perspective. For example, during the design or implementation of a machine learning (ML) technology, choices often need to be made to prioritize one aspect of the technology over another, such as when conflicts occur between privacy and efficacy, or efficacy and justice considerations [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. While organizations might have preferences over how these are balanced, ultimately if patients or clinicians disagree with this balance, the technology might not be adopted in practice.</p>
      </sec>
      <sec>
        <title>NHS Wales Considerations</title>
        <p>National Health Service (NHS) Wales consists of various organizations, which all use digital technologies. While some national solutions are provided by Digital Health and Care Wales (DHCW) and TEC Cymru, health boards and trusts often make their own choices related to technology adoption and also bear the consequences of these choices. As such, these organizations require information on their own populations to make these decisions effectively.</p>
        <p>However, Wales also has an extensive nationwide program for collecting patient-reported outcome measures (PROM) data as part of routine clinical practice. A previous literature review has shown that there is potential for such data to be used together with ML techniques to help predict patients’ postintervention outcomes [<xref ref-type="bibr" rid="ref12">12</xref>]. Such predictive health care ML technologies have the potential to provide information to patients and clinicians to help them make better-informed treatment and therapy decisions. Furthermore, ML technologies have the potential to aid the application of the prudent health care and value-based principles, which are encouraged in NHS Wales by the Welsh Government [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>Importantly, while UK-wide studies have looked at barriers and facilitators to health care AI adoption, these have been dominated by respondents from England [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. Since health care is a matter devolved to the Welsh Government, the NHS is organized and funded differently in Wales compared to England, and Wales has an older and more rural-based population than England; implementation of AI technologies in Wales is dependent on different geographical, organizational, and population factors [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. The only large-scale study undertaken in Wales on the adoption of AI in health care was the 2025 “Time to Talk Public Health” survey, which had 2137 respondents and included a single question on user acceptability of AI in breast screening, and ran concurrently to the survey described in this study [<xref ref-type="bibr" rid="ref19">19</xref>]. Nevertheless, that study did not look at specific barriers and facilitators to adoption nor the information assurances needed to make these clinical AI technologies acceptable to users, but only at attitudes toward and perceived impact of such use of AI.</p>
      </sec>
      <sec>
        <title>Research Questions and Study Contributions</title>
        <p>The study’s primary research question was: “What are the barriers and facilitators to ML adoption in clinical decision-making among end users in Wales?”</p>
        <p>The study’s secondary research question was: “Are there differences in barriers and facilitators to ML adoption between respondents with different attitudes toward AI?”</p>
        <p>To the best of our knowledge, this is the first all-Wales study looking at barriers and facilitators to AI adoption. It identifies barriers and facilitators to health care AI adoption in 2 distinct user populations: members of the public (as potential patients) and health care professionals who play a role in the treatment or therapy decision process. These results will offer guidance to both national and regional organizations on how to develop and implement such technologies.</p>
        <p>It is one of the few studies that compares the responses between groups with different attitudes toward AI. As such, it contributes toward a better understanding of how to make such technologies more acceptable to AI skeptics.</p>
        <p>The study questionnaire was designed with a focus on ML applications using PROM data to enable potential future benefits from the operationalization of such data within the therapeutic context. Nevertheless, we hope that the results of this study might be of use to a wider range of health care AI applications. While it was not an explicit aim of this study, it might also contribute to the broader literature on theories of technology adoption.</p>
        <p>Compared to other studies, the design of this research, while supported by a literature review, was largely driven by stakeholder input to ensure that it met the needs of the public and health care leaders. Additionally, this study used ranking methodology over rating methodology to provide more actionable information for those wanting to use these findings in practice.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Literature Search and Review</title>
        <p>A search strategy (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) was developed and run in MEDLINE ALL (Ovid) to identify relevant records. The search was also adapted and run in the following 5 databases: Embase (Ovid), The Cochrane Library, Scopus, IEEE Xplore, and ACM Digital Library. The searches were carried out on October 11, 2023. Records were imported into EndNote 20 and deduplicated. Two reviewers (MP and KEW) independently screened studies at title and abstract and full-text using EndNote. One reviewer (MP) assessed all records at title and abstract against the inclusion criteria (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Full texts were obtained and assessed by one reviewer (MP) against the inclusion criteria. At both stages, a second reviewer (KEW) checked all included records and 10% of excluded records, noting any discrepancies. Discrepancies were resolved through discussion. The searches retrieved 2044 records, with 1314 records remaining after deduplication. Following title and abstract screening, 134 records were assessed at full-text assessment. Of these, 74 records met the inclusion criteria and pertained to 74 studies. The reasons for the exclusion of the remaining 60 records are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The included studies were reviewed by a single reviewer (MP) who extracted the barriers and facilitators identified in each publication, which were then grouped into themes. There were 22 themes, found across 67 publications, pertaining to staff, while 13 themes, found among 19 publications, related to patients (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 
Performance of the AI technology was the most prominent theme in both the staff and patient groups (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>); performance covers such aspects as algorithmic accuracy and practical usefulness.</p>
      </sec>
      <sec>
        <title>Questionnaire Creation</title>
        <p>Using the themes identified in the literature, together with a review of ethics principles in health care adoption and specific topic explorations undertaken as part of this project, initial sets of questions were developed [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>].</p>
        <p>Similar to the wider literature on barriers and facilitators to the adoption of AI in health care, the questionnaire was designed with a large focus on ethical considerations [<xref ref-type="bibr" rid="ref23">23</xref>]. Feedback was then sought on the draft question set from the patient and public involvement (PPI) group, stakeholder groups, and organizational colleagues with experience in research, survey design, or those who might be potential survey responders. After refinement, final feedback was sought from the Cardiff and Vale University Health Board’s Patient Experience Team. Once the English version of the questionnaire was approved, the questionnaire was translated into Welsh.</p>
        <p>To ensure that the findings from the study can be practicably applied, the questionnaire primarily used ranking and choice questions, as opposed to rating questions, even though rating questions have been commonly used in similar studies [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. While there is an ongoing debate surrounding the appropriateness of rating and ranking questions, with both having their advantages, the use of both methods tends to result in a similar order of item importance [<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref28">28</xref>]. Nevertheless, using ranking questions can result in more actionable information, as it avoids ties and nondifferentiation between responses, though this can create bias when respondents genuinely do not have a preference [<xref ref-type="bibr" rid="ref26">26</xref>]. Ranking questions had the “shuffle” option enabled to reduce the chances of influence of the order in which the options were presented on participant ranking decisions. All questions were set as “required,” except the final questions, which asked for participants' contact preferences if they wanted to participate in the further qualitative part of the study.</p>
        <p>The initial draft consisted of 21 questions, including 4 demographic questions and 2 questions about the respondents’ general attitude toward AI based on a past report suggesting that this is a potentially useful way of clustering responders [<xref ref-type="bibr" rid="ref3">3</xref>]. Some questions differed between the two eligibility criteria groups. Because of the limitations of Microsoft Forms, we had to ensure that each ranking question had no more than 10 options, but PPI and steering group feedback indicated that we should ideally not have more than 5 options for each question. For the questions that focused on the prioritization of ethical principles, we used the set of principles identified by Jobin et al [<xref ref-type="bibr" rid="ref29">29</xref>], but discarded “trust” to narrow it to 10 principles. Trust was discarded, as stakeholders felt that user trust would be the general outcome of the correct application of the other 10 ethical principles. For other ranking questions, the list of potential options was narrowed down to 5 options, in the most extreme case, from a pool of 46 options, based on PPI and steering group feedback. In some cases, the reduction was achieved by selecting a statement of interest to the PPI and steering group, as well as amalgamating specific statements into more generic statements. The inclusion and exclusion of other questions were also guided by PPI and steering group feedback. To focus respondents on the use of ML in the context of prudent health care, a vignette outlining a hypothetical use of ML with PROM data was presented in the questionnaire’s introduction section and then again after the demographic question section. The questionnaire was kept as short as possible, while keeping it informative, to increase the chances of questionnaire completion and be considerate of participant privacy.</p>
        <p>The questionnaire was divided into 2 arms so that questions could be phrased in a way that was more relevant to members of the public in one arm and the health care professionals in the other arm. The questionnaire was anonymous, except when participants chose to leave their details for consideration in a further qualitative part of the project. The question inquiring about the respondents’ ethnicity was taken from the standard recommended set for Wales by the Office for National Statistics (ONS), and the age categories were adapted from ONS 6a and 6f categories to account for our target population [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>].</p>
        <p>The final English version of the questionnaire can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. This was also translated into Welsh, and both versions of the questionnaire were available on the same link.</p>
      </sec>
      <sec>
        <title>Public and Professional Steering Group</title>
        <p>The project had extensive PPI as well as input from a wider steering group. Two lay members sat on the PPI group and provided regular input into shaping the project and making sure the language used in the survey was appropriate and accessible. Moreover, a volunteer editorial panel from the Cardiff and Vale University Health Board’s Patient Experience Team also proofread the survey and provided comments. The project also benefited from the input of the project’s steering group, which consisted of a diverse range of professionals from the NHS, academia, and government. Additionally, the project team consulted other members of their department and staff experienced in survey design, working at the University Hospital of Wales, regarding various aspects of the questionnaire wording.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>The project’s documentation was first submitted to Cardiff and Vale University Health Board’s research and development department (protocol number 8870) and subsequently submitted to the national Integrated Research Application System (IRAS ID 345131). The project was approved by Health and Care Research Wales (24/HCRW/0021) but was deemed exempt from needing approval by a research ethics committee. Implied consent was used, as the voluntary questionnaire was anonymous, and all participants had to acknowledge the privacy statement before being allowed to proceed with the questionnaire (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). To maintain privacy, only members of the research team had access to the full dataset, and all outputs were assessed for statistical disclosure. Participants received no compensation for taking part in the survey.</p>
      </sec>
      <sec>
        <title>Participants</title>
        <p>The study used an open survey and convenience sampling. To be eligible to participate in the study, potential participants needed to have met at least one of two eligibility criteria: (1) members of the public living in Wales who were 16 years or older or (2) registered health care professionals working in Wales who make treatment or therapy decisions together with or for patients.</p>
        <p>Members of the public living in Wales are all potential patients of health care services delivered in Wales, and therefore, the study was not limited to those who were current patients. The lower age limit of 16 years was selected as, in general, those who are 16 years old are considered to have the capacity to consent for health care treatment in the United Kingdom.</p>
        <p>The second eligibility criterion was designed to allow those health care professionals who work in Wales but commute from England to also participate in the study. Because the study was a part of a wider research program focusing on improving value in health care and focused on the decision-making aspect of health care delivery, this criterion was limited to registered health care staff who participate in such decision-making.</p>
        <p>Respondents could only select one eligibility criterion, even if they met both.</p>
      </sec>
      <sec>
        <title>Questionnaire Deployment</title>
        <p>The questionnaire was deployed via the Microsoft Forms platform and hosted within the Cardiff and Vale University Health Board’s digital ecosystem, with the survey being voluntary and respondents not needing to fill it out to access any other part of the organization’s website. The links and QR codes to the questionnaire were distributed via social media, professional fora, messaging systems, and word of mouth. An advert was placed on our organizational website, and posters were placed throughout our organization. Advertising was undertaken in both English and Welsh; on social media, it was also disseminated in Polish. Key organizations and individuals were contacted to ask to distribute information about the study, including health care leaders, professional organizations, educational institutions, charities, religious and minority organizations, local government, governmental organizations, and members of the Senedd. The questionnaire was open from noon on December 4, 2024, to noon on March 4, 2025.</p>
        <p>Respondents did not receive any incentives to fill out the survey. Participants could not change their answers after the questionnaire was submitted but could revisit the questions before submission. The system did not record site visits or uncompleted questionnaire attempts and did not use cookies or monitor IP addresses to identify potential duplicate entries. Timestamps were not monitored for atypical responses.</p>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <p>Data from the survey were analyzed in Microsoft Excel using descriptive statistics, with statistical hypothesis testing undertaken in R (version 4.1.3; R Foundation for Statistical Computing) using chi-square with or without Monte Carlo simulated <italic>P</italic> values (<italic>B</italic>=2000) depending on the frequencies in the contingency tables and post hoc assessment for the chi-square test (Bonferroni method, with or without Monte Carlo simulated <italic>P</italic> values, <italic>B</italic>=2000). For comparisons with demographic comparators, the tests were done on percentages rather than counts, as counts were not available for the reference population. For demographic data, “prefer not to say” data were excluded from the analysis. For all nondemographic questions, a comparison was made only between those with either a positive or negative self-declared attitude toward AI, following the general advice to reduce the degrees of freedom for post hoc analysis [<xref ref-type="bibr" rid="ref33">33</xref>]. No inferential statistics were undertaken on the ranking results as ranks are not independent of each other [<xref ref-type="bibr" rid="ref26">26</xref>]. A <italic>P</italic> value of &#60;.05 was considered statistically significant. Respondent demographics were compared with those of the population of Wales and the NHS Wales workforce, as no statistics were available for the whole health care workforce for Wales. While the questionnaire asked respondents to choose their ethnicity using 18 groups listed by the ONS [<xref ref-type="bibr" rid="ref31">31</xref>], results were presented using 5 overarching ONS groups to provide a more accessible overview of the results [<xref ref-type="bibr" rid="ref31">31</xref>], and because StatsWales only provided this granularity of data [<xref ref-type="bibr" rid="ref34">34</xref>]. 
Similarly, while participants were asked to state the county in which they lived or worked, these results have been amalgamated to the territories covered by the 7 NHS Wales health boards for a more accessible overview of the results.</p>
        <p>The main subgroup analysis was carried out based on participants’ self-reported attitude toward health care AI (support, oppose, or uncertain). All participants were asked to list the 3 most important barriers and facilitators to the adoption of such health care ML technologies, with this information collected via free-text fields. As such, themes had to be developed before statistical analysis could be performed on the answers to these questions. This step should not be confused with thematic analysis used in qualitative research but rather be seen as a method of accounting for potential variations in spelling, or because participants explained a concept rather than simply stating it.</p>
        <p>To aid with the free-text analysis, the responses to these questions, segregated by eligibility criteria and attitude toward health care AI, were imported into MAXQDA 24 (VERBI Software). In MAXQDA 24, the AI Assist “Suggest Subcodes” function was used with the suggestion language “English” and the “add bullet list with examples for each subcode” option selected on March 28, 2025. The generated subthemes were compiled and reviewed by one researcher (MP), and sense-checked by another colleague based on MAXQDA 24’s original suggestions. One researcher (MP) used this list as a starting point for coding the free-text responses, but adjusted the list by generating new subthemes or not using redundant MAXQDA 24–generated subthemes. After MP coded 100% of the free-text responses, 20% of the responses were checked by a second researcher (KEW). Discrepancies were resolved via discussion, and both researchers then agreed on the final themes. MAXQDA 24 AI Assist was not used to code individual responses.</p>
        <p>The a priori analysis plan was to check how often specific barriers and facilitators were mentioned across all the questions, and how often specific barriers and facilitators were mentioned as the most important barrier or facilitator. If a respondent mentioned more than 3 items across the 3 free-text barrier questions and the 3 free-text facilitator questions, all of them would be included in the frequency count. If a respondent mentioned more than one item in the questions pertaining to the most important barrier and most important facilitator, it was a priori decided that the first item mentioned would be counted as the most important barrier or facilitator, unless the text indicated which of the mentioned items was to be considered as the most important.</p>
        <p>Since participants had to complete all questions (except contact detail questions), no responses were discarded, even if only token answers were provided to the open questions. No statistical methods were used to adjust the answers for any nonrepresentative characteristics.</p>
      </sec>
      <sec>
        <title>Checklists</title>
        <p>The CHERRIES (Checklist for Reporting Results of Internet E-Surveys) checklist has been used for this survey [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Respondent Demographics</title>
        <p>A total of 309 responses were collected. Respondent demographics are presented in <xref ref-type="table" rid="table1">Table 1</xref>, and additional health care staff demographics are presented in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. All respondents chose to fill out the questionnaire in English. The sex distribution did not differ significantly among the members of the public sample when compared to the reference populations (<xref ref-type="table" rid="table1">Table 1</xref>), but it did in the health care professional sample (<xref ref-type="table" rid="table1">Table 1</xref>). In the health care group, women were relatively underrepresented and men overrepresented (post hoc <italic>P</italic>=.04 for both sexes). Both ethnicity and age distribution did not differ significantly from the reference populations for both members of the public and health care professionals (<xref ref-type="table" rid="table1">Table 1</xref>). Among members of the public (<xref ref-type="table" rid="table1">Table 1</xref>), there was an overrepresentation of respondents from the region covered by the Cardiff and Vale University Health Board (post hoc <italic>P</italic>&#60;.001). There was no reliable comparator for health care professionals. At a more granular level, at least one response was received from members of the public living in each Welsh county, but not from health care staff working within each county. Most respondents self-reported as having, in general, a positive attitude toward health care AI (<xref ref-type="table" rid="table1">Table 1</xref>).</p>
        <p>Those who selected the eligibility criterion of being registered health care professionals working in Wales and being involved in therapeutic or treatment decision-making came from all major professional groups, with most (32/130, 24.6%) describing themselves as doctors or dentists, followed by health care scientists (29/130, 22.3%) (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). The largest self-reported primary areas of work were “other secondary care” and “diagnostics.” The majority of respondents declared that their primary patient groups were adults.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Respondent demographics.<sup>a</sup></p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="20"/>
            <col width="160"/>
            <col width="110"/>
            <col width="110"/>
            <col width="120"/>
            <col width="70"/>
            <col width="0"/>
            <col width="110"/>
            <col width="110"/>
            <col width="120"/>
            <col width="70"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Characteristics</td>
                <td colspan="5">Public (n=179)</td>
                <td colspan="4">Health care professionals (n=130)</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Values, n (%)</td>
                <td>Reference, %</td>
                <td>Chi-square (<italic>df</italic>)</td>
                <td><italic>P</italic> value</td>
                <td colspan="2">Values, n (%)</td>
                <td>Reference, %</td>
                <td>Chi-square (<italic>df</italic>)</td>
                <td><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Sex</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>0.9 (1)</td>
                <td>.34</td>
                <td colspan="2">
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>5.8 (1)</td>
                <td>.02</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Female</td>
                <td>102 (57.0)</td>
                <td>51.1</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">75 (57.7)<sup>b</sup></td>
                <td>76.6<sup>b</sup></td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Male</td>
                <td>71 (39.7)</td>
                <td>48.9</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">51 (39.2)<sup>b</sup></td>
                <td>23.4<sup>b</sup></td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Prefer not to say</td>
                <td>6 (3.6)</td>
                <td>N/A<sup>c</sup></td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">4 (3.1)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Ethnicity</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>1.2 (N/A)</td>
                <td>&#62;.99</td>
                <td colspan="2">
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>0.8 (N/A)</td>
                <td>&#62;.99</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>White</td>
                <td>161 (89.9)</td>
                <td>93.9</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">119 (91.5)</td>
                <td>79.7</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Asian or Asian British</td>
                <td>7 (4.0)</td>
                <td>3.0</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">4 (3.1)</td>
                <td>4.6</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Black, African, Caribbean, or Black British</td>
                <td>4 (2.3)</td>
                <td>0.8</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">2 (1.5)</td>
                <td>1.3</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mixed or multiple ethnic groups</td>
                <td>3 (1.7)</td>
                <td>1.6</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">1 (0.8)</td>
                <td>1.0</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other ethnic groups</td>
                <td>1 (0.6)</td>
                <td>1.3</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">1 (0.8)</td>
                <td>1.4</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Prefer not to say, not stated, or missing data</td>
                <td>3 (1.7)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">3 (2.3)</td>
                <td>12.0</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Age (years)</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>8.9 (5)</td>
                <td>.11</td>
                <td colspan="2">
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>4.8 (N/A)</td>
                <td>.29</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>16-24</td>
                <td>25 (14.0)</td>
                <td>14.0</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">5 (3.8)</td>
                <td>6.9</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>25-34</td>
                <td>42 (23.5)</td>
                <td>14.8</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">30 (23.1)</td>
                <td>23.8</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>35-49</td>
                <td>41 (22.9)</td>
                <td>21.1</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">62 (47.7)</td>
                <td>34.9</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>50-64</td>
                <td>44 (24.6)</td>
                <td>24.6</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">29 (22.3)</td>
                <td>32.5</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>65-74</td>
                <td>19 (10.6)</td>
                <td>13.8</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">2 (1.5)</td>
                <td>2.0</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>75 or over</td>
                <td>4 (2.2)</td>
                <td>11.7</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">0 (0)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Prefer not to say</td>
                <td>4 (2.2)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">2 (1.5)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Region</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>25.1 (N/A)</td>
                <td>&#60;.001</td>
                <td colspan="2">
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Aneurin Bevan UHB<sup>d</sup></td>
                <td>28 (15.6)</td>
                <td>18.8</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">6 (4.6)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Betsi Cadwaladr UHB</td>
                <td>14 (7.8)</td>
                <td>21.9</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">10 (7.7)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cardiff and Vale UHB</td>
                <td>83 (46.4)<sup>b</sup></td>
                <td>16.4<sup>b</sup></td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">85 (65.4)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cwm Taf Morgannwg UHB</td>
                <td>18 (10.1)</td>
                <td>14.1</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">2 (1.5)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hywel Dda UHB</td>
                <td>10 (5.6)</td>
                <td>12.3</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">4 (3.1)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Powys THB<sup>e</sup></td>
                <td>4 (2.2)</td>
                <td>4.2</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">3 (2.3)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Swansea Bay UHB</td>
                <td>22 (12.3)</td>
                <td>12.3</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">17 (13.1)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Prefer not to say</td>
                <td>0 (0)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">3 (2.3)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>General health care AI<sup>f</sup> attitude</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>N/A</td>
                <td>N/A</td>
                <td colspan="2">
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Support</td>
                <td>116 (65)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">93 (72)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Oppose</td>
                <td>23 (13)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">8 (6.0)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Uncertain</td>
                <td>40 (22)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td colspan="2">29 (22)</td>
                <td>N/A</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Comparators for sex were from the Office for National Statistics (ONS) 2021 Census and StatsWales September 30, 2023, data; note that StatsWales presented the categories as “men” and “women” [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Comparators for ethnicity were taken from the Welsh Government’s website report on the 2021 Census and StatsWales September 30, 2023, data [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Age comparators were taken from the ONS 2021 Census and StatsWales September 30, 2023, data; note that the ONS data only allowed for the calculation of a 15-24 category rather than 16-24, and the StatsWales data only allowed for the following comparator categories to be calculated: ≤25, then 26-35, 36-50, 51-65, and &#62;65 [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. The only comparator used for region data was the StatsWales population estimate by local authority for midyear 2023 [<xref ref-type="bibr" rid="ref42">42</xref>]. For post hoc test <italic>P</italic> values, see text.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>Statistically significant in post hoc analysis (see text for details).</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>N/A: not applicable.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>UHB: University Health Board.</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>THB: Teaching Health Board.</p>
            </fn>
            <fn id="table1fn6">
              <p><sup>f</sup>AI: artificial intelligence.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Demographics by AI Attitude</title>
        <p>There were no statistically significant differences between those with a positive and negative attitude toward AI when comparing sex (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>; <italic>χ</italic><sup>2</sup><sub>1</sub>=0.4, <italic>P</italic>=.53, and <italic>χ</italic><sup>2</sup>=1, <italic>P</italic>=.43, respectively), ethnicity (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>; <italic>χ</italic><sup>2</sup>=0.3, <italic>P</italic>=.75, and <italic>χ</italic><sup>2</sup>=0.03, <italic>P</italic>&#62;.99, respectively), and age (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>; <italic>χ</italic><sup>2</sup>=3.4, <italic>P</italic>=.64, and <italic>χ</italic><sup>2</sup>=1.9, <italic>P</italic>=.61, respectively) in both respondent groups. With respect to ethnicity, all ethnicities other than “Welsh/English/Scottish/Northern Irish/British” were defined as minority ethnicities.</p>
      </sec>
      <sec>
        <title>Focused Questions</title>
        <p>Across all groups, most respondents stated that patients (members of the public group) and clinicians (health care professional group) should be able to refuse to have AI used in their care or practice. This choice was more pronounced in those with a negative or uncertain self-declared attitude toward health care AI (<xref rid="figure1" ref-type="fig">Figure 1</xref>A,B), but a difference between those with a positive and negative attitude was only statistically significant among members of the public (<italic>χ</italic><sup>2</sup>=14.7, <italic>P</italic>&#60;.001, with post hoc <italic>P</italic>=.001 for “no” and <italic>P</italic>=.002 for “yes” responses; <italic>χ</italic><sup>2</sup>=2.8, <italic>P</italic>=.24, in the health care professional group).</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Responses to questions 8 (A), 10 (C), 30 (B), and 32 (D) of the questionnaire presented by the general attitude toward health care AI. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The overwhelming majority of respondents with a positive self-declared attitude toward health care AI would, in principle, agree to sharing health care data to improve an AI technology that was already used in their care (<xref rid="figure1" ref-type="fig">Figure 1</xref>C,D). While skeptics in both groups overwhelmingly objected to such use of health care data (<italic>χ</italic><sup>2</sup>=81.8, <italic>P</italic>&#60;.001, for members of the public, and <italic>χ</italic><sup>2</sup>=48.3, <italic>P</italic>&#60;.001, for health care professionals, post hoc <italic>P</italic>&#60;.001 for “no” and “yes” responses in both groups), most of those with an uncertain attitude also agreed with the use of such data to improve existing AI technologies.</p>
        <p>Respondents in both groups tended to trust academic and NHS institutions more to develop health care AI technologies when compared to civil service and private organizations (<xref rid="figure2" ref-type="fig">Figure 2</xref>A,B). This pattern held true for respondents from all groups of self-declared attitudes toward AI.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Responses to questions 9 (A) and 31 (B) of the questionnaire presented by the general attitude toward health care AI. Items are presented in order of overall rank (highest ranked on top, lowest ranked at the bottom), while bars represent both the overall rank and the responses given by the different AI attitude subgroups. The x-axis denotes the reciprocal of the rank. AI: artificial intelligence; NHS: National Health Service.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Across all groups, respondents preferred health care professionals to have more input into the clinical decision-making process over a more automated and speedier process (<xref rid="figure3" ref-type="fig">Figure 3</xref>A,B). All respondents from both AI-skeptical groups exclusively selected this answer, but this was only statistically significant in the members of the public group (<italic>χ</italic><sup>2</sup>=10.4, <italic>P</italic>=.01; <italic>P</italic>&#62;.99 for “I do not know”; <italic>P</italic>=.01 for preference of more health care professional input; and <italic>P</italic>=.02 for preference for faster decision-making; <italic>χ</italic><sup>2</sup>=3.7, <italic>P</italic>=.15, in the health care professional group).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Responses to questions 11 (A), 12 (C), 33 (B), and 34 (D) of the questionnaire presented by the general attitude toward health care AI. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Overall, there was no clear preference for AI mistakenly recommending treatment or mistakenly not recommending a treatment, as most respondents selected the “I do not know” answer (<xref rid="figure3" ref-type="fig">Figure 3</xref>C,D). In both AI-skeptical groups, respondents were evenly split between the “I do not know” answer and preferring the AI to mistakenly not recommend treatment, with only a minority preferring it to mistakenly recommend the treatment. While not as pronounced, a similar pattern was present among the uncertain groups. There was no statistically significant difference between those with a self-declared positive and negative attitude toward AI in the health care professional group (<italic>χ</italic><sup>2</sup>=2.4, <italic>P</italic>=.29), but there was among the members of the public (<italic>χ</italic><sup>2</sup><sub>2</sub>=8.4, <italic>P</italic>=.02), with no significant difference among those who selected “I do not know” (<italic>P</italic>&#62;.99) and those who preferred treatment to be mistakenly not recommended (<italic>P</italic>=.33), but significant differences among those preferring a mistaken treatment recommendation (<italic>P</italic>=.03).</p>
        <p>When asked to rank 5 potential facilitators of health care AI, members of the public ranked as the highest option the statement “My healthcare professional (such as a doctor or nurse) has a choice to follow the technology’s recommendation and can make a different decision if they think another option is better” (<xref rid="figure4" ref-type="fig">Figure 4</xref>A). The exception was the group of AI skeptics, who, while still highly valuing that answer, scored slightly higher for the answer stating that the use of AI would not replace them having an appointment with a health care professional; this was the overall second-highest-ranked answer among the members of the public. The third highest ranked facilitator was that the health care professional could explain why an AI reached a specific decision. Overall, speeding up care and knowing that the technology worked for people of a similar background were the lowest-scoring facilitators for the members of the public, particularly for the AI skeptics, although AI supporters gave similar preference to the 3 midscoring answers. Among health care professionals (<xref rid="figure4" ref-type="fig">Figure 4</xref>B), the highest-ranked facilitator was the presence of national guidance for AI implementation, including on legal liability, and was followed by pre- and postimplementation evidence for the AI’s effectiveness. This was followed by transparency about the destination of the data collected by the AI and clinician agreement as to this destination, followed by conveniently delivered training, and the AI providing binary recommendations. Among health care professionals, there was little variability in the responses given by the 3 AI attitude groups.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Ranking of various potential facilitators (A and B) and barriers (C and D) to health care AI adoption by members of the public (A and C) and health care professionals (B and D); questions 13, 14, 35, and 36 in the questionnaire. The items are presented in order of overall rank (highest ranked on top, lowest ranked at the bottom), while bars represent both the overall rank and the responses given by the different AI attitude subgroups. The x-axis denotes the reciprocal of the rank. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Among members of the public, the highest-ranked barrier to AI adoption was that AI might distract the health care professional from the bigger picture of the patients’ needs (<xref rid="figure4" ref-type="fig">Figure 4</xref>C). This was followed by a lack of trust of the health care professional in the AI technology. The third highest ranked barrier was if the technology was used without disclosing it to the patient or if the patient felt pressured to have it used in their care. This barrier was somewhat higher ranked by AI skeptics when compared to AI supporters. The last 2 barriers were not being told about the benefits of the use of AI and whether people outside of the patient’s care team would be able to see the information used by the AI. For health care professionals, the highest-ranked barrier was the lack of reassurance about the AI’s performance in their patient group, except for AI skeptics, who ranked limits on clinicians’ decision-making freedom as the highest barrier, which was overall the third-highest-scoring barrier (<xref rid="figure4" ref-type="fig">Figure 4</xref>D). The second-highest overall scoring barrier was the lack of stakeholder input into the AI’s development. This was followed by health care professionals being distracted from their patients’ needs and their colleagues having a negative attitude toward AI.</p>
      </sec>
      <sec>
        <title>Ranking of Ethics Principles</title>
        <p>There was a large congruence between members of the public and health care professionals in the ranking of ethics principles relating to health care AI, and the highest-ranked principles and two lowest-ranked principles were the same in both groups (<xref rid="figure5" ref-type="fig">Figure 5</xref>A,B). These principles were described slightly differently to the two eligible groups (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). Moreover, similar trends were seen in the AI skeptic groups in both eligibility criteria groups. AI skeptics ranked autonomy as the most important principle, which overall was the third-highest-ranked principle, while beneficence, which was overall the highest-ranked principle, was ranked only as sixth by AI skeptics. Similarly, dignity and responsibility were consistently ranked higher by AI skeptics than by the overall population. Environmental impact was the lowest-ranked principle among all respondent groups. Solidarity was the overall second-lowest-scoring principle in both the members of the public and the health care professional respondent groups. Nevertheless, for members of the public who are AI skeptics, fairness was the second lowest scoring principle, while for those who were uncertain, fairness, transparency, and dignity scored lower than solidarity. In the health care professional group, AI skeptics scored confidentiality and fairness lower than solidarity, but solidarity was the second lowest scoring principle among those with an uncertain attitude.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Ranking of health care AI ethics principles (questions 15 and 37 in the questionnaire) by members of the public (A) and health care professionals (B). The principles are presented in order of overall rank for each eligibility criterion group (highest ranked on top, lowest ranked at the bottom), while bars represent both the overall rank and the responses given by the different AI attitude subgroups. The x-axis denotes the reciprocal of the rank. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Free-Text Responses</title>
        <p>Respondents were asked to state their top barrier and facilitator to the adoption of AI in health care and then two other important barriers and facilitators. As these were free-text fields, respondents stated (including blank and invalid answers) a total of 1054 barriers and 1017 facilitator items, while 927 items were originally anticipated for each. The final list of themes and subthemes used to categorize these responses can be found in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p>
        <p>When looking at the top barrier to adoption (<xref rid="figure6" ref-type="fig">Figure 6</xref>), both members of the public and health care professionals have placed the potential for unintended or negative consequences as their most important concern. This theme was particularly popular among AI skeptics in both groups, though health care professional skeptics were even more concerned about the erosion of human-centered care, which was the overall third-ranked barrier. None of the respondents mentioned cost, environmental concerns, or encroachment of private industry as their main concern. There were no statistically significant differences between skeptics and those with a positive attitude toward AI (<italic>χ</italic><sup>2</sup>=10.7, <italic>P</italic>=.60, for members of the public; <italic>χ</italic><sup>2</sup>=18.2, <italic>P</italic>=.18, for health care professionals).</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Count of all the most important barrier themes to AI adoption stated by participants and normalized to the number of items stated in each group. n indicates the number of counted items, which in this case is equal to the number of respondents in each group. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>When all responses to the 3 barrier questions were accounted for (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), the top 3 themes among both respondent groups were erosion of person-centered care, potential for unintended or negative consequences, and data and algorithmic concerns, although the last of these was most important for health care professionals. Erosion of person-centered care was the most frequently stated barrier among skeptics. None of the skeptics in either group provided an invalid or blank answer to any of the barrier questions. Among members of the public, there were no statistically significant differences between skeptics and those with a positive attitude toward AI (<italic>χ</italic><sup>2</sup>=22.8, <italic>P</italic>=.16), but among health care professionals there was a significant difference between these groups (<italic>χ</italic><sup>2</sup>=32.7, <italic>P</italic>=.01), with post hoc testing showing that issues relating to person-centered care were much more important to skeptics (<italic>P</italic>&#60;.001).</p>
        <p>The top facilitator for both respondent groups (<xref rid="figure7" ref-type="fig">Figure 7</xref>) was the presence of an evidence base and of ongoing evidence generation with respect to health care AI. The overall order of frequency of mentions of each theme was similar for both groups, with the exception that health care professionals placed more importance on resolving data and algorithmic issues, while members of the public placed more importance on humans remaining in charge and ensuring positive patient outcomes. There were no statistically significant differences between AI attitude groups among health care professionals (<italic>χ</italic><sup>2</sup>=9.4, <italic>P</italic>=.34), but the overall distribution of answers was significantly different among members of the public (<italic>χ</italic><sup>2</sup>=40.1, <italic>P</italic>&#60;.001), with post hoc analysis showing (<italic>P</italic>&#60;.001) the larger proportion of the “none” group among skeptics.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Count of all the most important facilitator themes to AI adoption stated by participants and normalized to the number of items stated in each group. n indicates the number of counted items, which in this case is equal to the number of respondents in each group. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e81543_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>When all stated facilitators were considered (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), both respondent groups had the presence of an evidence base and of ongoing evidence generation with respect to health care AI as the most frequently mentioned theme, and humans remaining in charge as the second most frequently mentioned theme. Among members of the public, resolved data and algorithmic issues were the third most mentioned theme, while positive patient outcomes were the fourth most important theme; this was reversed for health care professionals. Notably, for AI skeptics, the most frequent theme was “none,” which consisted of blank or invalid answers, but this was only statistically significant for members of the public (<italic>χ</italic><sup>2</sup>=66.9, <italic>P</italic>&#60;.001; post hoc <italic>P</italic>&#60;.001). There was a statistically significant difference in answer distribution among health care professionals (<italic>χ</italic><sup>2</sup>=18.8, <italic>P</italic>=.04), but post hoc analysis did not identify any specific factors as significant.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Literature Search</title>
        <p>The scope of the literature search (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) was wider than the intended questionnaire scope, as the literature search covered all health care AI, while the questionnaire focused on ML technologies used in treatment and therapy. Making our literature review scope too narrow might have resulted in a low number of identified studies and only informed us as to what was already done in this specific field, while having a broader scope allowed us to consider questions that might not have yet been explored in this specific area, but that were explored in adjacent fields. Moreover, reviewing a wide range of literature allowed us to familiarize ourselves with a broad range of approaches used in similar studies and so inform our survey design.</p>
        <p>The literature review highlighted a relative lack of studies focusing on the patient or member of the public perspective on clinical AI adoption (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). As such, we considered it vital to attend to this perspective and work closely with our PPI representatives to focus the survey on the most pertinent questions to this population.</p>
      </sec>
      <sec>
        <title>Questionnaire Development</title>
        <p>When developing the questionnaire, we sought extensive input from our PPI and stakeholder groups, rather than focusing on using a specific technology adoption framework. Our PPI (who were residents of Wales) and stakeholder groups allowed us to identify which questions were most important to ask and allowed us to keep the questionnaire concise to ensure that the survey results would be informative, but the survey itself would not be burdensome. Additional feedback from colleagues and lay volunteers was used to ensure that the questionnaire was understandable to the intended audience. Consequently, while our questionnaire was not validated, it received a similar level of input from experts to a similar study described previously [<xref ref-type="bibr" rid="ref24">24</xref>]. Moreover, our extensive use of public involvement aligned our approach with recent developments in research methodology and good practice [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Finally, it was not always clear how to represent certain response options. For example, the primary area of work options presented in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> were selected based on clinician input, but there are other reasonable alternatives that could have been used.</p>
        <p>Nevertheless, after deploying the questionnaire, we received feedback that allowing only single answers to the eligibility criteria question, primary specialisms question, and geographical areas of work question was problematic for those who met both criteria or worked across several specialisms or in more than one health board’s territory. Yet, this should not have a negative impact on the interpretation of the results, since the demographic data is mainly used to ascertain the representativeness of our respondent population. Additionally, because the eligibility question was used to direct respondents to the correct questionnaire stream, a specific set of questions would need to be developed for respondents who met both eligibility criteria, which would likely be most health care professional respondents.</p>
        <p>We also received feedback that the specific barriers and facilitators mentioned throughout the survey were not always the most important ones for specific respondents (eg, questions 35 and 36 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). While we acknowledge this limitation, the survey was built with stakeholder input, and the free-text questions were specifically included to capture any themes we might have missed in the rest of the questionnaire.</p>
      </sec>
      <sec>
        <title>Respondent Demographics</title>
        <p>Considering Wales’ relatively small population of approximately 3.2 million, 309 responses represent a relatively high response rate compared to other similar studies conducted in other countries [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. The main deviations of the responder population from the reference population were the underrepresentation of women and overrepresentation of men among respondents in the health care professional group (despite most respondents being women), and overrepresentation of members of the public respondents from the Cardiff and Vale University Health Board area. We are not sure why women were underrepresented in the health care professional group, but the overrepresentation of respondents from the Cardiff and Vale University Health Board area is likely related to our research team being based in Cardiff, making it easier to advertise the study to those living and working in this area.</p>
        <p>To assess the impact of this, we undertook an additional analysis comparing the responses of women and men in the health care professional group to questions 30, 32, 33, and 34 (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>), with the only statistically significant difference (<italic>χ</italic><sup>2</sup><sub>2</sub>=9, <italic>P</italic>=.01; post hoc <italic>P</italic>=.02) being the increased proportion of “I do not know” responses among women to question 30 (asking about the right of clinicians to object to using health care AI technologies in their clinical practice). We also explored the profile of members of the public living in the Cardiff and Vale University Health Board area, finding that most responses from ethnic minority persons were from this location (22 out of 30 ethnic minority respondents). Consequently, we did not apply weights to the responses as the impact of this would be unlikely to be meaningful for the health care professional group, and we did not wish to disenfranchise ethnic minority responders from the members of the public group, as ethnic minorities are usually regarded as underrepresented in research [<xref ref-type="bibr" rid="ref43">43</xref>]. Nevertheless, while we acknowledge that a higher response rate from those living in rural areas would have been preferable, we did manage to get responses from those living in all Welsh counties.</p>
        <p>Finally, it is important to highlight that convenience sampling is associated with an array of limitations. Purposive sampling, together with a compensation for participation, could have potentially overcome these limitations, but resources for this were not available. Additional discussion regarding demographic data can be found in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
      </sec>
      <sec>
        <title>Support, Opposition, and Ambivalence to Health Care AI</title>
        <p>Most respondents from both the members of the public and health care professionals’ groups self-reported themselves to be in general support of health care AI (<xref ref-type="table" rid="table1">Table 1</xref>), and 22% of respondents in both groups described themselves as “uncertain.” Among health care professionals, 6% declared themselves as opposed, corresponding to 8 responses, while for members of the public, it was 13% corresponding to 23 individuals. The small size of the skeptic groups also affected the power and certainty around the conclusions derived from statistical hypothesis testing. Nevertheless, chi-square tests indicate that there were no significant differences between respondent demographics from all 3 AI attitude categories within the 2 eligibility groups (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p>
        <p>The low response rate from those generally opposed to health care AI was discussed with the PPI and stakeholder group members, with several possible reasons proposed. If skeptics were generally averse to digital technologies, they might not have found out about the survey. Furthermore, skeptics might have chosen not to answer these questions because they could have felt that they did not have enough knowledge to even attempt it. For those skeptics fearful of AI, for example, taking over their jobs or because of the negative news stories surrounding AI in relation to deepfakes, responding to such a survey might be seen as potentially validating any conclusions from the survey, even if their personal responses would not agree with the majority. An analogy was drawn with organizations not taking part in consultations, as if they do take part, they are simply listed as having taken part in it without noting their disagreement with the consultation’s conclusions. AI might also be seen as being pushed on the population, which might motivate some to exhibit contrarian attitudes toward it and not participate in anything relating to AI. Such an attitude of suspicion was even evident among some skeptical respondents, with one responding with “Nothing really. I dislike dystopian healthcare!” when asked to state a facilitator. Alternatively, the population of AI skeptics might be small, and our small proportion of skeptics is reflective of the wider population. Whatever the reason for this low proportion of skeptical respondents, this limits the amount of insight this survey can generate in relation to that population, and the certainty with which these results can be used to derive practices that make AI more acceptable to that population.</p>
        <p>The answers to the other questionnaire items corroborated the self-stated attitude toward health care AI. For example, skeptic respondents were predominantly in favor of the ability to object to the use of AI in health care (<xref rid="figure1" ref-type="fig">Figure 1</xref>A,B), with no members of the public opposing the right to such an objection, and skeptics were opposed to the use of their data to improve an AI, which was contrary to the trend displayed by those supporting AI or being ambivalent to it (<xref rid="figure1" ref-type="fig">Figure 1</xref>C,D). Similarly, AI skeptics often did not list any facilitators to AI adoption (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), potentially demonstrating that their opinion is set and unlikely to change. The distinct response pattern of skeptics, even when compared to the uncertain respondents, indicated that it would not have been insightful to combine these two groups for the purposes of statistical analysis; for example, <xref rid="figure5" ref-type="fig">Figure 5</xref> shows that the response patterns of the ambivalent groups are more like those of supporters than of skeptics.</p>
      </sec>
      <sec>
        <title>Barriers and Facilitators</title>
        <p>A general preference for the ability of patients and clinicians to refuse the use of AI in health care (<xref rid="figure1" ref-type="fig">Figure 1</xref>A,B) and for more health care professional input during the care process (<xref rid="figure3" ref-type="fig">Figure 3</xref>A,B) is consistent with a high ranking of autonomy among the 10 ethical principles (<xref rid="figure5" ref-type="fig">Figure 5</xref>A,B). This is also reflected in the ranking of specific barriers and facilitators (<xref rid="figure4" ref-type="fig">Figure 4</xref>A,C,D), and is particularly prominent among those with a negative attitude toward health care AI. In the free-text answers, prioritizing themes of humans remaining in charge and concerns about person-centered care (<xref rid="figure6" ref-type="fig">Figures 6</xref> and <xref rid="figure7" ref-type="fig">7</xref> and <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>) also reflected this, although the latter also relates to the ethical principle of dignity (<xref rid="figure5" ref-type="fig">Figure 5</xref>). As such, to facilitate adoption, automation should target specific tasks, rather than the overall care process, and it should be ensured that a human stays in the loop. While facilitating consent to AI-augmented interventions seems desirable, this might not be feasible if AI-augmented practice becomes the standard of care [<xref ref-type="bibr" rid="ref44">44</xref>].</p>
        <p>The higher trust in the NHS and academic institutions to develop health care AI, when compared to civil service and private companies (<xref rid="figure2" ref-type="fig">Figure 2</xref>), was to a certain degree reflected in the free-text answers, though with more nuance. Themes of public sector organizational inefficiencies, the encroachment of private industry, ulterior motives associated with AI deployment, and a specific subtheme regarding the trustworthiness of developers and that a broad stakeholder group should be involved in the development of these technologies (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>) have highlighted the complexities of this issue. As one respondent stated when asked to list a facilitator:</p>
        <disp-quote>
          <p>If the AI was developed by a private non-profit, where the development of this would be open source. This would free the company from the bureaucracy, corruption and sheer incompetency that the government and NHS management are best at, (let's leave that to them). Secondly, a non-profit company would be focused on the work and not motivated by profit, being open source would allow for public scrutiny.</p>
        </disp-quote>
        <p>These results not only support cooperation between the NHS and academic institutions developing such technologies, but also the role of not-for-profit organizations, which might be able to overcome some hurdles faced by the public sector organizations, avoid concerns regarding profit prioritization, and foster trust by using open-access standards.</p>
        <p>Overall, most people were happy to have their information shared to improve the way an AI technology works, except for skeptics who overall opposed the use of their data in such a way (<xref rid="figure1" ref-type="fig">Figure 1</xref>C,D). This might relate to the principle of confidentiality (<xref rid="figure5" ref-type="fig">Figure 5</xref>), which was more important for skeptics who were members of the public. While data and algorithmic concerns were the most often mentioned overall barrier (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), some responses indicated that people wanted their data to be used to help others, which was reflected in the subtheme of AI updating and improvement (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). There is a potential interplay here with concerns about how private companies might use patient data and whether it will cause private enterprise to profit, rather than benefit the patients and the NHS. It is uncertain if AI skeptics would be more willing to share their data for the purposes of AI improvement if the AI technology was owned by the NHS or academia.</p>
        <p>There was no clear preference for whether AI should mistakenly recommend or not recommend treatment (<xref rid="figure3" ref-type="fig">Figure 3</xref>C,D), but the themes of potential harms and benefits were prominent in responses to other questions. Beneficence and nonmaleficence were the two overall highest-ranked ethical principles (<xref rid="figure5" ref-type="fig">Figure 5</xref>A,B), and unintended or negative consequences of AI were the most often mentioned top barrier (<xref rid="figure6" ref-type="fig">Figure 6</xref>). While positive patient outcomes themselves were often stated as an important facilitator, the related theme of evidence basis and ongoing evidence generation was the top-stated facilitator (<xref rid="figure7" ref-type="fig">Figure 7</xref> and <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). Yet, the picture given by <xref rid="figure4" ref-type="fig">Figure 4</xref> is somewhat more complex. The specific benefit of faster care was the second lowest rated facilitator for members of the public (<xref rid="figure4" ref-type="fig">Figure 4</xref>A), while the presence of national guidance was rated higher in that question as a facilitator than the presence of evidence for the effectiveness of the technology in the clinician’s patient cohort (<xref rid="figure4" ref-type="fig">Figure 4</xref>B). Similarly, patients ranked not being told about how a technology will improve their care as the second least important barrier (<xref rid="figure4" ref-type="fig">Figure 4</xref>C), although health care professionals ranked the lack of evidence of the AI’s performance in their patient group as the highest scoring barrier (<xref rid="figure4" ref-type="fig">Figure 4</xref>D). This highlights that knowledge about the potential benefit of the technology is important, but the exact importance is uncertain.</p>
        <p>It is worth noting that many of the themes identified were multifaceted. For example, accountability (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), corresponding to the ethical principle of responsibility (<xref rid="figure5" ref-type="fig">Figure 5</xref>), encompassed concerns of members of the public about who will be responsible for harms resulting from AI-enhanced care, but also of health care professionals wanting to know the extent of their liability arising from mistakes caused by an AI. Moreover, there were concerns that clinicians might start blaming AI for their own mistakes and that the government might blame the AI for their own failures. Furthermore, while patients and clinicians emphasized the need for freedom to reject AI recommendations, there were also worries that clinicians would reject AI recommendations without a good reason. This could be linked with responses that required an AI to be 100% accurate before being allowed to be used clinically, and the recognition that an AI technology might be able to spot things that a clinician might otherwise not notice. Finally, while some health care professionals expressed worries that patients might leverage AI against them, members of the public were concerned that AI might be used to “gaslight” patients with difficult presentations, but also expressed hope that AI might reduce the possibility of clinician judgment error. This shows that both patients and clinicians are often concerned about similar problems, and that while there are worries that the introduction of AI into health care might depersonalize care, there is also an appreciation that AI might alleviate some problems and should not be outright dismissed.</p>
      </sec>
      <sec>
        <title>Theoretical Contributions</title>
        <p>When comparing this survey’s findings to the TAM and UTAUT principles [<xref ref-type="bibr" rid="ref4">4</xref>], the key barriers and facilitators in our study were those relating to the perceived usefulness or performance of the technology (<xref ref-type="table" rid="table2">Table 2</xref>). Those relating to subjective norms were also prominent, mostly manifesting in the need to maintain human interaction during the health care encounter. The other TAM and UTAUT principles, while not completely absent, were not as strongly present among respondents’ answers. This supports the case for implementing digital technologies only if they can improve the health care process and outcomes, rather than only because an algorithm can perform a task with high accuracy or because a technology can be implemented for implementation’s sake. Furthermore, the results show that members of the public and clinicians do not want the human element of care to be eliminated. While some might see the implementation of digital health care technologies as an opportunity to decrease the amount of health care staff input into patient care, which is often a large part of health care costs, this survey’s results caution against such an approach. While digital health care technologies will hopefully speed up patient care, the survey results suggest that improving the quality of care is likely to be a more acceptable use of the time savings, for both members of the public and staff (<xref rid="figure6" ref-type="fig">Figures 6</xref> and <xref rid="figure7" ref-type="fig">7</xref>; <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Comparison between TAM<sup>a</sup> and UTAUT<sup>b</sup> principles and this study’s findings.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="200"/>
            <col width="400"/>
            <col width="400"/>
            <thead>
              <tr valign="top">
                <td>TAM or UTAUT principle</td>
                <td>Examples from this study</td>
                <td>Comments</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Perceived ease of use or individual effort expectancy</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Negative impact on health care staff, lack of understanding of artificial intelligence, digital and technical challenges, considerate deployment</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Several themes related to this technology adoption principle, but none were prominent among respondents</p>
                    </list-item>
                    <list-item>
                      <p>Does not relate directly to any ethics principle</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Perceived usefulness or performance expectancy</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Unintended or negative consequences</p>
                    </list-item>
                    <list-item>
                      <p>Evidence base and ongoing evidence generation</p>
                    </list-item>
                    <list-item>
                      <p>Beneficence and nonmaleficence</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Top barrier among both respondent groups</p>
                    </list-item>
                    <list-item>
                      <p>Top facilitator among both respondent groups</p>
                    </list-item>
                    <list-item>
                      <p>Top 2 ethics principles among both respondent groups</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Subjective norms or habits</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Humans (remain) in charge and person-centered care</p>
                    </list-item>
                    <list-item>
                      <p>Autonomy, responsibility, and dignity</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Both in the top 3 overall stated barriers and facilitators among both respondent groups, and at least one of the two in the top stated barriers and facilitators</p>
                    </list-item>
                    <list-item>
                      <p>The first 2 ethics principles were in the top 4 ranked principles in both groups, while dignity was fifth (health care staff) and sixth (public)</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Social influence</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Evidence base and ongoing evidence generation</p>
                    </list-item>
                    <list-item>
                      <p>Organizational culture</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Several themes related to this technology adoption principle, but none were prominent among respondents</p>
                    </list-item>
                    <list-item>
                      <p>Does not relate directly to any ethics principle</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Facilitating conditions</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Legislation, governance and regulation, funding to increase digital maturity, inappropriate deployment, considerate deployment</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Several themes related to this technology adoption principle, but none were prominent among respondents</p>
                    </list-item>
                    <list-item>
                      <p>Does not relate directly to any ethics principle</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>TAM: Technology Acceptance Model.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>UTAUT: Unified Theory of Acceptance and Use of Technology.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>While Jobin et al [<xref ref-type="bibr" rid="ref29">29</xref>] identified transparency and fairness as the two most frequently mentioned principles among AI guidelines, our results suggest that with regard to health care AI, these are not the most important principles among end users of such technologies. Beneficence and nonmaleficence were the two highest-ranked principles among both our respondent groups (<xref rid="figure5" ref-type="fig">Figure 5</xref>), followed by autonomy and responsibility; nonmaleficence and responsibility had joint third place in the ranking by Jobin et al [<xref ref-type="bibr" rid="ref29">29</xref>]. These differences between our respondent preferences and the findings by Jobin et al might potentially relate to the ease of expressing transparency requirements in the form of structured guidelines and legislation, while beneficence and nonmaleficence are harder to implement due to the dependence of the realization of these principles on how a technology is implemented in an organization, how end users apply it, and how (in the case of AI) the training population matches the patient population. Nevertheless, our data suggest that beneficence and nonmaleficence should be at the forefront of AI implementation guidelines.</p>
        <p>Finally, it is worth briefly relating our findings to other similar international studies. Our findings broadly correlate with Isbanner et al [<xref ref-type="bibr" rid="ref24">24</xref>], who reported in their Australian study that respondents were happy for health care to be augmented through the use of AI but did not want humans to be replaced in the care process. Interestingly, among their respondents, 13.4% stated that they “somewhat” or “strongly” oppose the development of AI, which is similar to the 13% recorded in our responses from members of the public (<xref ref-type="table" rid="table1">Table 1</xref>), suggesting that our small proportion of respondents opposed to AI might match trends in other countries culturally similar to Wales. Similar to our findings about the importance of the perceived usefulness or performance of the technology, a recent report from Germany by Kühne et al [<xref ref-type="bibr" rid="ref25">25</xref>] found that technology reliability was the main factor affecting technology acceptance. While respondents in the study by Kühne et al [<xref ref-type="bibr" rid="ref25">25</xref>] placed less emphasis on autonomy, similar to our study and that by Isbanner et al [<xref ref-type="bibr" rid="ref24">24</xref>], respondents preferred AI systems that work collaboratively with humans over those that replace human input. As such, our study’s results are largely in congruence with international findings.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>While there are some other limitations of this study, such as not having validated the questionnaire in a smaller population before deployment, not assessing the relationship of the questionnaire items using Cronbach α, or not using snowball sampling during the literature review, here we focus on the 3 main limitations of this study.</p>
        <p>The main limitations of this study are the use of convenience sampling, a high proportion of respondents from the Cardiff and Vale area, and a low number of respondents who are generally opposed to the use of AI in health care. Because of the anonymous nature of the survey, we were also unable to check for duplicate responses. Nevertheless, the study achieved good representativeness of the population of Wales. To minimize the risk of the voice of AI skeptics being lost, we presented the data split by overall attitude. Their responses are particularly important as specific approaches might need to be implemented to encourage them to use ML-based approaches in their care.</p>
        <p>While the survey intended to focus on the application of ML to facilitate prudent health care in Wales, as exemplified by the case study presented in the questionnaire (see <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>), the respondents often addressed broader issues with respect to the application of AI technologies in health care. This has been particularly evident from free-text responses mentioning large language models or interacting with chatbots. This is unsurprising given the prominence of these types of technologies, for example, ChatGPT, in the media (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>). Hence, respondents might have used this survey as an opportunity to express their concerns regarding the broader application of health care AI. Consequently, the findings from this survey are likely to be applicable to a broader range of health care AI technology implementation scenarios than those focusing solely on prudent health care. Yet, this also means that the results less reliably represent the views of respondents on this specific topic.</p>
        <p>Lastly, the survey did not explicitly target other stakeholder groups, which are vital for the adoption of new health care technologies, such as health care leaders, government managers, as well as information technology and governance specialists. These people could still fill out the questionnaire if they met the inclusion criteria. Nevertheless, diffusion of innovation theory suggests that since members of the public and clinicians are the final adopters of health care technology, it is they who primarily need to be convinced of a technology’s appropriateness [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      </sec>
      <sec>
        <title>Future Work</title>
        <p>This survey did not include any questions that tried to address why any specific barriers and facilitators were deemed important. The next step of this project is to conduct a range of focus groups to address this question and to engage members of underrepresented groups. Information gathered from focus groups, together with this survey’s data, will allow for the formation of policy recommendations.</p>
        <p>Research targeting health care leaders, policymakers, and informaticians would help describe the challenges of AI adoption from an organizational perspective [<xref ref-type="bibr" rid="ref2">2</xref>], although individual NHS Wales organizations make their own adoption decisions for specific digital technologies.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Based on the responses from both members of the public living in Wales and health care professionals participating in treatment or therapy decision-making and working in Wales, there is a strong preference for ensuring that AI technologies are assessed for their effectiveness and that these technologies do not replace human input into the care process. Moreover, there is a clear hesitancy toward the introduction of commercial technologies and a preference for developing these with strong clinical and academic input. Consequently, developing technologies locally by health boards or trusts, or via DHCW, together with robust internal evaluation, might present the way forward. While such an approach might not be feasible for some digital applications, it might be particularly suitable for ML applications within the context of prudent health care, where algorithms are likely to require training on local population data. Moreover, for such solutions to be acceptable, it is important that the results are reviewed by a human clinician and that patient-clinician contact is not decreased due to the introduction of these technologies, even if this reduces the cost-savings that could result from the introduction of such technologies.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Literature review.</p>
        <media xlink:href="jmir_v27i1e81543_app1.docx" xlink:title="DOCX File , 510 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Survey questionnaire.</p>
        <media xlink:href="jmir_v27i1e81543_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 409 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Additional tables and figures.</p>
        <media xlink:href="jmir_v27i1e81543_app3.docx" xlink:title="DOCX File , 783 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Google search statistics as of June 21, 2025.</p>
        <media xlink:href="jmir_v27i1e81543_app4.pdf" xlink:title="PDF File  (Adobe PDF File), 184 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>CHERRIES checklist.</p>
        <media xlink:href="jmir_v27i1e81543_app5.pdf" xlink:title="PDF File  (Adobe PDF File), 53 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CHERRIES</term>
          <def>
            <p>Checklist for Reporting Results of Internet E-Surveys</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DHCW</term>
          <def>
            <p>Digital Health and Care Wales</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">NHS</term>
          <def>
            <p>National Health Service</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">NICE</term>
          <def>
            <p>National Institute for Health and Care Excellence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ONS</term>
          <def>
            <p>Office for National Statistics</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PPI</term>
          <def>
            <p>patient and public involvement</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PROM</term>
          <def>
            <p>patient-reported outcome measures</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">TAM</term>
          <def>
            <p>Technology Acceptance Model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">UTAUT</term>
          <def>
            <p>Unified Theory of Acceptance and Use of Technology</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We thank Dr Meg Kiseleva and Dr Simone Willis for their advice on developing and executing the literature search strategy; Dr Meg Kiseleva also helped with the MAXQDA 24 themes assessment. We thank Dr Robert Palmer for help with the analysis of the free-text responses, and Prof Andrew Brass and Ms Frances Hooley for advice on this project. We thank Ms Hawys Waddington for translating various project documents into Welsh and Mr Ellis Carne for tabulating the results by sex. We thank Dr Alan Davies and Dr Tim Pickles for statistical advice. Finally, we thank our PPI and steering group members for their input in both the design of the questionnaire and the discussion of the results.</p>
      <p>The authors declare the use of generative AI in the research process. The MAXQDA 24 AI Assist tool was used to develop the initial themes for free-text analysis. Responsibility for the final manuscript lies entirely with the authors. Generative AI tools are not listed as authors and do not bear responsibility for the final outcomes.</p>
    </ack>
    <notes>
      <sec>
        <title>Funding</title>
        <p>This review was carried out as part of MP’s DClinSci/HSST studies in health informatics, which are funded by Health Education and Improvement Wales. This research is part of a project, “Identifying the Barriers and Facilitators for Implementing Machine Learning to Achieve Value-Based Healthcare in Wales,” for which the Centre for Healthcare Evaluation, Device Assessment, and Research (CEDAR) received funding from Value Transformation (previously the Welsh Value in Health Centre), part of NHS Wales Performance and Improvement. Open Access funding for this publication has been provided by the University of Manchester. None of the funders played any role in the study design, or the collection, analysis, and interpretation of data, or the writing of the paper, or the decision to submit it for publication.</p>
      </sec>
    </notes>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The datasets generated or analyzed during this study are not publicly available due to the study’s information governance arrangements. The data can be requested from the corresponding author, and the request will be assessed for compliance with the study’s information governance arrangements.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>EM</given-names>
            </name>
          </person-group>
          <source>Diffusion of Innovations. 4th ed</source>
          <year>1995</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>The Free Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Linstone</surname>
              <given-names>HA</given-names>
            </name>
          </person-group>
          <source>Multiple Perspectives for Decision Making: Bridging the Gap Between Analysis and Action</source>
          <year>1984</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>North-Holland</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Armero</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gray</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Fields</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cole</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bates</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kovacheva</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>A survey of pregnant patients' perspectives on the implementation of artificial intelligence in clinical care</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2022</year>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>46</fpage>
          <lpage>53</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36250788"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocac200</pub-id>
          <pub-id pub-id-type="medline">36250788</pub-id>
          <pub-id pub-id-type="pii">6762040</pub-id>
          <pub-id pub-id-type="pmcid">PMC9748543</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heinsch</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wyllie</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Carlson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wells</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tickner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kay-Lambkin</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Theories informing eHealth implementation: systematic review and typology classification</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>e18500</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/5/e18500/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/18500</pub-id>
          <pub-id pub-id-type="medline">34057427</pub-id>
          <pub-id pub-id-type="pii">v23i5e18500</pub-id>
          <pub-id pub-id-type="pmcid">PMC8204232</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rouleau</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ramamoorthi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Boxall</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Maloney</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zelmer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Scott</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Larsen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wijeysundera</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Ziegler</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bhatia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kishimoto</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Steele Gray</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Desveaux</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Mapping theories, models, and frameworks to evaluate digital health interventions: scoping review</article-title>
          <source>J Med Internet Res</source>
          <year>2024</year>
          <volume>26</volume>
          <fpage>e51098</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2024//e51098/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/51098</pub-id>
          <pub-id pub-id-type="medline">38315515</pub-id>
          <pub-id pub-id-type="pii">v26i1e51098</pub-id>
          <pub-id pub-id-type="pmcid">PMC10877497</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thornton</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hardie</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Horton</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gerhold</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Priorities for an AI in health care strategy</article-title>
          <source>The Health Foundation</source>
          <year>2024</year>
          <month>6</month>
          <access-date>2025-11-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.health.org.uk/reports-and-analysis/briefings/priorities-for-an-ai-in-health-care-strategy">https://www.health.org.uk/reports-and-analysis/briefings/priorities-for-an-ai-in-health-care-strategy</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="web">
          <article-title>UK Standards for Public Involvement</article-title>
          <source>NIHR, Chief Scientist Office, Health and Care Research Wales, and Public Health Agency</source>
          <year>2019</year>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sites.google.com/nihr.ac.uk/pi-standards/standards">https://sites.google.com/nihr.ac.uk/pi-standards/standards</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <article-title>Impact of public involvement on the ethical aspects of research</article-title>
          <source>Health Research Authority</source>
          <year>2024</year>
          <month>11</month>
          <day>5</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.hra.nhs.uk/planning-and-improving-research/best-practice/public-involvement/impact-public-involvement-ethical-aspects-research/">https://www.hra.nhs.uk/planning-and-improving-research/best-practice/public-involvement/impact-public-involvement-ethical-aspects-research/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <article-title>Patient and public involvement policy</article-title>
          <source>NICE</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.nice.org.uk/get-involved/people-and-communities/patient-and-public-involvement-policy">https://www.nice.org.uk/get-involved/people-and-communities/patient-and-public-involvement-policy</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Porter</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Habli</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>McDermid</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kaas</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A principles-based ethics assurance argument pattern for AI and autonomous systems</article-title>
          <source>AI Ethics</source>
          <year>2023</year>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>593</fpage>
          <lpage>616</lpage>
          <pub-id pub-id-type="doi">10.1007/s43681-023-00297-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>What does it mean for a clinical AI to be just: conflicts between local fairness and being fit-for-purpose?</article-title>
          <source>J Med Ethics</source>
          <year>2024</year>
          <fpage>109675</fpage>
          <pub-id pub-id-type="doi">10.1136/jme-2023-109675</pub-id>
          <pub-id pub-id-type="medline">38423759</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Willis</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Withers</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>A narrative review of the use of PROMs and machine learning to impact value-based clinical decision-making</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2025</year>
          <volume>25</volume>
          <fpage>250</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-025-03083-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-025-03083-8</pub-id>
          <pub-id pub-id-type="medline">40615892</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-025-03083-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC12226851</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <article-title>Prudent Healthcare: Securing Health and Well-Being for Future Generations</article-title>
          <source>Welsh Government, NHS Wales</source>
          <year>2016</year>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://gov.wales/sites/default/files/publications/2019-04/securing-health-and-well-being-for-future-generations.pdf">https://gov.wales/sites/default/files/publications/2019-04/securing-health-and-well-being-for-future-generations.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>AI in health care: what do the public and NHS staff think?</article-title>
          <source>The Health Foundation</source>
          <year>2024</year>
          <month>7</month>
          <day>31</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.health.org.uk/publications/long-reads/ai-in-health-care-what-do-the-public-and-nhs-staff-think">https://www.health.org.uk/publications/long-reads/ai-in-health-care-what-do-the-public-and-nhs-staff-think</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <article-title>The future of AI in healthcare: public perceptions of AI in radiology</article-title>
          <source>The Royal College of Radiologists</source>
          <year>2025</year>
          <month>4</month>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.rcr.ac.uk/media/poelyzlz/rcr-reports-the-future-of-ai-in-healthcare-public-perceptions-of-ai-in-radiology.pdf">https://www.rcr.ac.uk/media/poelyzlz/rcr-reports-the-future-of-ai-in-healthcare-public-perceptions-of-ai-in-radiology.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <article-title>Public attitudes to data and AI: tracker survey (Wave 4) report</article-title>
          <source>Department for Science, Innovation &#38; Technology</source>
          <year>2024</year>
          <month>12</month>
          <day>16</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.gov.uk/government/publications/public-attitudes-to-data-and-ai-tracker-survey-wave-4/public-attitudes-to-data-and-ai-tracker-survey-wave-4-report">https://www.gov.uk/government/publications/public-attitudes-to-data-and-ai-tracker-survey-wave-4/public-attitudes-to-data-and-ai-tracker-survey-wave-4-report</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Doyle</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Rural poverty in Wales: existing research and evidence gaps</article-title>
          <source>Public Policy Institute for Wales</source>
          <year>2016</year>
          <month>5</month>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.wcpp.org.uk/wp-content/uploads/2018/04/An-introduction-to-Rural-Poverty.pdf">https://www.wcpp.org.uk/wp-content/uploads/2018/04/An-introduction-to-Rural-Poverty.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <article-title>Population estimates for England and Wales: mid-2024</article-title>
          <source>Office for National Statistics</source>
          <year>2025</year>
          <month>7</month>
          <day>30</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/bulletins/populationestimatesforenglandandwales/mid2024">https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/bulletins/populationestimatesforenglandandwales/mid2024</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="web">
          <article-title>Time to talk public health</article-title>
          <source>Public Health Wales</source>
          <year>2025</year>
          <month>2</month>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://phw.nhs.wales/topics/time-to-talk-public-health/time-to-talk-public-health-panel-publications/publications/time-to-talk-public-health-february-2025-survey-results/">https://phw.nhs.wales/topics/time-to-talk-public-health/time-to-talk-public-health-panel-publications/publications/time-to-talk-public-health-february-2025-survey-results/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A short commentary on: Does black box AI in medicine compromise informed consent?</article-title>
          <source>Philos Technol</source>
          <year>2025</year>
          <volume>38</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-025-00870-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Materiality and practicality: a response to – are clinicians ethically obligated to disclose their use of medical machine learning systems to patients?</article-title>
          <source>J Med Ethics</source>
          <year>2025</year>
          <volume>51</volume>
          <fpage>574</fpage>
          <lpage>575</lpage>
          <pub-id pub-id-type="doi">10.1136/jme-2024-110371</pub-id>
          <pub-id pub-id-type="medline">39214649</pub-id>
          <pub-id pub-id-type="pii">jme-2024-110371</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Ethical challenges to the adoption of AI in healthcare: a review</article-title>
          <source>New Bioeth</source>
          <year>2024</year>
          <volume>30</volume>
          <issue>4</issue>
          <fpage>251</fpage>
          <lpage>267</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.tandfonline.com/doi/10.1080/20502877.2025.2541438?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/20502877.2025.2541438</pub-id>
          <pub-id pub-id-type="medline">40779305</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Labonte-Lemoyne</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gregoire</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Radanielina-Hita</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Senecal</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Stephanidis</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Antona</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ntoa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Salvendy</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Understanding the patients’ adoption and usage of AI solution in mental health: a scoping review</article-title>
          <source>HCI International 2022 – Late Breaking Posters. HCII 2022. Communications in Computer and Information Science, vol 1655</source>
          <year>2022</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>671</fpage>
          <lpage>675</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Isbanner</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>O'Shaughnessy</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Steel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wilcock</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The adoption of artificial intelligence in health care and social services in Australia: findings from a methodologically innovative national survey of values and attitudes (the AVA-AI Study)</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <volume>24</volume>
          <issue>8</issue>
          <fpage>e37611</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/8/e37611/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/37611</pub-id>
          <pub-id pub-id-type="medline">35994331</pub-id>
          <pub-id pub-id-type="pii">v24i8e37611</pub-id>
          <pub-id pub-id-type="pmcid">PMC9446139</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kühne</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobsen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Legewie</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dollmann</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Attitudes toward AI usage in patient health care: evidence from a population survey vignette experiment</article-title>
          <source>J Med Internet Res</source>
          <year>2025</year>
          <volume>27</volume>
          <fpage>e70179</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e70179/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/70179</pub-id>
          <pub-id pub-id-type="medline">40424613</pub-id>
          <pub-id pub-id-type="pii">v27i1e70179</pub-id>
          <pub-id pub-id-type="pmcid">PMC12152429</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ovadia</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Ratings and rankings: reconsidering the structure of values and their measurement</article-title>
          <source>Int J Soc Res Methodol</source>
          <year>2004</year>
          <volume>7</volume>
          <issue>5</issue>
          <fpage>403</fpage>
          <lpage>414</lpage>
          <pub-id pub-id-type="doi">10.1080/1364557032000081654</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Del Grande</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kaczorowski</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Rating versus ranking in a Delphi survey: a randomized controlled trial</article-title>
          <source>Trials</source>
          <year>2023</year>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>543</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://trialsjournal.biomedcentral.com/articles/10.1186/s13063-023-07442-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13063-023-07442-6</pub-id>
          <pub-id pub-id-type="medline">37596699</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13063-023-07442-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC10436639</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harzing</surname>
              <given-names>AW</given-names>
            </name>
            <name name-style="western">
              <surname>Baldueza</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Barner-Rasmussen</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Barzantny</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Canabal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Davila</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Espejo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ferreira</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Giroud</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Koester</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Mockaitis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Morley</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Myloni</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Odusanya</surname>
              <given-names>JO</given-names>
            </name>
            <name name-style="western">
              <surname>O'Sullivan</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Palaniappan</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Prochno</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Saka-Helmhout</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Siengthai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Viswat</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Soydas</surname>
              <given-names>AU</given-names>
            </name>
            <name name-style="western">
              <surname>Zander</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Rating versus ranking: what is the best way to reduce response and language bias in cross-national research?</article-title>
          <source>Int Bus Rev</source>
          <year>2009</year>
          <volume>18</volume>
          <issue>4</issue>
          <fpage>417</fpage>
          <lpage>432</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ibusrev.2009.03.001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jobin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The global landscape of AI ethics guidelines</article-title>
          <source>Nat Mach Intell</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>389</fpage>
          <lpage>399</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <article-title>Age classifications: Census 2021</article-title>
          <source>Office for National Statistics</source>
          <year>2023</year>
          <month>10</month>
          <day>26</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ons.gov.uk/census/census2021dictionary/variablesbytopic/demographyvariablescensus2021/age/classifications">https://www.ons.gov.uk/census/census2021dictionary/variablesbytopic/demographyvariablescensus2021/age/classifications</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <article-title>Ethnic group, national identity and religion</article-title>
          <source>Office for National Statistics</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ons.gov.uk/methodology/classificationsandstandards/measuringequality/ethnicgroupnationalidentityandreligion">https://www.ons.gov.uk/methodology/classificationsandstandards/measuringequality/ethnicgroupnationalidentityandreligion</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="web">
          <article-title>State Pension age timetable</article-title>
          <source>Department for Work and Pensions</source>
          <year>2014</year>
          <month>5</month>
          <day>15</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.gov.uk/government/publications/state-pension-age-timetable/state-pension-age-timetable">https://www.gov.uk/government/publications/state-pension-age-timetable/state-pension-age-timetable</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharpe</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Your chi-square test is statistically significant: now what?</article-title>
          <source>Pract Assess Res Eval</source>
          <year>2015</year>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>8</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.7275/tbfa-x148"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="web">
          <article-title>Staff characteristics</article-title>
          <source>StatsWales</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics">https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Correction: Improving the quality of web surveys: the Checklist for Reporting Results of Internet E-Surveys (CHERRIES)</article-title>
          <source>J Med Internet Res</source>
          <year>2012</year>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>e8</fpage>
          <pub-id pub-id-type="doi">10.2196/jmir.2042</pub-id>
          <pub-id pub-id-type="medline">22223031</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Improving the quality of web surveys: the Checklist for Reporting Results of Internet E-Surveys (CHERRIES)</article-title>
          <source>J Med Internet Res</source>
          <year>2004</year>
          <volume>6</volume>
          <issue>3</issue>
          <fpage>e34</fpage>
          <pub-id pub-id-type="doi">10.2196/jmir.6.3.e34</pub-id>
          <pub-id pub-id-type="medline">15471760</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>Population and household estimates, Wales: Census 2021</article-title>
          <source>Office for National Statistics</source>
          <year>2022</year>
          <month>6</month>
          <day>28</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/bulletins/populationandhouseholdestimateswales/census2021#age-and-sex-of-the-population">https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/bulletins/populationandhouseholdestimateswales/census2021#age-and-sex-of-the-population</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="web">
          <article-title>Percent of NHS staff by organisation, staff group and gender</article-title>
          <source>StatsWales</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/percentofnhsstaff-by-organisation-staffgroup-gender">https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/percentofnhsstaff-by-organisation-staffgroup-gender</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="web">
          <article-title>Ethnic group, national identity, language and religion in Wales (Census 2021)</article-title>
          <source>Welsh Government</source>
          <year>2022</year>
          <month>11</month>
          <day>29</day>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.gov.wales/ethnic-group-national-identity-language-and-religion-wales-census-2021-html">https://www.gov.wales/ethnic-group-national-identity-language-and-religion-wales-census-2021-html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="web">
          <article-title>Percent of NHS staff by organisation, staff group and ethnicity</article-title>
          <source>StatsWales</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/percentofnhsstaff-by-organisation-staffgroup-ethnicity">https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/percentofnhsstaff-by-organisation-staffgroup-ethnicity</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <article-title>Percent of NHS staff by organisation, staff group and age band</article-title>
          <source>StatsWales</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/nhsstaff-by-organisation-staffgroup-age">https://statswales.gov.wales/Catalogue/Health-and-Social-Care/NHS-Staff/staff-characteristics/nhsstaff-by-organisation-staffgroup-age</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <article-title>Population estimates by local authority and year</article-title>
          <source>StatsWales</source>
          <access-date>2025-11-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://statswales.gov.wales/Catalogue/Population-and-Migration/Population/Estimates/Local-Authority/populationestimates-by-localauthority-year">https://statswales.gov.wales/Catalogue/Population-and-Migration/Population/Estimates/Local-Authority/populationestimates-by-localauthority-year</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pardhan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sehmbi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wijewickrama</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Onumajuru</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Piyasena</surname>
              <given-names>MP</given-names>
            </name>
          </person-group>
          <article-title>Barriers and facilitators for engaging underrepresented ethnic minority populations in healthcare research: an umbrella review</article-title>
          <source>Int J Equity Health</source>
          <year>2025</year>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>70</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://equityhealthj.biomedcentral.com/articles/10.1186/s12939-025-02431-4">https://equityhealthj.biomedcentral.com/articles/10.1186/s12939-025-02431-4</ext-link>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12939-025-02431-4</pub-id>
          <pub-id pub-id-type="medline">40075407</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12939-025-02431-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC11905581</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pruski</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>AI-enhanced healthcare: not a new paradigm for informed consent</article-title>
          <source>J Bioeth Inq</source>
          <year>2024</year>
          <volume>21</volume>
          <issue>3</issue>
          <fpage>475</fpage>
          <lpage>489</lpage>
          <pub-id pub-id-type="doi">10.1007/s11673-023-10320-0</pub-id>
          <pub-id pub-id-type="medline">38300443</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
