<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e69678</article-id><article-id pub-id-type="doi">10.2196/69678</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Trust in Artificial Intelligence&#x2013;Based Clinical Decision Support Systems Among Health Care Workers: Systematic Review</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Tun</surname><given-names>Hein Minn</given-names></name><degrees>MBBS,MPH</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rahman</surname><given-names>Hanif Abdul</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Naing</surname><given-names>Lin</given-names></name><degrees>MBBS, MPH, MMedStat, MHlthSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Malik</surname><given-names>Owais Ahmed</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>PAPRSB Institute of Health Sciences, Universiti Brunei Darussalam</institution><addr-line>Core Residential, Tower 4, Room 201A, UBDCorp, Jalan Tungku Link</addr-line><addr-line>Bandar Seri Begawan</addr-line><country>Brunei Darussalam</country></aff><aff id="aff2"><institution>School of Digital Science, Universiti Brunei Darussalam</institution><addr-line>Bandar Seri Begawan</addr-line><country>Brunei Darussalam</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Debbarma</surname><given-names>Arindam</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Popa</surname><given-names>Elena</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Quazi</surname><given-names>Fardin</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Kondapally</surname><given-names>Saritha</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Hein Minn Tun, MBBS,MPH, PAPRSB Institute of Health Sciences, Universiti Brunei Darussalam, Core Residential, Tower 4, Room 201A, UBDCorp, Jalan Tungku Link, Bandar Seri Begawan, BE1410, Brunei Darussalam, 673 7428942; <email>23h8750@ubd.edu.bn</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>29</day><month>7</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e69678</elocation-id><history><date date-type="received"><day>05</day><month>12</month><year>2024</year></date><date 
date-type="rev-recd"><day>28</day><month>01</month><year>2025</year></date><date date-type="accepted"><day>28</day><month>01</month><year>2025</year></date></history><copyright-statement>&#x00A9; Hein Minn Tun, Hanif Abdul Rahman, Lin Naing, Owais Ahmed Malik. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 29.7.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e69678"/><abstract><sec><title>Background</title><p>Artificial intelligence&#x2013;based clinical decision support systems (AI-CDSSs) have enhanced personalized medicine and improved the efficiency of health care workers. Despite these opportunities, trust in these tools remains a critical factor for their successful integration into practice. 
Existing research lacks synthesized insights and actionable recommendations to guide the development of AI-CDSSs that foster trust among health care workers.</p></sec><sec><title>Objective</title><p>This systematic review aims to identify and synthesize key factors that influence health care workers&#x2019; trust in AI-CDSSs and to provide actionable recommendations for enhancing their trust in these systems.</p></sec><sec sec-type="methods"><title>Methods</title><p>We conducted a systematic review of published studies from January 2020 to November 2024, retrieved from PubMed, Scopus, and Google Scholar. Inclusion criteria focused on studies that examined health care workers&#x2019; perceptions, experiences, and trust in AI-CDSSs. Studies in non&#x2013;English languages and those unrelated to health care settings were excluded. Two independent reviewers followed the Cochrane Collaboration Handbook and PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 guidelines. Analysis was conducted using a developed data charter. The Critical Appraisal Skills Programme tool was applied to assess the quality of the included studies and to evaluate the risk of bias, ensuring a rigorous and systematic review process.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 27 studies met the inclusion criteria, involving diverse health care workers, predominantly in hospitalized settings. Qualitative methods were the most common (n=16, 59%), with sample sizes ranging from small focus groups to cohorts of over 1000 participants. 
Eight key themes emerged as pivotal in improving health care workers&#x2019; trust in AI-CDSSs: (1) System Transparency, emphasizing the need for clear and interpretable AI; (2) Training and Familiarity, highlighting the importance of knowledge sharing and user education; (3) System Usability, focusing on effective integration into clinical workflows; (4) Clinical Reliability, addressing the consistency and accuracy of system performance; (5) Credibility and Validation, referring to how well the system performs across diverse clinical contexts; (6) Ethical Consideration, examining medicolegal liability, fairness, and adherence to ethical standards; (7) Human-Centric Design, prioritizing patient-centered approaches; (8) Customization and Control, highlighting the need to tailor tools to specific clinical needs while preserving health care providers&#x2019; decision-making autonomy. Barriers to trust included algorithmic opacity, insufficient training, and ethical challenges, while enabling factors for health care workers&#x2019; trust in AI-CDSS tools were transparency, usability, and clinical reliability.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The findings highlight the need for explainable AI models, comprehensive training, stakeholder involvement, and human-centered design to foster health care workers&#x2019; trust in AI-CDSSs. Although the heterogeneity of study designs and lack of specific data limit further analysis, this review bridges existing gaps by identifying key themes that support trust in AI-CDSSs. 
It also recommends that future research include diverse demographics, cross-cultural perspectives, and contextual differences in trust across various health care professions.</p></sec></abstract><kwd-group><kwd>trust in artificial intelligence</kwd><kwd>decision support systems</kwd><kwd>health care workers</kwd><kwd>PRISMA</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The adoption of artificial intelligence (AI) in health care has a potentially transformative impact on health care workers by enabling advancements in diagnostics, treatment planning, and patient management, thereby improving the health care system. The increasing availability of digitalized health care data, along with technological advancements in machine learning and deep learning algorithms, has enhanced the potential of AI-based clinical decision support systems (CDSSs). These systems can assist health care workers by predicting patient outcomes and recommending optimal interventions, contributing to personalized medicine and improved health care efficiency [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Despite these advancements, health care professionals&#x2019; trust in AI-based CDSS (AI-CDSS) tools remains a critical factor for their successful integration and effective use in clinical practice. Furthermore, hesitation persists, particularly among highly skilled professionals, regarding AI&#x2019;s ability to provide substantial clinical value [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>Trust is a complex construct that affects how health care workers interact with AI-driven systems, which are developed through the complex and opaque mathematical mechanisms of machine learning models [<xref ref-type="bibr" rid="ref2">2</xref>]. 
Trust holds value only when directed toward agents or systems that are genuinely reliable, as placing trust in untrustworthy sources can lead to severe, even life-threatening, consequences. It can be understood through 3 interconnected elements: belief in the truthfulness of claims (such as trusting the accuracy of advice), confidence in commitments (like relying on a bank to send monthly statements), and faith in competence (for example, trusting a dentist to carry out a procedure properly) [<xref ref-type="bibr" rid="ref5">5</xref>]. Without adequate trust, health care workers may disregard AI recommendations, undermining the potential benefits of AI in enhancing patient care and optimizing clinical workflow.</p><p>Clinicians&#x2019; concerns about the opacity of AI decision-making processes, the potential for algorithmic bias, and the fear of technology replacing human judgment can undermine trust in these systems [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. Furthermore, trust in AI-based systems is not a static concept; it evolves as health care workers interact with the technology and gain experience with its functionality and outcomes. Vereschak et al [<xref ref-type="bibr" rid="ref10">10</xref>] highlight the importance of integrating theoretical elements of trust, such as vulnerability, positive expectations, and attitude, into the understanding of human-AI trust. Trust can also be reflected in behavioral dimensions, including decision time, reliance on or accepting recommendations, and compliance behaviors such as requesting recommendations. 
These behaviors have been conceptualized as passive indicators of trust, such as immediate agreement, disagreement, or mild agreement with the system&#x2019;s recommendations, and can offer valuable insights into the level of trust and how it influences decision-making [<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>A growing body of research has explored trust in AI-CDSS tools from various perspectives, including those of clinicians, nurses, and pharmacists. These studies have examined trust through multiple lenses, ranging from algorithmic development and mathematical considerations, to the use of devil&#x2019;s advocate approaches with large language models such as ChatGPT, to qualitative explorations of health care workers&#x2019; perspectives through a sociotechnical lens. Other angles include AI confidence levels and the impact of technology-induced dehumanization in health care. Trust has also been studied in the context of upstream relationships among different stakeholders [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Several factors influencing trust have been identified, including transparency, explainability, interpretability, privacy, ethical concerns, and the actionability or contestability required by decision makers. Additionally, the attitudes, perceptions, and individual experiences of health care workers have also been recognized as critical elements shaping trust [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>Despite these findings, there appears to be a lack of synthesized insights and recommendations regarding the factors that influence health care workers&#x2019; trust in AI-CDSSs. Our study aims to fill this gap by systematically reviewing the literature to identify the constraints and facilitators of trust in AI-CDSSs. 
Guided by existing research, we intend to formulate practical recommendations for the design and implementation of AI systems that are trusted and accepted by health care practitioners. This research will contribute to the development of strategies that promote the use of AI-CDSSs in health care in a way that complements, rather than disrupts, clinical decision-making.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Review Design</title><p>Our systematic review follows the Cochrane Collaboration Handbook [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>] and reports findings in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 checklist [<xref ref-type="bibr" rid="ref16">16</xref>]. This review systematically consolidates findings from the past 5 years to address underexplored areas of health care workers&#x2019; trust in AI-CDSSs, identifying both enablers and barriers within trust dynamics [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. The Critical Appraisal Skills Programme (CASP) tool [<xref ref-type="bibr" rid="ref17">17</xref>] was used to assess the quality of included studies and the risk of bias.</p></sec><sec id="s2-2"><title>Literature Search Strategies</title><p>We conducted a systematic search of published studies focusing on trust in AI-CDSS between January 1, 2020, and November 30, 2024, guided by PRISMA guidelines with the PICO (population, intervention, comparison, and outcome) framework [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. This study period was chosen to reflect the advancements and increased investment in AI, particularly following the release of generative models such as ChatGPT in the health care sector, especially in the aftermath of the COVID-19 pandemic. Our sources included PubMed, Scopus, and Google Scholar. 
The search strategy used a combination of English keywords, including &#x201C;trust&#x201D; or &#x201C;acceptance&#x201D; or &#x201C;perception&#x201D; and &#x201C;artificial intelligence&#x201D; or &#x201C;AI&#x201D; and &#x201C;decision support systems&#x201D; or &#x201C;clinical decision support&#x201D; or &#x201C;AI-based decision support&#x201D; and &#x201C;healthcare workers&#x201D; or &#x201C;clinicians&#x201D; or &#x201C;nurses&#x201D; or &#x201C;medical professionals&#x201D; or &#x201C;healthcare providers.&#x201D; Publication date filters were applied to include only studies within the specified time frame. Additionally, we used a snowball strategy to identify further sources from the references of relevant full texts. Medical Subject Headings (MeSH) and free-text terms were used to maximize search sensitivity and ensure comprehensive coverage of relevant literature, as described in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-3"><title>Eligibility Criteria</title><p>We included research articles that explicitly described health care professionals&#x2019; trust, acceptance, or reliance on AI-CDSSs, specifically within clinical and primary care settings. Qualitative, quantitative, and mixed-method studies were all considered, provided they explored aspects of trust or acceptance among health care workers. We excluded studies unrelated to the relationship between trust in health care providers and AI-CDSSs. To maintain the scientific credibility of our review, we also excluded non&#x2013;peer-reviewed articles, editorials, opinion pieces, and other forms of nonresearch literature.</p></sec><sec id="s2-4"><title>Data Extraction</title><p>Two researchers (HMT and HAR) initially screened titles and abstracts to determine whether they met the inclusion criteria. After removing duplicates, full texts were reviewed to assess potential exclusion criteria. 
Any disagreements regarding eligibility criteria were resolved through discussion with another team member. The Mixed Methods Appraisal Tool was used to assess the quality of studies, enabling evaluation across diverse methodological approaches [<xref ref-type="bibr" rid="ref18">18</xref>]. We extracted key study details, including author, country of data origin, study design, and the type of AI method utilized. Additional information was systematically extracted for each study, focusing on the type of AI application, the role of health care workers, the study setting and location, the specific department or clinical focus, and the type of AI-based decision support system. We also recorded information on trust measurement tools, qualitative questions related to trust, assessed trust factors, trust-related outcomes, levels of trust observed, influencing factors, study limitations, conclusions or recommendations, and funding sources. Additionally, qualitative information, such as quotes, themes, and findings from interviews, focus groups, and open-ended survey responses, was extracted. The quality of the included studies was also evaluated based on the alignment between study objectives and results.</p></sec><sec id="s2-5"><title>Data Synthesis and Analysis</title><p>Data synthesis and analysis were conducted to systematically integrate and interpret the findings. Relevant data were organized into an evidence matrix using a standardized template in Google Sheets (Google LLC/Alphabet Inc). Advanced tools for systematic review and data extraction, including Zotero 6 (Corporation for Digital Scholarship), Elicit (Ought), and Rayyan (Qatar Computing Research Institute), were used to screen and analyze abstracts from 27 studies that met the inclusion and exclusion criteria. A comprehensive data charter was developed to summarize the characteristics of the included studies [<xref ref-type="bibr" rid="ref19">19</xref>]. 
This data charter includes the year of publication, geographic location, study setting, characteristics of CDSSs, methods used to evaluate trust, descriptions of the AI-CDSSs, and evaluation of trust-related factors in AI-CDSSs. Furthermore, we extracted qualitative outcomes and corresponding codes related to trust in AI-CDSSs. The extracted data were organized and analyzed in Microsoft Excel, with recurrent patterns categorized into themes using mind-mapping techniques. Each theme was carefully interpreted and synthesized into enablers and barriers to trust in AI-CDSSs. Based on these themes, we developed actionable recommendations for practical implementation, policy, and further research by triangulating insights from thematic synthesis, extracted quotes, and relevant literature.</p></sec><sec id="s2-6"><title>Quality and Risk of Bias Assessment</title><p>The quality of qualitative studies and the qualitative components of mixed-methods studies were assessed using the CASP tool. Each question in the tool was evaluated with 1 of 3 responses: &#x201C;yes,&#x201D; &#x201C;no,&#x201D; or &#x201C;cannot determine.&#x201D; Rather than producing a summative score, the CASP tool provides an overall assessment, categorizing studies as &#x201C;not valuable,&#x201D; &#x201C;semivaluable,&#x201D; &#x201C;valuable,&#x201D; or &#x201C;very valuable,&#x201D; as documented in previous literature. These assessments were based on a judgmental approach, in which reviewers evaluated the relevance and contribution of each study to understanding interaction traits within the AI-clinician quality of interaction construct [<xref ref-type="bibr" rid="ref17">17</xref>]. Quality assessments were conducted independently by 2 reviewers (OAM and LN), with any disagreements resolved through consensus. 
This process ensured a rigorous and transparent evaluation of study quality.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Study Selection</title><p>The article selection process consisted of 2 phases: (1) a review of titles and abstracts and (2) a full-text review. <xref ref-type="fig" rid="figure1">Figure 1</xref> illustrates the study selection process. Initially, 333 records were identified from 3 databases: PubMed (69 records), Scopus (142 records), and Google Scholar (122 records). An independent reviewer screened these records to remove duplicates and articles deemed irrelevant based on titles and abstracts, resulting in 60 records advancing to the next stage. Further screening excluded 20 articles due to unsuitable study designs, leaving 40 for eligibility assessment. Following an in-depth full-text review and final discussions among the reviewers, 13 articles were excluded for lacking a focus on trust in AI-based decision support systems among health care workers. Ultimately, 27 studies were included in the final analysis.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flowchart for the study selection process. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e69678_fig01.png"/></fig></sec><sec id="s3-2"><title>Quality Assessment of Included Studies</title><p>A total of 23 studies were assessed using the CASP checklist for qualitative analysis (<xref ref-type="table" rid="table1">Table 1</xref>), while 4 studies used other methodologies. Among the 23 studies, the majority (n=19, 83%) received a &#x201C;Yes&#x201D; rating for most CASP criteria and were categorized as &#x201C;valuable&#x201D; or &#x201C;very valuable&#x201D; in the quality assessment. 
Studies categorized as &#x201C;semivaluable&#x201D; (n=4, 17%) were flagged for issues such as the inappropriate use of qualitative methods to measure nonsubjective outcomes, suboptimal sample recruitment strategies, or insufficient consideration of bias. Although quality assessment was not an inclusion criterion for this systematic review, it was conducted to provide an overview of the quality of the eligible literature. Consequently, studies rated as &#x201C;semivaluable&#x201D; were still included in the data analysis. While these studies were limited in methodological rigor and offered less robust insights into the tools being evaluated, they contributed unique perspectives on health care workers&#x2019; trust in AI-CDSS tools that were not captured in other included studies. The included studies also discussed several limitations, including small sample sizes and various biases, such as potential selection bias, cognitive biases (eg, anchoring bias), and interviewer bias commonly associated with qualitative research. Other limitations included inaccuracies in self-reported data, regional differences in AI exposure, participants&#x2019; familiarity with the study context, and a focus on specific AI solutions or decision domains, all of which may limit the generalizability of findings.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Critical Appraisal Skills Programme responses for each study included in the systematic review (n=23)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup>.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">1. Was there a clear statement of the aims of the research?</td><td align="left" valign="bottom">2. Is the qualitative methodology appropriate?</td><td align="left" valign="bottom">3. Was the research design appropriate to address the aims of the research?</td><td align="left" valign="bottom">4. 
Was the recruitment strategy appropriate to the aims of the research?</td><td align="left" valign="bottom">5. Were data collected in a way that addressed the research issue?</td><td align="left" valign="bottom">6. Has the relationship between the researcher and participants been adequately considered?</td><td align="left" valign="bottom">7. Have ethical issues been taken into consideration?</td><td align="left" valign="bottom">8. Was the data analysis sufficiently rigorous?</td><td align="left" valign="bottom">9. Is there a clear statement of findings?</td><td align="left" valign="bottom">10. How valuable is the research?</td></tr></thead><tbody><tr><td align="left" valign="top">Jacobs et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Semivaluable</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">No</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Micocci et al [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td 
align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Henry et al [<xref ref-type="bibr" rid="ref3">3</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Choudhury et al [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Gunasekeran et al [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Semivaluable</td></tr><tr><td align="left" valign="top">Choudhury [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td 
align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Ankolekar et al [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Van Biesen et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Sivaraman et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Semivaluable</td></tr><tr><td align="left" valign="top">Amann et al [<xref ref-type="bibr" rid="ref13">13</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" 
valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Bach et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Burgess et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Anjara et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" 
valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Jones et al [<xref ref-type="bibr" rid="ref5">5</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Very valuable</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Semivaluable</td></tr><tr><td align="left" valign="top">Chiang et al [<xref ref-type="bibr" rid="ref12">12</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Liaw et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" 
valign="top">Valuable</td></tr><tr><td align="left" valign="top">Nair et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Yoon et al [<xref ref-type="bibr" rid="ref7">7</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Valuable</td></tr><tr><td align="left" valign="top">Zheng et al [<xref ref-type="bibr" rid="ref4">4</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Semivaluable</td></tr><tr><td align="left" valign="top">Vereschak et al [<xref ref-type="bibr" rid="ref11">11</xref>]</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cannot tell</td><td align="left" valign="top">Yes</td><td align="left" 
valign="top">Valuable</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Choudhury [<xref ref-type="bibr" rid="ref36">36</xref>], Stacy et al [<xref ref-type="bibr" rid="ref2">2</xref>], York et al [<xref ref-type="bibr" rid="ref37">37</xref>], and Elareed et al [<xref ref-type="bibr" rid="ref38">38</xref>] are cross-sectional quantitative studies not included in the Critical Appraisal Skills Programme analysis.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Characteristics of Included Studies</title><p>Of the 27 included articles, summarized in <xref ref-type="table" rid="table2">Table 2</xref> and Figure S1 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>, most were published recently: 12 (44%) in 2023, followed by 8 (30%) in 2022, 4 (15%) in 2024, and 3 (11%) in 2021. Geographically, most studies were conducted in the United States (n=12, 44%), followed by Europe (n=7, 26%), multinational collaborations (n=3, 11%), the United Kingdom (n=2, 7%), and 1 (4%) study each from China, Singapore, and Egypt. Most studies were conducted in hospital settings (n=17, 63%), across departments such as emergency care, radiology, and oncology, while 5 (19%) studies were conducted in primary care settings and 5 (19%) studies spanned both hospital and primary care environments. Study designs included qualitative research (n=16, 59%), mixed-methods studies (n=6, 22%), quantitative cross-sectional surveys (n=4, 15%), and 1 (4%) comparative evaluation study assessing AI-generated versus human-generated suggestions for clinical decision support. The study populations encompassed a wide range of health care providers such as physicians, nurses, nurse practitioners, general practitioners, intensive care unit clinicians, pharmacists, ophthalmologists, oncologists, interdisciplinary teams, behavioral health specialists, and AI practitioners. 
Sample sizes varied from small focus groups to cohorts exceeding 1000 individuals.</p><p>The included studies featured a wide range of AI-CDSS tools, demonstrating their application across various clinical functions and specialties (<xref ref-type="table" rid="table2">Table 2</xref>). These systems employ advanced technologies such as machine learning, deep learning, reinforcement learning, and explainable AI to support diagnostics, treatment planning, and clinical decision-making. Examples include the AI-based blood utilization calculator for improving transfusion procedures, the Brilliant Doctor system for dermatological diagnosis, machine learning and reinforcement learning models for providing sepsis treatment recommendations in intensive care unit settings, and QRhythm for identifying optimal rhythm management strategies in atrial fibrillation. Additional innovations are AI-CDSS tools for detecting and managing diabetic retinopathy, glaucoma, and cataracts; ChatGPT-enhanced electronic health record alerts for medication optimization in diabetes; as well as systems for lung cancer relapse prediction, vancomycin dosing, cardiovascular risk prediction, trauma radiography, and asthma management.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Characteristics of included studies and evaluation of trust factors in AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>-CDSSs<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> among health care workers, including study design, population, location, method of evaluating trust, along with a description of AI-CDSSs (n=27).</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Study</td><td align="left" valign="top">Geography</td><td align="left" valign="top">Setting</td><td align="left" valign="top">Study design</td><td align="left" valign="top">Study population (number of participants)</td><td align="left" valign="top">Method 
of evaluating trust</td><td align="left" valign="top">Description of AI-CDSS</td><td align="left" valign="top">Evaluation of health care worker trust factor for AI-CDSS</td></tr></thead><tbody><tr><td align="left" valign="top">Jacobs et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">Multinational: the United Arab Emirates, Singapore, and Hong Kong</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Physicians (n=9) and nurse practitioners (n=1) who are primary care providers</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">Machine learning models used to provide prognostic predictions and support treatment selection for major depressive disorder.</td><td align="left" valign="top">&#x2022; Previous system utilization, including its use by other clinicians and validation through randomized controlled trials<break/>&#x2022; Level of training received</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">China</td><td align="left" valign="top">Primary care</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Clinicians with expertise in both Western and Traditional Chinese medicine (n=22)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">A deep learning and knowledge graph&#x2013;based AI-CDSS system (Brilliant Doctor).</td><td align="left" valign="top">&#x2022; The &#x201C;black-box&#x201D; nature of the AI algorithm and its lack of transparency in the recommendations<break/>&#x2022; Perceived threat to professional autonomy and decision-making, with the &#x201C;click-through&#x201D; approach disrupting workflows<break/>&#x2022; Insufficient training on system features and functionality, along with clinicians&#x2019; 
understanding</td></tr><tr><td align="left" valign="top">Micocci et al [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">United Kingdom</td><td align="left" valign="top">Primary care</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">General practitioners (n=50)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI system developed to support the diagnosis of dermatological conditions.</td><td align="left" valign="top">&#x2022; Accuracy of the AI system<break/>&#x2022; General practitioners&#x2019; familiarity with AI<break/>&#x2022; Previous experiences with similar technologies</td></tr><tr><td align="left" valign="top">Henry et al [<xref ref-type="bibr" rid="ref3">3</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Physicians (n=13) and nurses (n=7) worked at the emergency department, critical care, and general ward</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">A machine learning&#x2013;based system called Targeted Real-time Early Warning System, designed to alert for sepsis detection, evaluate patients, and support treatment management.</td><td align="left" valign="top">&#x2022; Direct experience with the system and observing its behavior over time<break/>&#x2022; Endorsement and recommendations from colleagues and experts<break/>&#x2022; Understanding the system&#x2019;s development and validation process<break/>&#x2022; Ability to customize the system and ask questions about its design</td></tr><tr><td align="left" valign="top">Choudhury et al [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" 
valign="top">Clinicians involved in blood transfusion decision-making (n=10)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">An AI-based blood utilization calculator designed to optimize blood transfusion practices.</td><td align="left" valign="top">&#x2022; Workload<break/>&#x2022; Usability<break/>&#x2022; Impact on decision-making<break/>&#x2022; Alignment with clinical judgment</td></tr><tr><td align="left" valign="top">Gunasekeran et al [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">Multinational: more than 70 countries</td><td align="left" valign="top">Primary care and hospital</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">Ophthalmologists (n=1176)</td><td align="left" valign="top">Likert scales and dichotomous questions</td><td align="left" valign="top">Various AI-based assistive tools and clinical decision support applications used in ophthalmology to detect and manage eye diseases, including diabetic retinopathy, glaucoma, age-related macular degeneration, and cataract.</td><td align="left" valign="top">&#x2022; Usability<break/>&#x2022; Acceptable error levels and concerns over medical liability<break/>&#x2022; Professional acceptance<break/>&#x2022; Organizational support</td></tr><tr><td align="left" valign="top">Choudhury et al [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">Clinicians who used the blood utilization calculator (n=119)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">An AI-based blood utilization calculator.</td><td align="left" valign="top">&#x2022; Perception of AI<break/>&#x2022; Expectancy (effort and performance expectancy)<break/>&#x2022; Perceived risk</td></tr><tr><td 
align="left" valign="top">Ankolekar et al [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">The Netherlands</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">Patients with non-small-cell lung cancer (n=257) treated at a single radiotherapy clinic, and lung cancer specialists (n=9)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">CDSSs developed to support shared decision-making in lung cancer prognosis.</td><td align="left" valign="top">&#x2022; Lack of external validation<break/>&#x2022; Clinician experience<break/>&#x2022; Perceived usefulness of CDSSs</td></tr><tr><td align="left" valign="top">Stacy et al [<xref ref-type="bibr" rid="ref2">2</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Quantitative study</td><td align="left" valign="top">Health care workers involved included clinicians who manage patients with atrial fibrillation (n=33)</td><td align="left" valign="top">Likert scale (0&#x2010;5)</td><td align="left" valign="top">A 2-stage machine learning model&#x2013;based tool, the QRhythm model, designed to identify the optimal rhythm management strategy.</td><td align="left" valign="top">&#x2022; Accuracy of the AI recommendations<break/>&#x2022; Transparency of the AI processes<break/>&#x2022; Clinicians&#x2019; previous experiences with AI</td></tr><tr><td align="left" valign="top">Choudhury et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Quantitative study</td><td align="left" valign="top">Physician residents and fellows (n=111) and nurses (n=8)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">An AI-based decision support 
system known as the blood utilization calculator.</td><td align="left" valign="top">&#x2022; Perceived risk<break/>&#x2022; Expectancy<break/>&#x2022; Acceptance of the AI system</td></tr><tr><td align="left" valign="top">Van Biesen et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Belgium</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Physicians (n=30)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-CDSS tools integrated into electronic health records.</td><td align="left" valign="top">&#x2022; Transparency<break/>&#x2022; Reliability<break/>&#x2022; Perceived accuracy of the CDSSs</td></tr><tr><td align="left" valign="top">Sivaraman et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">Intensive care unit clinicians (n=24)</td><td align="left" valign="top">Likert scale (0&#x2010;10)</td><td align="left" valign="top">A reinforcement learning model&#x2013;based tool called the &#x201C;AI Clinician,&#x201D; designed to provide interpretable treatment recommendations for patients with sepsis in the intensive care unit.</td><td align="left" valign="top">&#x2022; The credibility of the developers who created the AI-based tool<break/>&#x2022; The perceived soundness of the methodology used to develop the tool</td></tr><tr><td align="left" valign="top">Amann et al [<xref ref-type="bibr" rid="ref13">13</xref>]</td><td align="left" valign="top">Germany and Switzerland</td><td align="left" valign="top">Primary care</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Health care professionals, including physicians (n=7), occupational therapists (n=1), physiotherapists (n=4), neuropsychologists 
(n=2), stroke survivors (n=14), and family members (n=6)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-CDSS tools designed to act as administrative assistants for routine tasks and to aid in the diagnosis and treatment of complex stroke cases.</td><td align="left" valign="top">&#x2022; Concerns that AI may lead to dehumanization in health care and erode patient-clinician trust</td></tr><tr><td align="left" valign="top">Bach et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">Denmark</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Ophthalmologists (n=7)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI system for detecting diabetic retinopathy by analyzing color-coded assessments of fundus images and optical coherence tomography scans to determine the presence and severity of lesions.</td><td align="left" valign="top">&#x2022; Accuracy and reliability of AI assessments, including its ability to minimize false positives/negatives<break/>&#x2022; Failure of the AI system to detect severe abnormalities beyond its intended scope<break/>&#x2022; Limitations in the AI system&#x2019;s performance due to factors such as image quality</td></tr><tr><td align="left" valign="top">Burgess et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Primary care and hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Primary care provider (n=14), nurse practitioner/physician assistant (n=18), endocrinologist (n=5), pharmacist (n=2), and internal medicine (n=2)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">A machine learning model trained on a large dataset of 141,625 patients 
with type 2 diabetes mellitus to optimize medication selection and predict the relative efficacy of different drug regimens in reducing hemoglobin A<sub>1c</sub> levels.</td><td align="left" valign="top">&#x2022; Comparison of AI-CDSS tools with the &#x201C;gold standard&#x201D; of randomized controlled trials in generating insights<break/>&#x2022; Clinicians&#x2019; understanding of how the insights are generated and which outcomes the system is designed to optimize<break/>&#x2022; Clinicians&#x2019; trust in the data, such as claims data, used to train the AI model</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Comparative evaluation</td><td align="left" valign="top">Clinicians (n=5)</td><td align="left" valign="top">Likert scale (0&#x2010;5)</td><td align="left" valign="top">ChatGPT, a large language model by OpenAI, used to improve CDSS alerts in electronic health records.</td><td align="left" valign="top">&#x2022; Understanding<break/>&#x2022; Relevance and clarity of AI suggestions<break/>&#x2022; Usefulness<break/>&#x2022; Acceptance<break/>&#x2022; Workflow impact<break/>&#x2022; Redundancy<break/>&#x2022; Potential for bias</td></tr><tr><td align="left" valign="top">Anjara et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Spain</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Oncologists with specialized training in treating lung cancer (n=10)</td><td align="left" valign="top">Think-aloud protocol</td><td align="left" valign="top">Explainable AI system based on a graph representation learning model for predicting lung cancer relapse.</td><td align="left" valign="top">&#x2022; Perception of clarity<break/>&#x2022; Credibility and utility<break/>&#x2022; Information 
overload and the presence of example-based explanation<break/>&#x2022; System&#x2019;s alignment with clinical decision-making needs</td></tr><tr><td align="left" valign="top">Jones et al [<xref ref-type="bibr" rid="ref5">5</xref>]</td><td align="left" valign="top">Multinational: Belgium, the United Kingdom, Italy, and China</td><td align="left" valign="top">Primary care and hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Physician (n=24)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-powered CDSS used in the context of ophthalmology (ie, clinical care specializing in eye and vision health).</td><td align="left" valign="top">&#x2022; Perception of clinicians&#x2019; control over decision-making<break/>&#x2022; Medical errors<break/>&#x2022; Legal responsibility/liability</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Critical care pharmacists (n=13)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-CDSS tools designed to facilitate vancomycin dosing for hospitalized patients.</td><td align="left" valign="top">&#x2022; Accuracy of recommendations<break/>&#x2022; Rationale behind dosing<break/>&#x2022; Transparency of the AI model<break/>&#x2022; The black-box nature of AI recommendations<break/>&#x2022; Complexity of algorithms</td></tr><tr><td align="left" valign="top">York et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">United Kingdom</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Quantitative study</td><td align="left" valign="top">Clinicians with varying levels of training, including foundation year 1 
(n=108), foundation year 2 (n=28), specialty trainee/core trainee 1&#x2010;2 (n=35), specialty trainee 3/specialty registrar or above (n=49), and medical students (n=77)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-CDSS tools applied in the development of skeletal radiography for trauma.</td><td align="left" valign="top">&#x2022; Knowledge of AI<break/>&#x2022; Confidence in interpreting radiographs<break/>&#x2022; Level of training and experience of the clinician</td></tr><tr><td align="left" valign="top">Chiang et al [<xref ref-type="bibr" rid="ref12">12</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Primary care</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Ophthalmologists and optometrists from the University of California, San Diego (n=10)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-based decision support system designed to predict the risk of cardiovascular disease.</td><td align="left" valign="top">&#x2022; Accuracy<break/>&#x2022; Reliability<break/>&#x2022; Usefulness</td></tr><tr><td align="left" valign="top">Liaw et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Primary care and hospital</td><td align="left" valign="top">Mixed-method study</td><td align="left" valign="top">Physician (n=24)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">Diabetes AI prediction tool designed to predict the risk of poor diabetes control.</td><td align="left" valign="top">&#x2022; Accuracy of the tool<break/>&#x2022; Transparency of the AI processes<break/>&#x2022; Clinicians&#x2019; familiarity with AI</td></tr><tr><td align="left" valign="top">Nair et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" 
valign="top">Sweden</td><td align="left" valign="top">Primary care and hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Physician (n=14); nurse practitioner/nurse/physician assistant (n=3); behavioral specialist (n=1); social worker (n=1); and other staff including front desk, administrative, or medical assistant (n=3)</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-based decision support tool designed to reduce the risk of readmission in patients with heart failure.</td><td align="left" valign="top">&#x2022; Stakeholder engagement<break/>&#x2022; Perceived benefits<break/>&#x2022; Transparency</td></tr><tr><td align="left" valign="top">Yoon et al [<xref ref-type="bibr" rid="ref7">7</xref>]</td><td align="left" valign="top">Singapore</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Clinicians (n=13) in 4 focus groups</td><td align="left" valign="top">Focus group discussion</td><td align="left" valign="top">AI-enabled prescription advisory tool.</td><td align="left" valign="top">&#x2022; Interpretability of AI-generated recommendations<break/>&#x2022; Transparency of the system<break/>&#x2022; Clinicians&#x2019; previous experiences with AI</td></tr><tr><td align="left" valign="top">Zheng et al [<xref ref-type="bibr" rid="ref4">4</xref>]</td><td align="left" valign="top">United States</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">Clinicians (n=14) who treated pediatric patients with asthma at 2 outpatient facilities</td><td align="left" valign="top">How-Might-We questions</td><td align="left" valign="top">Machine learning&#x2013;based CDSS, the Asthma Guidance and Prediction System, for asthma management.</td><td align="left" valign="top">&#x2022; Accuracy<break/>&#x2022; Reliability<break/>&#x2022; 
Explainability of the AI tool</td></tr><tr><td align="left" valign="top">Elareed et al [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Egypt</td><td align="left" valign="top">Hospital</td><td align="left" valign="top">Quantitative study</td><td align="left" valign="top">Physician (n=249)</td><td align="left" valign="top">Likert scale (0&#x2010;5)</td><td align="left" valign="top">General AI applications in health care, including potential uses in disease management and treatment.</td><td align="left" valign="top">&#x2022; Job replacement by AI<break/>&#x2022; Perceived usefulness<break/>&#x2022; Reduction in workload<break/>&#x2022; Impact on physician-patient relationship<break/>&#x2022; AI to handle patient data responsibly</td></tr><tr><td align="left" valign="top">Vereschak et al [<xref ref-type="bibr" rid="ref11">11</xref>]</td><td align="left" valign="top">France and Germany</td><td align="left" valign="top">Primary care</td><td align="left" valign="top">Qualitative study</td><td align="left" valign="top">AI practitioners, including bioengineers and researchers (n=1) and others (n=6), and AI decision participants, including a medical student (n=1) and others (n=6).</td><td align="left" valign="top">Semistructured qualitative interview</td><td align="left" valign="top">AI-assisted decision-making systems, particularly those employing machine learning techniques.</td><td align="left" valign="top">&#x2022; AI transparency<break/>&#x2022; AI literacy<break/>&#x2022; Interpersonal relationships between stakeholders (developer and user)<break/>&#x2022; The complexity of tasks</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>CDSS: clinical decision support system.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4"><title>Factors Influencing Health Care Workers&#x2019; Trust in AI-CDSS Tools</title><p>To analyze health care 
workers&#x2019; trust in AI-CDSS tools, we identified the methods used across 27 studies to assess trust-related elements. The majority employed semistructured qualitative interviews (n=19, 70%), followed by Likert scales (n=4, 15%), focus group discussions (n=1, 4%), How-Might-We questions (n=1, 4%), Likert scales combined with dichotomous questions (n=1, 4%), and the think-aloud protocol (n=1, 4%; <xref ref-type="table" rid="table2">Table 2</xref>). The assessment of trust in AI-CDSS tools revealed various factors that must be addressed to strengthen trust among health care workers (<xref ref-type="table" rid="table2">Table 2</xref>). Factors described in the study include experience with the AI system, colleagues&#x2019; recommendations, results from randomized controlled trials, and clinicians&#x2019; direct experience with the system over time. Transparency, accuracy, and the reliability of AI recommendations were identified as critical, with recurring concerns about the &#x201C;black-box&#x201D; nature of algorithms and the lack of clarity regarding how insights are generated. Additional factors influencing trust included perceived or actual risks, ease of use, organizational fit, and alignment with clinical judgment. Health care workers emphasized the importance of adequate training, customizable features, and the credibility of system developers. Trust perceptions were also shaped by considerations such as workload impact, acceptable error thresholds, and concerns around medical liability. Stakeholder involvement and familiarity with AI systems were described as contributing positively to trust in AI-CDSS tools. 
However, concerns about job displacement and the potential dehumanization of care emerged as significant challenges to fostering trust in these technologies.</p></sec><sec id="s3-5"><title>Insights Into Health Care Workers&#x2019; Trust in AI</title><p>The synthesis of study findings on trust in AI-CDSSs revealed 8 key thematic insights (<xref ref-type="table" rid="table3">Table 3</xref> and <xref ref-type="fig" rid="figure2">Figure 2</xref>). These include (1) system transparency, which emphasizes the need for clear and interpretable AI systems; (2) training and familiarity, which highlights the importance of educating and familiarizing health care workers with AI-CDSS; (3) system usability, which focuses on seamless integration into clinical workflows; (4) clinical reliability, which stresses the need for consistent and accurate system performance; (5) credibility and validation, which describe the importance of system validation across diverse clinical contexts; (6) ethical consideration, which examines issues such as medicolegal liability, fairness, and adherence to ethical standards; (7) human-centric design, which focuses on prioritizing patient-centered approaches in design; and finally, (8) customization and control, which reflect the need for AI tools to adapt to specific clinical needs while ensuring health care providers retain decision-making autonomy.</p><p>These themes were explored through the enablers and barriers that influence health care workers&#x2019; trust in AI-CDSS. Among the enablers, prior system use and validation through randomized controlled trials were cited as key factors that boosted confidence in the AI systems. Familiarity and training with AI tools further strengthened clinicians&#x2019; trust, empowering them to make informed decisions. Additionally, observing the system&#x2019;s performance over time and receiving endorsements from colleagues and domain experts contributed significantly to trust building. 
Furthermore, system usability, alignment with clinical judgment, and the ability to reduce workload emerged as important factors positively influencing trust. Transparency in the AI development process and the perceived credibility of the developers also played a critical role in fostering confidence. Finally, the explainability and interpretability of AI recommendations, along with the ability to customize the system and seek clarification, offered clinicians a greater sense of control, further enhancing trust.</p><p>However, the study revealed several barriers that erode trust in AI-CDSS. A major concern was the black-box nature of the AI algorithm, which renders recommendations opaque and difficult to interpret. Clinicians also expressed concerns about inadequate training, which diminishes their understanding and confidence in using these systems effectively. Additional barriers included workflow disruptions, perceived threats to professional autonomy, and doubts regarding the accuracy and reliability of AI-generated recommendations. Ethical considerations, such as fears of dehumanization in patient care and perceived risks of job replacement, added further complexity to trust in AI-CDSS. Concerns were also raised about the efficacy of these systems, particularly due to inadequate external validation and limited generalizability to diverse clinical contexts. 
Lastly, trust was further undermined by unresolved issues related to medical liability, potential algorithmic biases, and broader ethical risks.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Eight thematic areas influencing health care workers&#x2019; trust in AI<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup>-CDSS<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup>, along with enablers, barriers, and recommendations (n=27).</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Theme</td><td align="left" valign="top">Enablers</td><td align="left" valign="top">Barriers</td><td align="left" valign="top">Recommendations</td></tr></thead><tbody><tr><td align="left" valign="top">System Transparency</td><td align="left" valign="top">&#x2022; Prior system use and validation through randomized controlled trials</td><td align="left" valign="top">&#x2022; Lack of transparency in AI algorithms (&#x201C;black-box&#x201D; nature)<break/>&#x2022; Unclear recommendations</td><td align="left" valign="top">&#x2022; Use interpretable algorithms<break/>&#x2022; Provide clear, actionable recommendations</td></tr><tr><td align="left" valign="top">Training and Familiarity</td><td align="left" valign="top">&#x2022; Training and experience with the AI system<break/>&#x2022; Improved confidence and familiarity</td><td align="left" valign="top">&#x2022; Insufficient training on system functionality</td><td align="left" valign="top">&#x2022; Implement comprehensive training programs to build user confidence and understanding</td></tr><tr><td align="left" valign="top">System Usability</td><td align="left" valign="top">&#x2022; Direct observation of system behavior<break/>&#x2022; Endorsements from colleagues</td><td align="left" valign="top">&#x2022; Workflow disruption<break/>&#x2022; Perceived threat to professional autonomy (eg, &#x201C;click-through&#x201D; processes)</td><td align="left" 
valign="top">&#x2022; Conduct hands-on training<break/>&#x2022; Facilitate peer-led workshops to improve usability</td></tr><tr><td align="left" valign="top">Clinical Reliability</td><td align="left" valign="top">&#x2022; Usability aligned with clinical judgment<break/>&#x2022; Reduced workload</td><td align="left" valign="top">&#x2022; Concerns about accuracy and reliability of AI recommendations</td><td align="left" valign="top">&#x2022; Validate systems through randomized trials and real-world studies</td></tr><tr><td align="left" valign="top">Credibility and Validation</td><td align="left" valign="top">&#x2022; Perceived robustness of AI development methods</td><td align="left" valign="top">&#x2022; Limited external validation<break/>&#x2022; Poor generalizability to diverse clinical settings</td><td align="left" valign="top">&#x2022; Ensure external validation<break/>&#x2022; Test across varied health care settings to build trust</td></tr><tr><td align="left" valign="top">Ethical Considerations</td><td align="left" valign="top">&#x2022; Credibility of developers<break/>&#x2022; Engagement with stakeholders</td><td align="left" valign="top">&#x2022; Medical liability concerns<break/>&#x2022; Fear of clinical errors</td><td align="left" valign="top">&#x2022; Clarify legal responsibilities<break/>&#x2022; Ensure strong validation to reduce liability risk</td></tr><tr><td align="left" valign="top">Human-Centric Design</td><td align="left" valign="top">&#x2022; Explainable, interpretable AI recommendations</td><td align="left" valign="top">&#x2022; Concerns about dehumanization of care<break/>&#x2022; Threats to patient-clinician relationships</td><td align="left" valign="top">&#x2022; Design AI to support (not replace) human judgment<break/>&#x2022; Prioritize patient-centered care</td></tr><tr><td align="left" valign="top">Customization and Control</td><td align="left" valign="top">&#x2022; Clinicians&#x2019; ability to customize the system<break/>&#x2022; 
Ability to ask questions</td><td align="left" valign="top">&#x2022; Perceived risks: bias, job displacement, and ethical concerns</td><td align="left" valign="top">&#x2022; Involve stakeholders in the design process<break/>&#x2022; Address ethical issues and bias transparently</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table3fn2"><p><sup>b</sup>CDSS: clinical decision support system.</p></fn></table-wrap-foot></table-wrap><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Overview of eight thematic areas related to healthcare workers&#x2019; trust in AI-based clinical decision support systems (AI-CDSS).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e69678_fig02.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The systematic review included 27 studies analyzing health care workers&#x2019; trust in AI-CDSSs. The article selection process began with 333 records and, after rigorous screening based on inclusion criteria, was narrowed to 27 studies. Most studies were recent (n=12, 44% from 2023) and conducted in hospital settings across diverse health care worker groups. Qualitative methods dominated (n=16, 59%), with sample sizes ranging from small focus groups to cohorts of over 1000 participants. The synthesis of findings highlights 8 thematic areas influencing health care workers&#x2019; trust in AI-CDSS tools, encompassing both enablers and barriers. Key enablers include prior system validation, transparency, training, usability, and alignment with clinical judgment. By contrast, barriers such as algorithmic opacity, inadequate training, workflow disruptions, and ethical concerns undermine trust. 
Based on these themes, we provide actionable recommendations for the design and implementation of AI systems that are more likely to be trusted and accepted by health care practitioners (<xref ref-type="table" rid="table3">Table 3</xref>).</p></sec><sec id="s4-2"><title>System Transparency</title><p>Fostering trust in AI-CDSSs among health care workers involves enhancing the transparency of AI algorithms and providing clear, practical, and actionable recommendations for clinical decision-making [<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref41">41</xref>]. According to Nasarian et al [<xref ref-type="bibr" rid="ref42">42</xref>], black-box models pose challenges in CDSS due to their limited interpretability, especially when compared with simpler white-box models that offer transparent results without requiring additional parameters. While black-box models often deliver high accuracy, their opacity can lead to confusion about how decisions are made&#x2014;and, in some cases, may result in overreliance on the system, particularly among less experienced clinicians who may lack the expertise to interpret AI outputs effectively [<xref ref-type="bibr" rid="ref22">22</xref>]. Gray-box models, positioned between the extremes of black-box and white-box models, offer a balance between complexity and interpretability, provided they are designed effectively [<xref ref-type="bibr" rid="ref42">42</xref>]. Interpretability should be incorporated throughout the entire process, from data preprocessing and model selection to postmodeling phases. However, most existing AI-CDSS tools focus primarily on postmodeling explainability. 
To reduce skepticism surrounding the &#x201C;black-box&#x201D; nature of AI systems, developers should ensure transparency in the rationale behind recommendations at every stage of the development pipeline [<xref ref-type="bibr" rid="ref42">42</xref>].</p></sec><sec id="s4-3"><title>Training and Familiarity</title><p>To improve trust in AI-CDSS, comprehensive training programs on AI tools for health care workers play a vital role [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. These programs not only build familiarity with the systems but also enhance users&#x2019; confidence in their reliability and functionality. Dlugatch et al [<xref ref-type="bibr" rid="ref6">6</xref>] discussed the impact of AI on health care workers. As AI technology begins to surpass human capabilities, the epistemic authority of medical practitioners risks being undermined, challenged, or even supplanted [<xref ref-type="bibr" rid="ref6">6</xref>]. This may lead health care professionals to view AI-CDSS tools as replacements rather than assistants. To address these challenges, training programs should educate health care workers on how AI systems are developed, including their capabilities, limitations, and potential pitfalls.</p></sec><sec id="s4-4"><title>System Usability</title><p>To improve system usability and alignment with clinical judgment, hands-on training and peer-led workshops should be conducted [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. These approaches not only enhance health care workers&#x2019; understanding of AI systems but also improve their practical usability. According to Task Technology-Fit (TTF) theory, users are more likely to adopt a technology only if it aligns with their tasks and improves performance. 
However, external influences and uncertainty surrounding AI can introduce biases that either encourage or deter clinicians from adopting such technologies in the future [<xref ref-type="bibr" rid="ref25">25</xref>]. This highlights the importance of peer-to-peer sharing of experiences with AI-CDSS tools.</p></sec><sec id="s4-5"><title>Clinical Reliability</title><p>To ensure clinical reliability, AI systems should demonstrate accuracy and consistency through real-world testing and randomized controlled trials [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. Micocci et al [<xref ref-type="bibr" rid="ref22">22</xref>] noted that AI systems, like human clinicians, are inherently imperfect and should be designed to complement, not replace, the clinician&#x2019;s holistic understanding of each clinical scenario. While AI can offer valuable decision support, the ultimate responsibility for diagnostic resilience lies with the clinician, who retains the authority to accept or reject AI recommendations [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s4-6"><title>Credibility and Validation</title><p>Trust in AI-CDSS can be further fostered through external validation of the system in diverse clinical settings, which can help demonstrate the soundness of the AI methodology used in its development [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>]. Nair et al [<xref ref-type="bibr" rid="ref35">35</xref>] mentioned that clinicians express fatigue from the integration of AI-based tools into workflows, especially when organizations are reluctant to discontinue ineffective technologies. 
This underscores the crucial role of tool developers in thoughtfully managing and thoroughly validating systems across varied contexts to avoid adding further burden to health care workers.</p></sec><sec id="s4-7"><title>Human-Centric Design</title><p>The importance of human-centric design cannot be overstated in fostering trust in AI-CDSS [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>, <xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref> ]. Amann et al [<xref ref-type="bibr" rid="ref13">13</xref>] raised concerns about technology-induced dehumanization in patient care and its impact on the patient-clinician relationship. Sivaraman et al [<xref ref-type="bibr" rid="ref28">28</xref>] and Jacobs et al [<xref ref-type="bibr" rid="ref5">5</xref>] discussed the important role of a sociotechnical lens in designing AI-CDSS, emphasizing the need to integrate environmental and social factors into system development. Furthermore, Alruwaili et al [<xref ref-type="bibr" rid="ref8">8</xref>] discussed that health care professionals, such as nurses, have varying concerns about AI&#x2019;s impact on the human aspect of care, while others recognize its potential benefits. This highlights the importance of incorporating humanistic elements in the design of AI-CDSS as supportive tools that enhance, rather than detract from, patient care.</p></sec><sec id="s4-8"><title>Ethical Concerns and Guidelines</title><p>Clear guidelines on roles and responsibilities, along with robust validation of AI tools, will address liability related to ethical concerns, which will help alleviate concerns and build trust [<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref61">61</xref> ]. 
Gunasekeran et al [<xref ref-type="bibr" rid="ref24">24</xref>] and Jones et al [<xref ref-type="bibr" rid="ref5">5</xref>] noted that health care workers fear the medicolegal impact of AI-CDSS systems. Providing explicit guidance on the capabilities of AI-CDSS and clearly delineating the roles and responsibilities of health care workers can further help mitigate concerns related to medical errors and liability. Ethical AI frameworks, such as the European Commission&#x2019;s Ethics Guidelines for Trustworthy AI, the EU AI Act, and the OECD&#x2019;s AI Ethics Guidelines, offer specific guidance for the development of AI-CDSS in the health care sector [<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref64">64</xref>]. These frameworks not only help reduce ethical concerns but also promote human-centric design, which can enhance health care workers&#x2019; trust in AI-CDSS.</p></sec><sec id="s4-9"><title>Customization and Control</title><p>Trust in AI-CDSS can be fostered through collaboration, coordination, and meaningful stakeholder engagement during system design, helping to eliminate ethical concerns and fears of job replacement among health care workers [<xref ref-type="bibr" rid="ref65">65</xref>-<xref ref-type="bibr" rid="ref72">72</xref>]. Chiang et al [<xref ref-type="bibr" rid="ref12">12</xref>] emphasized the importance of securing support from a variety of stakeholders, such as organizational leadership and end users, early in the development process to improve trust in AI-based tools. Ball et al [<xref ref-type="bibr" rid="ref73">73</xref>] also highlighted the role of collaboration and continuous communication through a &#x201C;human-in-the-loop&#x201D; approach, which integrates human expertise and addresses the limitations of AI algorithms. 
Involving direct end users, such as health care workers, during the development phase enables them to better understand the supportive role of AI-CDSS, rather than perceiving it as a threat to their jobs. Furthermore, engaging a range of stakeholders can help reduce ethical concerns by raising possible issues, such as harm to patients, early in the process. This, in turn, allows developers to make necessary modifications and improve trust in the implementation of AI-CDSS tools [<xref ref-type="bibr" rid="ref11">11</xref>].</p></sec><sec id="s4-10"><title>Limitations of the Study</title><p>This systematic review has certain limitations. First, it included only studies published in English and did not account for AI system studies from nonindexed journals, which may limit the relevance of the findings to non&#x2013;English-speaking or unpublished research. Second, the quality assessment was conducted using the CASP checklist, which evaluates only the qualitative elements of included studies, regardless of their overall design. This may have limited the generalizability of findings derived from nonqualitative studies. Third, although we aimed to conduct a meta-analysis, including a subanalysis or comparative analysis, we were unable to do so due to the high level of heterogeneity across studies and the lack of detailed demographic information. Additionally, the lack of sufficient data on health care roles and geographic contexts associated with the qualitative quotes and outcomes hindered our ability to conduct a comparative analysis. Lastly, the study was formative in nature, with categories and components generated through a subjective synthesis process, which may introduce interpretive bias. Despite these limitations, the synthesis and recommendations from this study help bridge existing gaps and provide specific themes to foster health care workers&#x2019; trust in AI-CDSS. 
Future studies should consider incorporating more diverse demographic data, performing cross-cultural studies, and exploring contextual differences in trust across various health care professional groups to address these gaps more comprehensively.</p></sec><sec id="s4-11"><title>Conclusions</title><p>Our systematic review of 27 studies identifies 8 key themes influencing health care workers&#x2019; trust in AI-CDSS tools. We highlight important enabling factors such as transparency, training, usability, clinical reliability, and alignment with clinical judgment. Conversely, barriers include algorithmic obscurity, inadequate training, and ethical concerns. Based on these findings, we recommend prioritizing the development of transparent AI models, implementing comprehensive training initiatives, and conducting practical workshops with real-life testing to foster sustained trust in AI-CDSS among health care workers. Moreover, integrating human-centered design and addressing ethical considerations are crucial to ensuring that AI tools enhance, rather than hinder, the patient-health care worker relationship. Limitations, such as the exclusion of non-English studies, heterogeneity in study designs, and a lack of detailed data, restricted the scope of further exploration in this analysis. Nevertheless, it bridges existing gaps and provides specific themes to foster the trust of health care workers in AI-CDSS. 
The identified thematic areas, along with our recommendations, establish a foundation for forthcoming research and development of AI-based tools to ensure that AI-CDSS are efficient, reliable, and trustworthy for health care workers.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available due to organizational intellectual property regulations.</p></sec></notes><fn-group><fn fn-type="con"><p>HMT was responsible for conceptualization, data curation, formal analysis, investigation, methodology, resources, software, validation, visualization, and writing the original draft. HAR contributed to methodology development, supervision, data validation, and writing, reviewing, and editing the manuscript. OAM provided supervision and data validation. LN contributed through supervision, data validation, and writing, reviewing, and editing the manuscript. All authors approved the final content of the manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CASP</term><def><p>Critical Appraisal Skills Programme</p></def></def-item><def-item><term id="abb3">CDSS</term><def><p>clinical decision support system</p></def></def-item><def-item><term id="abb4">MeSH</term><def><p>Medical Subject Headings</p></def></def-item><def-item><term id="abb5">PICO</term><def><p>population, intervention, comparison, and outcome</p></def></def-item><def-item><term id="abb6">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name 
name-style="western"><surname>Rezaeian</surname><given-names>O</given-names> </name><name name-style="western"><surname>Bayrak</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name></person-group><article-title>An architecture to support graduated levels of trust for cancer diagnosis with AI</article-title><year>2024</year><conf-name>International Conference on Human-Computer Interaction Communications in Computer and Information Science 2119 CCIS</conf-name><conf-date>Jun 29 to Jul 4, 2024</conf-date><conf-loc>Washington, DC</conf-loc><fpage>344</fpage><lpage>351</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-61966-3_37</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stacy</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>R</given-names> </name><name name-style="western"><surname>Barrett</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Qualitative evaluation of an artificial intelligence-based clinical decision support system to guide rhythm management of atrial fibrillation: survey study</article-title><source>JMIR Form Res</source><year>2022</year><month>08</month><day>11</day><volume>6</volume><issue>8</issue><fpage>e36443</fpage><pub-id pub-id-type="doi">10.2196/36443</pub-id><pub-id pub-id-type="medline">35969422</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Henry</surname><given-names>KE</given-names> </name><name name-style="western"><surname>Kornfield</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sridharan</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Human-machine 
teaming is key to AI adoption: clinicians&#x2019; experiences with a deployed machine learning system</article-title><source>NPJ Digit Med</source><year>2022</year><month>07</month><day>21</day><volume>5</volume><issue>1</issue><fpage>97</fpage><pub-id pub-id-type="doi">10.1038/s41746-022-00597-7</pub-id><pub-id pub-id-type="medline">35864312</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zheng</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ohde</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Overgaard</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>Clinical needs assessment of a machine learning-based asthma management tool: user-centered design approach</article-title><source>JMIR Form Res</source><year>2024</year><month>01</month><day>15</day><volume>8</volume><issue>1</issue><fpage>e45391</fpage><pub-id pub-id-type="doi">10.2196/45391</pub-id><pub-id pub-id-type="medline">38224482</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jones</surname><given-names>C</given-names> </name><name name-style="western"><surname>Thornton</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wyatt</surname><given-names>JC</given-names> </name></person-group><article-title>Artificial intelligence and clinical decision support: clinicians&#x2019; perspectives on trust, trustworthiness, and liability</article-title><source>Med Law Rev</source><year>2023</year><month>11</month><day>27</day><volume>31</volume><issue>4</issue><fpage>501</fpage><lpage>520</lpage><pub-id pub-id-type="doi">10.1093/medlaw/fwad013</pub-id><pub-id pub-id-type="medline">37218368</pub-id></nlm-citation></ref><ref 
id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dlugatch</surname><given-names>R</given-names> </name><name name-style="western"><surname>Georgieva</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kerasidou</surname><given-names>A</given-names> </name></person-group><article-title>AI-driven decision support systems and epistemic reliance: a qualitative study on obstetricians&#x2019; and midwives&#x2019; perspectives on integrating AI-driven CTG into clinical decision making</article-title><source>BMC Med Ethics</source><year>2024</year><month>01</month><day>6</day><volume>25</volume><issue>1</issue><fpage>6</fpage><pub-id pub-id-type="doi">10.1186/s12910-023-00990-1</pub-id><pub-id pub-id-type="medline">38184595</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yoon</surname><given-names>S</given-names> </name><name name-style="western"><surname>Goh</surname><given-names>H</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>PC</given-names> </name><etal/></person-group><article-title>Assessing the utility, impact, and adoption challenges of an artificial intelligence-enabled prescription advisory tool for type 2 diabetes management: qualitative study</article-title><source>JMIR Hum Factors</source><year>2024</year><month>06</month><day>13</day><volume>11</volume><fpage>e50939</fpage><pub-id pub-id-type="doi">10.2196/50939</pub-id><pub-id pub-id-type="medline">38869934</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alruwaili</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Abuadas</surname><given-names>FH</given-names> </name><name 
name-style="western"><surname>Alsadi</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Exploring nurses&#x2019; awareness and attitudes toward artificial intelligence: Implications for nursing practice</article-title><source>Digit HEALTH</source><year>2024</year><volume>10</volume><fpage>20552076241271803</fpage><pub-id pub-id-type="doi">10.1177/20552076241271803</pub-id><pub-id pub-id-type="medline">39114115</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lei</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Who should I trust: AI or myself? Leveraging human and AI correctness likelihood to promote appropriate trust in AI-assisted decision-making</article-title><year>2023</year><month>04</month><day>19</day><access-date>2024-11-12</access-date><conf-name>CHI &#x2019;23: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>Apr 23-28, 2023</conf-date><conf-loc>Hamburg, Germany</conf-loc><pub-id pub-id-type="doi">10.1145/3544548.3581058</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vereschak</surname><given-names>O</given-names> </name><name name-style="western"><surname>Bailly</surname><given-names>G</given-names> </name><name name-style="western"><surname>Caramiaux</surname><given-names>B</given-names> </name></person-group><article-title>How to evaluate trust in AI-assisted decision making? 
A survey of empirical methodologies</article-title><source>Proc ACM Hum-Comput Interact</source><year>2021</year><month>10</month><day>13</day><volume>5</volume><issue>CSCW2</issue><fpage>1</fpage><lpage>39</lpage><pub-id pub-id-type="doi">10.1145/3476068</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Vereschak</surname><given-names>O</given-names> </name><name name-style="western"><surname>Alizadeh</surname><given-names>F</given-names> </name><name name-style="western"><surname>Bailly</surname><given-names>G</given-names> </name><name name-style="western"><surname>Caramiaux</surname><given-names>B</given-names> </name></person-group><article-title>Trust in AI-assisted decision making: perspectives from those behind the system and those for whom the decision is made</article-title><year>2024</year><month>05</month><day>11</day><conf-name>CHI &#x2019;24: Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>May 11-16, 2024</conf-date><conf-loc>Honolulu, HI</conf-loc><pub-id pub-id-type="doi">10.1145/3613904.3642018</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chiang</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Yin</surname><given-names>M</given-names> </name></person-group><article-title>Enhancing AI-assisted group decision making through LLM-powered devil&#x2019;s advocate</article-title><year>2024</year><month>03</month><day>18</day><conf-name>IUI &#x2019;24: Proceedings of the 29th International Conference on Intelligent User 
Interfaces</conf-name><conf-date>Mar 18-21, 2024</conf-date><conf-loc>Greenville, SC</conf-loc><pub-id pub-id-type="doi">10.1145/3640543.3645199</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name><name name-style="western"><surname>Ormond</surname><given-names>KE</given-names> </name><name name-style="western"><surname>Frey</surname><given-names>D</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><name name-style="western"><surname>Blasimme</surname><given-names>A</given-names> </name></person-group><article-title>Expectations and attitudes towards medical artificial intelligence: a qualitative study in the field of stroke</article-title><source>PLoS One</source><year>2023</year><volume>18</volume><issue>1</issue><fpage>e0279088</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0279088</pub-id><pub-id pub-id-type="medline">36630325</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="book"><source>Cochrane-Campbell Handbook for Qualitative Evidence Synthesis</source><publisher-name>The Cochrane Collaboration</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://training.cochrane.org/cochrane-campbell-handbook-qualitative-evidence-synthesis">https://training.cochrane.org/cochrane-campbell-handbook-qualitative-evidence-synthesis</ext-link></comment></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="editor"><name name-style="western"><surname>Higgins</surname><given-names>JPT</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Chandler</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Chapter 3: defining the criteria for including studies and how they will be grouped for the synthesis</article-title><source>Cochrane Handbook for Systematic Reviews of Interventions</source><publisher-name>The Cochrane Collaboration</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://training.cochrane.org/handbook/current/chapter-03">https://training.cochrane.org/handbook/current/chapter-03</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><article-title>PRISMA</article-title><source>PRISMA 2020 checklist</source><access-date>2024-11-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.prisma-statement.org/prisma-2020-checklist">https://www.prisma-statement.org/prisma-2020-checklist</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><article-title>Critical Appraisal Skills Programme &#x201C;Qualitative Studies Checklist&#x201D; - CASP</article-title><source>Critical Appraisal Skills Programme</source><access-date>2024-11-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://casp-uk.net/casp-tools-checklists/qualitative-studies-checklist">https://casp-uk.net/casp-tools-checklists/qualitative-studies-checklist</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hong</surname><given-names>QN</given-names> </name><name name-style="western"><surname>F&#x00E0;bregues</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bartlett</surname><given-names>G</given-names> </name><etal/></person-group><article-title>The Mixed Methods Appraisal Tool (MMAT) version 2018 for information professionals and 
researchers</article-title><source>Education for Information</source><year>2018</year><volume>34</volume><issue>4</issue><fpage>285</fpage><lpage>291</lpage><pub-id pub-id-type="doi">10.3233/EFI-180221</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>Data extraction for intervention systematic reviews</article-title><source>Covidence</source><access-date>2024-11-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.covidence.org/resource/data-extraction-for-intervention-systematic-reviews">https://www.covidence.org/resource/data-extraction-for-intervention-systematic-reviews</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Jacobs</surname><given-names>M</given-names> </name><name name-style="western"><surname>He</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pradier</surname><given-names>FM</given-names> </name><etal/></person-group><article-title>Designing AI for trust and collaboration in time-constrained medical decisions: a sociotechnical lens</article-title><year>2021</year><month>05</month><day>6</day><conf-name>CHI &#x2019;21: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>May 8-13, 2021</conf-date><conf-loc>Yokohama, Japan</conf-loc><pub-id pub-id-type="doi">10.1145/3411764.3445385</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>&#x201C;Brilliant AI doctor&#x201D; in rural clinics: 
challenges in AI-powered clinical decision support system deployment</article-title><year>2021</year><month>05</month><day>6</day><conf-name>CHI &#x2019;21: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>May 8-13, 2021</conf-date><conf-loc>Yokohama, Japan</conf-loc><fpage>1</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1145/3411764.3445432</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Micocci</surname><given-names>M</given-names> </name><name name-style="western"><surname>Borsci</surname><given-names>S</given-names> </name><name name-style="western"><surname>Thakerar</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Attitudes towards trusting artificial intelligence insights and factors to prevent the passive adherence of GPs: a pilot study</article-title><source>J Clin Med</source><year>2021</year><month>07</month><day>14</day><volume>10</volume><issue>14</issue><fpage>3101</fpage><pub-id pub-id-type="doi">10.3390/jcm10143101</pub-id><pub-id pub-id-type="medline">34300267</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name><name name-style="western"><surname>Medow</surname><given-names>JE</given-names> </name></person-group><article-title>Clinicians&#x2019; perceptions of an artificial intelligence-based blood utilization calculator: qualitative exploratory study</article-title><source>JMIR Hum Factors</source><year>2022</year><month>10</month><day>31</day><volume>9</volume><issue>4</issue><fpage>e38411</fpage><pub-id pub-id-type="doi">10.2196/38411</pub-id><pub-id 
pub-id-type="medline">36315238</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gunasekeran</surname><given-names>DV</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>F</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>GYS</given-names> </name><etal/></person-group><article-title>Acceptance and perception of artificial intelligence usability in eye care (APPRAISE) for ophthalmologists: a multinational perspective</article-title><source>Front Med (Lausanne)</source><year>2022</year><volume>9</volume><fpage>875242</fpage><pub-id pub-id-type="doi">10.3389/fmed.2022.875242</pub-id><pub-id pub-id-type="medline">36314006</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name></person-group><article-title>Factors influencing clinicians&#x2019; willingness to use an AI-based clinical decision support system</article-title><source>Front Digit Health</source><year>2022</year><volume>4</volume><fpage>920662</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2022.920662</pub-id><pub-id pub-id-type="medline">36339516</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ankolekar</surname><given-names>A</given-names> </name><name name-style="western"><surname>van der Heijden</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dekker</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Clinician perspectives on clinical decision support systems in lung cancer: implications for shared decision-making</article-title><source>Health 
Expect</source><year>2022</year><month>08</month><volume>25</volume><issue>4</issue><fpage>1342</fpage><lpage>1351</lpage><pub-id pub-id-type="doi">10.1111/hex.13457</pub-id><pub-id pub-id-type="medline">35535474</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Van Biesen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Van Cauwenberge</surname><given-names>D</given-names> </name><name name-style="western"><surname>Decruyenaere</surname><given-names>J</given-names> </name><name name-style="western"><surname>Leune</surname><given-names>T</given-names> </name><name name-style="western"><surname>Sterckx</surname><given-names>S</given-names> </name></person-group><article-title>An exploration of expectations and perceptions of practicing physicians on the implementation of computerized clinical decision support systems using a Qsort approach</article-title><source>BMC Med Inform Decis Mak</source><year>2022</year><month>07</month><day>16</day><volume>22</volume><issue>1</issue><fpage>185</fpage><pub-id pub-id-type="doi">10.1186/s12911-022-01933-3</pub-id><pub-id pub-id-type="medline">35842722</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Sivaraman</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bukowski</surname><given-names>LA</given-names> </name><name name-style="western"><surname>Levin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kahn</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Perer</surname><given-names>A</given-names> </name></person-group><article-title>Ignore, trust, or negotiate: understanding clinician acceptance of AI-based treatment recommendations in health 
care</article-title><year>2023</year><conf-name>CHI &#x2019;23: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>Apr 23-28, 2023</conf-date><conf-loc>Hamburg, Germany</conf-loc><pub-id pub-id-type="doi">10.1145/3544548.3581075</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Bach</surname><given-names>AKP</given-names> </name><name name-style="western"><surname>N&#x00F8;rgaard</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Brok</surname><given-names>JC</given-names> </name><name name-style="western"><surname>van Berkel</surname><given-names>N</given-names> </name></person-group><article-title>&#x201C;If I had all the time in the world&#x201D;: ophthalmologists&#x2019; perceptions of anchoring bias mitigation in clinical AI support</article-title><year>2023</year><month>04</month><day>19</day><conf-name>CHI &#x2019;23: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>Apr 23-28, 2023</conf-date><conf-loc>Hamburg, Germany</conf-loc><pub-id pub-id-type="doi">10.1145/3544548.3581513</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Burgess</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Jankovic</surname><given-names>I</given-names> </name><name name-style="western"><surname>Austin</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Healthcare AI treatment decision support: design principles to enhance clinician adoption and trust</article-title><year>2023</year><month>04</month><day>19</day><conf-name>CHI &#x2019;23: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>Apr 23-28, 
2023</conf-date><conf-loc>Hamburg, Germany</conf-loc><pub-id pub-id-type="doi">10.1145/3544548.3581251</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Patterson</surname><given-names>BL</given-names> </name><etal/></person-group><article-title>Using AI-generated suggestions from ChatGPT to optimize clinical decision support</article-title><source>J Am Med Inform Assoc</source><year>2023</year><month>06</month><day>20</day><volume>30</volume><issue>7</issue><fpage>1237</fpage><lpage>1245</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocad072</pub-id><pub-id pub-id-type="medline">37087108</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Anjara</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Janik</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dunford-Stenger</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Examining explainable clinical decision support systems with think aloud protocols</article-title><source>PLoS One</source><year>2023</year><volume>18</volume><issue>9</issue><fpage>e0291443</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0291443</pub-id><pub-id pub-id-type="medline">37708135</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Barreto</surname><given-names>EF</given-names> </name><name 
name-style="western"><surname>Dong</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Discrepancy between perceptions and acceptance of clinical decision support systems: implementation of artificial intelligence for vancomycin dosing</article-title><source>BMC Med Inform Decis Mak</source><year>2023</year><month>08</month><day>11</day><volume>23</volume><issue>1</issue><fpage>157</fpage><pub-id pub-id-type="doi">10.1186/s12911-023-02254-9</pub-id><pub-id pub-id-type="medline">37568134</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liaw</surname><given-names>WR</given-names> </name><name name-style="western"><surname>Ramos Silva</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Soltero</surname><given-names>EG</given-names> </name><name name-style="western"><surname>Krist</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stotts</surname><given-names>AL</given-names> </name></person-group><article-title>An assessment of how clinicians and staff members use a diabetes artificial intelligence prediction tool: mixed methods study</article-title><source>JMIR AI</source><year>2023</year><month>05</month><day>29</day><volume>2</volume><fpage>e45032</fpage><pub-id pub-id-type="doi">10.2196/45032</pub-id><pub-id pub-id-type="medline">38875578</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nair</surname><given-names>M</given-names> </name><name name-style="western"><surname>Andersson</surname><given-names>J</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Lundgren</surname><given-names>LE</given-names> 
</name></person-group><article-title>Barriers and enablers for implementation of an artificial intelligence-based decision support tool to reduce the risk of readmission of patients with heart failure: stakeholder interviews</article-title><source>JMIR Form Res</source><year>2023</year><month>08</month><day>23</day><volume>7</volume><fpage>e47335</fpage><pub-id pub-id-type="doi">10.2196/47335</pub-id><pub-id pub-id-type="medline">37610799</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name><name name-style="western"><surname>Medow</surname><given-names>JE</given-names> </name></person-group><article-title>Effect of risk, expectancy, and trust on clinicians&#x2019; intent to use an artificial intelligence system -- blood utilization calculator</article-title><source>Appl Ergon</source><year>2022</year><month>05</month><volume>101</volume><fpage>103708</fpage><pub-id pub-id-type="doi">10.1016/j.apergo.2022.103708</pub-id><pub-id pub-id-type="medline">35149301</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>York</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Raj</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ashdown</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jones</surname><given-names>G</given-names> </name></person-group><article-title>Clinician and computer: a study on doctors&#x2019; perceptions of artificial intelligence in skeletal radiography</article-title><source>BMC Med 
Educ</source><year>2023</year><month>01</month><day>10</day><volume>23</volume><issue>1</issue><fpage>16</fpage><pub-id pub-id-type="doi">10.1186/s12909-022-03976-6</pub-id><pub-id pub-id-type="medline">36627640</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elareed</surname><given-names>HR</given-names> </name><name name-style="western"><surname>Salama</surname><given-names>RAA</given-names> </name><name name-style="western"><surname>Ismaeel</surname><given-names>AY</given-names> </name><name name-style="western"><surname>Lotfy</surname><given-names>AMM</given-names> </name></person-group><article-title>Perception and opinion of physicians regarding artificial intelligence in Egypt</article-title><source>Egypt J Intern Med</source><year>2024</year><month>10</month><day>1</day><volume>97</volume><issue>1</issue><fpage>3423</fpage><lpage>3428</lpage><pub-id pub-id-type="doi">10.21608/ejhm.2024.384066</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Baxter</surname><given-names>SL</given-names> </name><name name-style="western"><surname>van den Brandt</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Usability and clinician acceptance of a deep learning-based clinical decision support tool for predicting glaucomatous visual field progression</article-title><source>J Glaucoma</source><year>2023</year><month>03</month><day>1</day><volume>32</volume><issue>3</issue><fpage>151</fpage><lpage>158</lpage><pub-id pub-id-type="doi">10.1097/IJG.0000000000002163</pub-id><pub-id pub-id-type="medline">36877820</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation 
citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Haocan</surname><given-names>S</given-names> </name></person-group><article-title>Human-AI Trust Scale</article-title><source>Center for Open Science</source><year>2024</year><month>11</month><day>5</day><access-date>2025-07-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://osf.io/mk8d9">https://osf.io/mk8d9</ext-link></comment></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>B</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Luan</surname><given-names>S</given-names> </name></person-group><article-title>Developing trustworthy artificial intelligence: insights from research on interpersonal, human-automation, and human-AI trust</article-title><source>Front Psychol</source><year>2024</year><volume>15</volume><fpage>1382693</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2024.1382693</pub-id><pub-id pub-id-type="medline">38694439</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nasarian</surname><given-names>E</given-names> </name><name name-style="western"><surname>Alizadehsani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Acharya</surname><given-names>UR</given-names> </name><name name-style="western"><surname>Tsui</surname><given-names>KL</given-names> </name></person-group><article-title>Designing interpretable ML system to enhance trust in healthcare: a systematic review to proposed responsible clinician-AI-collaboration framework</article-title><source>Information 
Fusion</source><year>2024</year><month>08</month><volume>108</volume><fpage>102412</fpage><pub-id pub-id-type="doi">10.1016/j.inffus.2024.102412</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>M</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>L</given-names> </name><etal/></person-group><article-title>The need for digital health education among next-generation health workers in China: a cross-sectional survey on digital health education</article-title><source>BMC Med Educ</source><year>2023</year><month>07</month><day>31</day><volume>23</volume><issue>1</issue><fpage>541</fpage><pub-id pub-id-type="doi">10.1186/s12909-023-04407-w</pub-id><pub-id pub-id-type="medline">37525126</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Harari</surname><given-names>RE</given-names> </name><name name-style="western"><surname>Ahmadi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Pourfalatoun</surname><given-names>S</given-names> </name><name name-style="western"><surname>Al-Taweel</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shokoohi</surname><given-names>H</given-names> </name></person-group><article-title>Clinician-AI collaboration for decision support in telemedicine: a randomized controlled trial study</article-title><conf-name>Proceedings of Conference on Cognitive and Computational Aspects of Situation Management 2023</conf-name><conf-date>Oct 16-19, 2023</conf-date><conf-loc>Philadelphia, PA</conf-loc><fpage>81</fpage><lpage>89</lpage><pub-id pub-id-type="doi">10.29007/9qxd</pub-id></nlm-citation></ref><ref 
id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Newton</surname><given-names>N</given-names> </name><name name-style="western"><surname>Bamgboje-Ayodele</surname><given-names>A</given-names> </name><name name-style="western"><surname>Forsyth</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tariq</surname><given-names>A</given-names> </name><name name-style="western"><surname>Baysari</surname><given-names>MT</given-names> </name></person-group><article-title>How are clinicians&#x2019; acceptance and use of clinical decision support systems evaluated over time? A systematic review</article-title><source>Stud Health Technol Inform</source><year>2024</year><month>01</month><day>25</day><volume>310</volume><fpage>259</fpage><lpage>263</lpage><pub-id pub-id-type="doi">10.3233/SHTI230967</pub-id><pub-id pub-id-type="medline">38269805</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benrimoh</surname><given-names>D</given-names> </name><name name-style="western"><surname>Tanguay-Sela</surname><given-names>M</given-names> </name><name name-style="western"><surname>Perlman</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Using a simulation centre to evaluate preliminary acceptability and impact of an artificial intelligence-powered clinical decision support system for depression treatment on the physician-patient interaction</article-title><source>BJPsych Open</source><year>2021</year><month>01</month><day>6</day><volume>7</volume><issue>1</issue><fpage>e22</fpage><pub-id pub-id-type="doi">10.1192/bjo.2020.127</pub-id><pub-id pub-id-type="medline">33403948</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Elhaddad</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hamam</surname><given-names>S</given-names> </name></person-group><article-title>AI-driven clinical decision support systems: an ongoing pursuit of potential</article-title><source>Cureus</source><year>2024</year><month>04</month><volume>16</volume><issue>4</issue><fpage>e57728</fpage><pub-id pub-id-type="doi">10.7759/cureus.57728</pub-id><pub-id pub-id-type="medline">38711724</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gonzalez</surname><given-names>XT</given-names> </name><name name-style="western"><surname>Steger-May</surname><given-names>K</given-names> </name><name name-style="western"><surname>Abraham</surname><given-names>J</given-names> </name></person-group><article-title>Just another tool in their repertoire: uncovering insights into public and patient perspectives on clinicians&#x2019; use of machine learning in perioperative care</article-title><source>J Am Med Inform Assoc</source><year>2025</year><month>01</month><day>1</day><volume>32</volume><issue>1</issue><fpage>150</fpage><lpage>162</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocae257</pub-id><pub-id pub-id-type="medline">39401245</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>W</given-names> </name><name name-style="western"><surname>Chima</surname><given-names>S</given-names> </name><name name-style="western"><surname>Emery</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Perceptions of primary care patients on the use of electronic clinical decision support tools to facilitate health care: a systematic 
review</article-title><source>Patient Educ Couns</source><year>2024</year><month>08</month><volume>125</volume><fpage>108290</fpage><pub-id pub-id-type="doi">10.1016/j.pec.2024.108290</pub-id><pub-id pub-id-type="medline">38714007</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Knop</surname><given-names>M</given-names> </name><name name-style="western"><surname>Weber</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mueller</surname><given-names>M</given-names> </name><name name-style="western"><surname>Niehaves</surname><given-names>B</given-names> </name></person-group><article-title>Human factors and technological characteristics influencing the interaction of medical professionals with artificial intelligence-enabled clinical decision support systems: literature review</article-title><source>JMIR Hum Factors</source><year>2022</year><month>03</month><day>24</day><volume>9</volume><issue>1</issue><fpage>e28639</fpage><pub-id pub-id-type="doi">10.2196/28639</pub-id><pub-id pub-id-type="medline">35323118</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Labkoff</surname><given-names>S</given-names> </name><name name-style="western"><surname>Oladimeji</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kannry</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Toward a responsible future: recommendations for AI-enabled clinical decision support</article-title><source>J Am Med Inform Assoc</source><year>2024</year><month>11</month><day>1</day><volume>31</volume><issue>11</issue><fpage>2730</fpage><lpage>2739</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocae209</pub-id><pub-id 
pub-id-type="medline">39325508</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Payne</surname><given-names>VL</given-names> </name><name name-style="western"><surname>Sattar</surname><given-names>U</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Clinician perspectives on how situational context and augmented intelligence design features impact perceived usefulness of sepsis prediction scores embedded within a simulated electronic health record</article-title><source>J Am Med Inform Assoc</source><year>2024</year><month>05</month><day>20</day><volume>31</volume><issue>6</issue><fpage>1331</fpage><lpage>1340</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocae089</pub-id><pub-id pub-id-type="medline">38661564</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perivolaris</surname><given-names>A</given-names> </name><name name-style="western"><surname>Adams-McGavin</surname><given-names>C</given-names> </name><name name-style="western"><surname>Madan</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Quality of interaction between clinicians and artificial intelligence systems. 
A systematic review</article-title><source>Future Healthc J</source><year>2024</year><month>09</month><volume>11</volume><issue>3</issue><fpage>100172</fpage><pub-id pub-id-type="doi">10.1016/j.fhj.2024.100172</pub-id><pub-id pub-id-type="medline">39281326</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinsky</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Bedoya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bihorac</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Use of artificial intelligence in critical care: opportunities and obstacles</article-title><source>Crit Care</source><year>2024</year><month>04</month><day>8</day><volume>28</volume><issue>1</issue><fpage>113</fpage><pub-id pub-id-type="doi">10.1186/s13054-024-04860-z</pub-id><pub-id pub-id-type="medline">38589940</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rambach</surname><given-names>T</given-names> </name><name name-style="western"><surname>Gleim</surname><given-names>P</given-names> </name><name name-style="western"><surname>Mandelartz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Heizmann</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kunze</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kellmeyer</surname><given-names>P</given-names> </name></person-group><article-title>Challenges and facilitation approaches for the participatory design of AI-based clinical decision support systems: protocol for a scoping review</article-title><source>JMIR Res 
Protoc</source><year>2024</year><month>09</month><day>5</day><volume>13</volume><fpage>e58185</fpage><pub-id pub-id-type="doi">10.2196/58185</pub-id><pub-id pub-id-type="medline">39235846</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zicari</surname><given-names>RV</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>S</given-names> </name><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Co-design of a trustworthy AI system in healthcare: deep learning based skin lesion classifier</article-title><source>Front Hum Dyn</source><year>2021</year><volume>3</volume><fpage>688152</fpage><pub-id pub-id-type="doi">10.3389/fhumd.2021.688152</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McKee</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wouters</surname><given-names>OJ</given-names> </name></person-group><article-title>The challenges of regulating artificial intelligence in healthcare comment on &#x201C;Clinical decision support and new regulatory frameworks for medical devices: are we ready for it? 
- A viewpoint paper&#x201D;</article-title><source>Int J Health Policy Manag</source><year>2023</year><volume>12</volume><fpage>7261</fpage><pub-id pub-id-type="doi">10.34172/ijhpm.2022.7261</pub-id><pub-id pub-id-type="medline">36243948</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Gillespie</surname><given-names>N</given-names> </name><name name-style="western"><surname>Lockey</surname><given-names>S</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>C</given-names> </name><name name-style="western"><surname>Pool</surname><given-names>J</given-names> </name><name name-style="western"><surname>Akbari</surname><given-names>A</given-names> </name></person-group><article-title>Trust in artificial intelligence: a global study</article-title><year>2023</year><access-date>2025-07-17</access-date><publisher-name>The University of Queensland and KPMG Australia</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://assets.kpmg.com/content/dam/kpmg/au/pdf/2023/trust-in-ai-global-insights-2023.pdf">https://assets.kpmg.com/content/dam/kpmg/au/pdf/2023/trust-in-ai-global-insights-2023.pdf</ext-link></comment><pub-id pub-id-type="doi">10.14264/00d3c94</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rojas</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Teran</surname><given-names>M</given-names> </name><name name-style="western"><surname>Umscheid</surname><given-names>CA</given-names> </name></person-group><article-title>Clinician trust in artificial intelligence: what is known and how trust can be facilitated</article-title><source>Crit Care 
Clin</source><year>2023</year><month>10</month><volume>39</volume><issue>4</issue><fpage>769</fpage><lpage>782</lpage><pub-id pub-id-type="doi">10.1016/j.ccc.2023.02.004</pub-id><pub-id pub-id-type="medline">37704339</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>H</given-names> </name><name name-style="western"><surname>Downer</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ives</surname><given-names>J</given-names> </name></person-group><article-title>Clinicians and AI use: where is the professional guidance?</article-title><source>J Med Ethics</source><year>2024</year><month>06</month><day>21</day><volume>50</volume><issue>7</issue><fpage>437</fpage><lpage>441</lpage><pub-id pub-id-type="doi">10.1136/jme-2022-108831</pub-id><pub-id pub-id-type="medline">37607805</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Subasi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ozaltin</surname><given-names>O</given-names> </name><name name-style="western"><surname>Mitra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Subasi</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Sarirete</surname><given-names>A</given-names> </name></person-group><article-title>Trustworthy artificial intelligence in healthcare</article-title><source>Accelerating Strategic Changes for Digital Transformation in the Healthcare Industry</source><year>2023</year><publisher-name>Elsevier</publisher-name><fpage>145</fpage><lpage>177</lpage><pub-id pub-id-type="doi">10.1016/B978-0-443-15299-3.00015-4</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation 
citation-type="web"><article-title>Ethics guidelines for trustworthy AI: shaping Europe&#x2019;s digital future</article-title><source>European Commission</source><year>2019</year><month>04</month><day>8</day><access-date>2025-07-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai">https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</ext-link></comment></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="web"><article-title>European Parliament &#x201C;EU AI Act&#x201D;: first regulation on artificial intelligence</article-title><source>European Parliament</source><year>2023</year><month>08</month><day>6</day><access-date>2025-07-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence">https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="web"><source>Organisation for Economic Co-operation and Development (OECD)</source><year>2019</year><access-date>2025-07-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://oecd.ai/en">https://oecd.ai/en</ext-link></comment></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schuh</surname><given-names>C</given-names> </name><name name-style="western"><surname>de Bruin</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Seeling</surname><given-names>W</given-names> </name></person-group><article-title>Clinical decision support systems at the Vienna General Hospital using Arden Syntax: design, implementation, and
integration</article-title><source>Artif Intell Med</source><year>2018</year><month>11</month><volume>92</volume><fpage>24</fpage><lpage>33</lpage><pub-id pub-id-type="doi">10.1016/j.artmed.2015.11.002</pub-id><pub-id pub-id-type="medline">26706047</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alzahrani</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Al-Meer</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Alsaygh</surname><given-names>SMY</given-names> </name><etal/></person-group><article-title>Healthcare professionals&#x2019; perceptions of the use of artificial intelligence applications in decision making in Saudi healthcare settings</article-title><source>Review of Contemporary Philosophy</source><year>2023</year><volume>22</volume><issue>1</issue><fpage>85</fpage><lpage>98</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://reviewofconphil.com/index.php/journal/article/view/168/124">https://reviewofconphil.com/index.php/journal/article/view/168/124</ext-link></comment></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elnaggar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Alharbi</surname><given-names>ZA</given-names> </name><name name-style="western"><surname>Alanazi</surname><given-names>AM</given-names> </name><etal/></person-group><article-title>Assessment of the perception and worries of Saudi healthcare providers about the application of artificial intelligence in Saudi health facilities</article-title><source>Cureus</source><year>2023</year><month>08</month><volume>15</volume><issue>8</issue><fpage>e42858</fpage><pub-id pub-id-type="doi">10.7759/cureus.42858</pub-id><pub-id 
pub-id-type="medline">37664374</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Krop</surname><given-names>P</given-names> </name><name name-style="western"><surname>Koch</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Carolus</surname><given-names>A</given-names> </name><name name-style="western"><surname>Latoschik</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Wienrich</surname><given-names>C</given-names> </name></person-group><article-title>The effects of expertise, humanness, and congruence on perceived trust, warmth, competence and intention to use embodied AI</article-title><year>2024</year><month>05</month><day>11</day><conf-name>CHI EA &#x2019;24: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>May 11-16, 2024</conf-date><conf-loc>Honolulu, HI</conf-loc><fpage>1</fpage><lpage>9</lpage><pub-id pub-id-type="doi">10.1145/3613905.3650749</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Higgins</surname><given-names>O</given-names> </name><name name-style="western"><surname>Chalup</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Wilson</surname><given-names>RL</given-names> </name></person-group><article-title>Artificial intelligence in nursing: trustworthy or reliable?</article-title><source>J Res Nurs</source><year>2024</year><month>03</month><volume>29</volume><issue>2</issue><fpage>143</fpage><lpage>153</lpage><pub-id pub-id-type="doi">10.1177/17449871231215696</pub-id><pub-id pub-id-type="medline">39070561</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name 
name-style="western"><surname>Helen</surname><given-names>D</given-names> </name><name name-style="western"><surname>Suresh</surname><given-names>NV</given-names> </name></person-group><article-title>Generative AI in healthcare: opportunities, challenges, and future perspectives</article-title><source>Revolutionizing the Healthcare Sector with AI</source><year>2024</year><publisher-name>IGI Global</publisher-name><fpage>79</fpage><lpage>90</lpage><pub-id pub-id-type="doi">10.4018/979-8-3693-3731-8.ch004</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aquino</surname><given-names>YSJ</given-names> </name><name name-style="western"><surname>Rogers</surname><given-names>WA</given-names> </name><name name-style="western"><surname>Braunack-Mayer</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Utopia versus dystopia: professional perspectives on the impact of healthcare artificial intelligence on clinical roles and skills</article-title><source>Int J Med Inform</source><year>2023</year><month>01</month><volume>169</volume><fpage>104903</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104903</pub-id><pub-id pub-id-type="medline">36343512</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tucci</surname><given-names>V</given-names> </name><name name-style="western"><surname>Saary</surname><given-names>J</given-names> </name><name name-style="western"><surname>Doyle</surname><given-names>TE</given-names> </name></person-group><article-title>Factors influencing trust in medical artificial intelligence for healthcare professionals: a narrative review</article-title><source>J Med Artif 
Intell</source><year>2022</year><month>03</month><day>30</day><volume>5</volume><fpage>4</fpage><lpage>4</lpage><pub-id pub-id-type="doi">10.21037/jmai-21-25</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ball</surname><given-names>R</given-names> </name><name name-style="western"><surname>Talal</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Dang</surname><given-names>O</given-names> </name><name name-style="western"><surname>Mu&#x00F1;oz</surname><given-names>M</given-names> </name><name name-style="western"><surname>Markatou</surname><given-names>M</given-names> </name></person-group><article-title>Trust but verify: lessons learned for the application of AI to case-based clinical decision-making from postmarketing drug safety assessment at the US Food and Drug Administration</article-title><source>J Med Internet Res</source><year>2024</year><month>06</month><day>6</day><volume>26</volume><fpage>e50274</fpage><pub-id pub-id-type="doi">10.2196/50274</pub-id><pub-id pub-id-type="medline">38842929</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search strategies.</p><media xlink:href="jmir_v27i1e69678_app1.docx" xlink:title="DOCX File, 13 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Additional analysis.</p><media xlink:href="jmir_v27i1e69678_app2.docx" xlink:title="DOCX File, 203 KB"/></supplementary-material><supplementary-material id="app3"><label>Checklist 1</label><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) checklist.</p><media xlink:href="jmir_v27i1e69678_app3.pdf" xlink:title="PDF File, 82 KB"/></supplementary-material></app-group></back></article>