<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e73374</article-id>
      <article-id pub-id-type="pmid">40773743</article-id>
      <article-id pub-id-type="doi">10.2196/73374</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Improving Explainability and Integrability of Medical AI to Promote Health Care Professional Acceptance and Use: Mixed Systematic Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Sarvestan</surname>
            <given-names>Javad</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Scharlau</surname>
            <given-names>Ingrid</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zheng</surname>
            <given-names>Lu</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Yun</surname>
            <given-names>Hye Sun</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Khosravi</surname>
            <given-names>Mohsen</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Oloruntoba</surname>
            <given-names>Oluwafemi</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Udensi</surname>
            <given-names>Chukwuma</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Kath</surname>
            <given-names>Suraj</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Liu</surname>
            <given-names>Yushu</given-names>
          </name>
          <degrees>MPA</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-7794-2872</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Liu</surname>
            <given-names>Chenxi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>School of Medicine and Health Management, Huazhong University of Science and Technology</institution>
            <addr-line>Hangkong Road No. 13</addr-line>
            <addr-line>Qiaokou District</addr-line>
            <addr-line>Wuhan, 430030</addr-line>
            <country>China</country>
            <phone>86 13476237004</phone>
            <email>liu_chenxi@hust.edu.cn</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0567-8032</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Zheng</surname>
            <given-names>Jianing</given-names>
          </name>
          <degrees>MPA</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0002-4777-2211</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Xu</surname>
            <given-names>Chang</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3323-9918</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Wang</surname>
            <given-names>Dan</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <xref rid="aff6" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7384-7542</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>School of Medicine and Health Management, Huazhong University of Science and Technology</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Major Disciplinary Platform under Double First-Class Initiative for Liberal Arts at Huazhong University of Science and Technology (Research Center for High-Quality Development of Hospitals)</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Smart Hospital Research Institute, Peking University Shenzhen Hospital</institution>
        <addr-line>Shenzhen</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>School of Management, Hubei University of Chinese Medicine</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Hubei Shizhen Laboratory, Hubei University of Chinese Medicine</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Research Center for Traditional Chinese Medicine Development, Hubei University of Chinese Medicine</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Chenxi Liu <email>liu_chenxi@hust.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>7</day>
        <month>8</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e73374</elocation-id>
      <history>
        <date date-type="received">
          <day>5</day>
          <month>3</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>8</day>
          <month>4</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>12</day>
          <month>5</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>2</day>
          <month>6</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Yushu Liu, Chenxi Liu, Jianing Zheng, Chang Xu, Dan Wang. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 07.08.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e73374" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The integration of artificial intelligence (AI) in health care has significant potential, yet its acceptance by health care professionals (HCPs) is essential for successful implementation. Understanding HCPs’ perspectives on the explainability and integrability of medical AI is crucial, as these factors influence their willingness to adopt and effectively use such technologies.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to improve the acceptance and use of medical AI. From a user perspective, it explores HCPs’ understanding of the explainability and integrability of medical AI.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We performed a mixed systematic review by conducting a comprehensive search in the PubMed, Web of Science, Scopus, IEEE Xplore, ACM Digital Library, and arXiv databases for studies published between 2014 and 2024. Studies concerning an explanation or the integrability of medical AI were included. Study quality was assessed using the Joanna Briggs Institute critical appraisal checklist and Mixed Methods Appraisal Tool, with only medium- or high-quality studies included. Qualitative data were analyzed via thematic analysis, while quantitative findings were synthesized narratively.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Out of 11,888 records initially retrieved, 22 (0.19%) studies met the inclusion criteria. All selected studies were published from 2020 onward, reflecting the recency and relevance of the topic. The majority (18/22, 82%) originated from high-income countries, and most (17/22, 77%) adopted qualitative methodologies, with the remainder (5/22, 23%) using quantitative or mixed method approaches. From the included studies, a conceptual framework was developed that delineates HCPs’ perceptions of explainability and integrability. Regarding explainability, HCPs predominantly emphasized postprocessing explanations, particularly aspects of local explainability such as feature relevance and case-specific outputs. Visual tools that enhance the explainability of AI decisions (eg, heat maps and feature attribution) were frequently mentioned as important enablers of trust and acceptance. For integrability, key concerns included workflow adaptation, system compatibility with electronic health records, and overall ease of use. These aspects were consistently identified as primary conditions for real-world adoption.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>To foster wider adoption of AI in clinical settings, future system designs must center on the needs of HCPs. Enhancing post hoc explainability and ensuring seamless integration into existing workflows are critical to building trust and promoting sustained use. The proposed conceptual framework can serve as a practical guide for developers, researchers, and policy makers in aligning AI solutions with frontline user expectations.</p>
        </sec>
        <sec sec-type="trial registration">
          <title>Trial Registration</title>
          <p>PROSPERO CRD420250652253; https://www.crd.york.ac.uk/PROSPERO/view/CRD420250652253</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>explainability</kwd>
        <kwd>integrability</kwd>
        <kwd>healthcare professionals</kwd>
        <kwd>systematic review</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>The rapid development of artificial intelligence (AI) has demonstrated profound impacts across various industries, particularly in the health care sector. The application of AI has shown significant potential and is widely used in areas such as disease diagnosis, patient monitoring, robotic surgery, and clinical decision-making [<xref ref-type="bibr" rid="ref1">1</xref>]. However, with the increasing prevalence of AI technology, issues concerning doctors’ acceptance, trust, and willingness to use AI have garnered widespread attention. A study revealed that only 10% to 30% of doctors use AI in real-world scenarios [<xref ref-type="bibr" rid="ref2">2</xref>]. The poor acceptance and low use of AI systems by users are influenced by various factors, including the technical characteristics of AI itself, individual factors (eg, users’ AI literacy), organizational factors (eg, advocacy by management), and policy-related issues (eg, responsibility attribution in the use of AI). These challenges hinder the widespread adoption of AI technology [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref7">7</xref>].</p>
        <p>Among the technical characteristics of AI, explainability and integrability are considered 2 key factors influencing doctors’ acceptance and use of AI [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. While other elements, such as security and social influence, also play important roles in clinicians’ trust in AI [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], their practical impact on daily clinical adoption differs. Security concerns are essential for AI implementation, but are most often managed at the technical or regulatory level [<xref ref-type="bibr" rid="ref10">10</xref>]. Social influence, encompassing peer and organizational advocacy, tends to influence adoption at the institutional level [<xref ref-type="bibr" rid="ref12">12</xref>]. In contrast, multiple systematic reviews and clinician surveys have identified explainability and integrability as the most immediate and actionable factors influencing real-world AI adoption in health care [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. Accordingly, focusing on explainability and integrability provides practical, user-centered insights for promoting effective integration of AI into clinical practice. 
In this study, <italic>explainability</italic> refers to the extent to which an AI system provides human-understandable and faithful representations of its decision-making process [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>], while <italic>integrability</italic> refers to the extent to which AI systems can be embedded into clinical workflows with minimal disruption, ensuring usability, interoperability, and alignment with routine practices [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Importantly, integrability is a broader concept that encompasses interoperability. While interoperability ensures the technical capacity for systems to exchange and interpret data using standardized formats, integrability goes further to consider how AI systems align with clinical roles, decision-making contexts, workflow timing, and user experience [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. A system may be technically interoperable but still lack integrability if it fails to deliver value in practice or imposes additional burdens on clinicians.</p>
        <p>Explainability is crucial in the medical field, where decision-making is highly complex and involves significant risks. Clinicians need to ensure the accuracy and safety of AI outputs before they can trust and rely on AI. A lack of explainability in AI clinical decision support systems (AI-CDSS) may lead to distrust among decision makers and reduce their willingness to use these technologies [<xref ref-type="bibr" rid="ref19">19</xref>]. In addition, integrability ensures that AI-CDSS can seamlessly integrate into existing clinical workflows. When AI-CDSS are effectively embedded into doctors’ daily routines, clinicians can more efficiently access and use patient data, receive recommendations that better meet practical needs, and reduce the time spent on redundant data entry, allowing them to focus on clinical decision-making [<xref ref-type="bibr" rid="ref20">20</xref>]. Conversely, a lack of integrability in AI-CDSS may negatively impact clinician work by increasing operational complexity, workload, and time costs. Such systems, which fail to align with practical requirements, can reduce doctors’ willingness to adopt them [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>].</p>
        <p>However, on the one hand, most existing studies primarily neglect the end users, namely, physicians’ understanding of and need for explainable AI [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref27">27</xref>]. Studies have mainly concentrated on the perspectives of developers and researchers and have developed methods to present technical descriptions of model processes in AI [<xref ref-type="bibr" rid="ref27">27</xref>], although such approaches seem meaningless if they are not aligned with physicians’ understanding of and requirement for explainability in real-world application in medical settings [<xref ref-type="bibr" rid="ref26">26</xref>]. On the other hand, a few studies have explored how system compatibility, user-friendly design, and workflow adaptability [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>] may contribute to the seamless integration of AI into the clinical workflow. However, what AI integrability entails and what its underlying components are from the physicians’ perspective remains unclear.</p>
      </sec>
      <sec>
        <title>Objectives</title>
        <p>Therefore, this study aims to systematically review existing literature on explainability and integrability of AI systems in health care from the perspective of health care professionals (HCPs). Specifically, it seeks to identify how these 2 factors—explainability and integrability—influence clinicians’ acceptance and use of AI-based decision support systems. On the basis of the findings, the study proposes a conceptual framework that synthesizes key user-centered concerns, with the goal of informing the design of more acceptable and adoptable clinical AI tools.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Literature Search</title>
        <p>We conducted this mixed systematic review according to a protocol registered in PROSPERO (CRD420250652253), and the databases we searched included PubMed, Web of Science, Scopus, IEEE Xplore, ACM Digital Library, and arXiv. We did not explicitly search proceedings from major machine learning and AI conferences such as Association for Computational Linguistics, Neural Information Processing Systems, International Conference on Learning Representations, or International Conference on Machine Learning, as many papers from these venues are concurrently available on arXiv. We used keywords such as “Explainable AI,” “explainability,” “XAI,” “AI integrability,” “usability,” and “human-centered AI” (the detailed search strategy is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) and covered publications from January 2014 to July 2024. In addition, references cited in the retrieved articles and reviews were manually screened.</p>
      </sec>
      <sec>
        <title>Inclusion and Exclusion Criteria</title>
        <p>Studies were eligible for inclusion if they addressed either the explainability or the integrability of AI in health care (ie, meeting at least one of the following two sets of topic-specific criteria): (1) explainability, the study must address at least 1 of the following aspects—how AI processes input information; how conclusions are reached; the rationale behind these conclusions; how explainability fosters user trust and their use; or the knowledge, attitudes, and perceptions of HCPs regarding AI explainability—and (2) integrability, the study must address at least 1 of the following aspects—how AI integrates with hospital information systems and clinical workflows; how integrability fosters HCPs’ trust and use; or the knowledge, attitudes, and perceptions of HCPs regarding AI integrability.</p>
        <p>In addition, studies were required to (3) focus on the perspective of AI end users, specifically HCPs such as doctors, nurses, and medical laboratory staff and (4) be original research, including original quantitative, qualitative, or mixed methods studies.</p>
        <p>Editorials, reviews, conference abstracts, narrative studies, and studies focusing on the perspectives of AI developers, engineers, or technical professionals were excluded (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>).</p>
        <boxed-text id="box1" position="float">
          <title>Inclusion and exclusion criteria for the literature.</title>
          <p>
            <bold>Inclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Time: published between 2014 and 2024</p>
            </list-item>
            <list-item>
              <p>Population: artificial intelligence (AI) users, focusing on health care professionals (HCPs)</p>
            </list-item>
            <list-item>
              <p>Field: related to the medical field or broadly relevant to health care</p>
            </list-item>
            <list-item>
              <p>Outcome: the study must address at least 1 of the following aspects: explaining how AI processes input information; how AI reaches conclusions; the rationale behind AI conclusions; how AI integrates with hospital information systems or clinical workflows; AI usability; how explainability or integrability fosters user trust and use; or HCPs’ (eg, doctors and nurses) knowledge, attitudes, or perceptions of AI explainability or integrability</p>
            </list-item>
            <list-item>
              <p>Study design: original qualitative, quantitative, or mixed method research</p>
            </list-item>
            <list-item>
              <p>Other: published in English</p>
            </list-item>
          </list>
          <p>
            <bold>Exclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Time: published at other periods</p>
            </list-item>
            <list-item>
              <p>Population: AI experts, engineers, or other technical professionals</p>
            </list-item>
            <list-item>
              <p>Field: other specific industries</p>
            </list-item>
            <list-item>
              <p>Study design: abstracts, editorials, commentaries, reviews, and narrative studies</p>
            </list-item>
            <list-item>
              <p>Other: full text unavailable</p>
            </list-item>
          </list>
        </boxed-text>
        <p>To maximize comprehensiveness, we initially applied a broad search strategy, but studies were then screened against strict inclusion and exclusion criteria based on the study objectives. YL initially screened titles and abstracts to assess eligibility, with all decisions systematically recorded in a structured Microsoft Excel spreadsheet to ensure transparency. Owing to resource constraints, this stage was conducted by a single reviewer following strictly defined inclusion and exclusion criteria to minimize potential bias. For full-text screening, 2 reviewers (YL and CL) evaluated each study using a standardized Excel form designed to promote consistency in assessment. Any disagreements or uncertainties were discussed with a third author (JZ) until a consensus was reached, further strengthening the reliability of the screening process. Data extraction was conducted by a single reviewer but performed twice to ensure accuracy and completeness. Extracted items included study characteristics, AI system details, target health care settings, participant types, and key findings related to explainability and integrability. Any uncertainties during extraction were resolved by referring back to the full text.</p>
        <p>To capture users’ perceptions of explainability and integrability across both health care and general domains, we initially adopted a broad search strategy without restricting to medical-specific terms. However, as 21 (95%) of the 22 included studies were from the health care domain, the final analysis focused on medical contexts.</p>
      </sec>
      <sec>
        <title>Quality Assessment</title>
        <p>All included studies underwent quality assessment based on their respective study designs. Quantitative and qualitative studies were evaluated using the Joanna Briggs Institute (JBI) critical appraisal checklists, while mixed methods studies were assessed using the Mixed Methods Appraisal Tool (version 2018). For the JBI checklists, studies scoring ≥80% (ie, meeting at least 8 of 10 items) were considered high quality, 60% to 79% as medium quality, and &#60;60% as low quality. For the Mixed Methods Appraisal Tool, studies that met ≥4 of 5 criteria were rated high quality, while those meeting 3 were considered medium quality. Only studies rated as medium or high quality (22/26, 85%) were included in the final synthesis.</p>
      </sec>
      <sec>
        <title>Data Extraction and Evidence Synthesis</title>
        <p>The data extraction process included the following items for each eligible article—(1) basic information: authors, year, study region, content, and participants; (2) methodology: study design, study population, data collection methods, and data analysis methods; (3) results: HCPs’ understanding and needs regarding AI explainability or integrability.</p>
        <p>This study analyzed quantitative and qualitative results separately and integrated them through narrative synthesis, using a descriptive method to present the findings [<xref ref-type="bibr" rid="ref28">28</xref>]. For qualitative data, we followed the guidelines of Braun and Clarke for thematic analysis. An inductive approach was applied to code data, covering 4 stages: familiarization with the data, initial coding, identification of themes, and review of themes. Thematic analysis was conducted using both deductive and inductive approaches. For the analysis of explainability, we adopted a deductive coding structure based on a preexisting conceptual framework, which includes 3 first-order dimensions: preprocessing, model-level, and postprocessing explainability [<xref ref-type="bibr" rid="ref25">25</xref>]. Specifically, preprocessing explainability refers to enhancing transparency during data preparation and feature engineering before model training. Model explainability focuses on understanding and interpreting the inner mechanisms, parameters, or representations of the model itself during training. Postprocessing explainability involves applying interpretability techniques after model predictions, which can provide both local (instance-level) and global (model-level) explanations of the model’s behavior. These categories guided our initial coding and interpretation. Within each of these dimensions, we generated subthemes inductively from the data to capture participants’ nuanced perspectives. For integrability, no prior framework was applied; all themes were developed inductively based on participants’ responses. Because there is a lack of theory guiding the coding for integrability, themes regarding AI integrability emerged based on the coding of extracted text from retrieved studies. NVivo 10 was used for thematic analysis. 
YL conducted the initial coding individually, and the coded data were then reviewed by a second person to ensure the validity of the themes. Any discrepancies in coding were discussed in group meetings, where consensus was reached.</p>
        <p>In addition, quantitative data related to the explainability and integrability of medical AI from the user’s perspective were also extracted. Owing to substantial heterogeneity in study outcomes and exposure measures, a meta-analysis was inappropriate, and a narrative synthesis was used to analyze the collated studies, including methods, sample size, participants, outcome variables, and dimensions of explainability and integrability.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Literature Screening Results</title>
        <p>A total of 11,888 articles were retrieved through the search and their references. Among them, 26 (0.22%) articles met the inclusion criteria for this study. After quality evaluation, 4 (15%) low-quality articles were excluded, and a total of 22 (85%) articles were included in the analysis. The study selection process is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e73374_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Characteristics of the Included Studies</title>
        <p>All the included studies were published in 2020 or later. Most of the studies were conducted in developed countries (18/22, 82%), including 9 (41%) from the United States, and only 2 (9%) studies originated from developing countries (China and Brazil). Regarding study type, most studies (17/22, 77%) were qualitative in design, and 5 (23%) studies adopted quantitative or mixed method approaches. The basic characteristics of all the included studies are presented in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Characteristics of the included studies.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="100"/>
            <col width="100"/>
            <col width="100"/>
            <col width="100"/>
            <col width="120"/>
            <col width="480"/>
            <thead>
              <tr valign="top">
                <td>Study</td>
                <td>Region</td>
                <td>Study design</td>
                <td>Content</td>
                <td>Participants</td>
                <td>Main findings</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Graziani et al [<xref ref-type="bibr" rid="ref13">13</xref>], 2023</td>
                <td>Worldwide</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>Health care professionals, industry practitioners, and academic researchers</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Explainability in AI<sup>a</sup> refers to the ability to understand and interpret how models make decisions. It is typically categorized into 3 types: intrinsic explainability, where models are inherently interpretable or designed to be transparent; global explainability, which focuses on understanding the overall behavior of a model through methods such as feature importance and visualization; and local explainability, which aims to explain individual predictions using techniques such as local approximations, counterfactual explanations, or adversarial examples.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Marco-Ruiz et al [<xref ref-type="bibr" rid="ref29">29</xref>], 2024</td>
                <td>Europe and America</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>AI technology developers in hospitals, clinicians using AI, and clinical managers involved in adopting AI, among others</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Interviewees highlighted that varying protocols across health care organizations can affect AI system effectiveness, emphasizing the need for local validation to assess performance and workflow impact. They also stressed the importance of tools such as process mining and visualizations to analyze patient pathways and optimize complex workflows using real-world data.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Liaw et al [<xref ref-type="bibr" rid="ref23">23</xref>], 2023</td>
                <td>Multicountry</td>
                <td>Mixed method</td>
                <td>Integrability</td>
                <td>Clinicians managing diabetes</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Interviewees expressed concerns that AI tools might affect patient outcomes and clinical workflows, leading to overdiagnosis, increased costs, and exacerbating health disparities and alert fatigue. The tool’s utility is limited for doctors familiar with their patients. Inaccurate data can cause false alarms or missed diagnoses, with a lack of evidence supporting its accuracy. A user-centered design is recommended to improve the system.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Panagoulias et al [<xref ref-type="bibr" rid="ref30">30</xref>], 2023</td>
                <td>Greece</td>
                <td>Quantitative</td>
                <td>Explainability</td>
                <td>Medical personnel (including medical students and medical practitioners)</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Clinicians rated diagnostic information, certainty, and related reasoning as very important, particularly when their diagnoses conflicted with AI recommendations</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Wang et al [<xref ref-type="bibr" rid="ref22">22</xref>], 2021</td>
                <td>China</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Clinicians in rural clinics</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Heavy workload: rural doctors handle many patients daily with frequent interruptions, leaving little time for detailed communication or documentation, hindering AI-CDSS<sup>b</sup> use.</p>
                    </list-item>
                    <list-item>
                      <p>Resource limitations: lack of necessary equipment and medications in clinics makes many AI-CDSS recommendations impractical, reducing their utility.</p>
                    </list-item>
                    <list-item>
                      <p>Design mismatch: AI-CDSS are designed for time-consuming, standardized processes that do not fit the fast-paced rural practice, and poor integration with other systems leads to data and recommendation issues.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Zheng et al [<xref ref-type="bibr" rid="ref31">31</xref>], 2024</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Explainability and integrability</td>
                <td>Pediatric asthma clinicians</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>User needs and challenges in using ML<sup>c</sup> systems fell into 3 main areas: how well the system fits into daily workflows (eg, avoiding alert fatigue), the need for clear explanations behind system decisions, and difficulties adapting the tool to real-world settings.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Wolf and Ringland [<xref ref-type="bibr" rid="ref32">32</xref>], 2020</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>Users and developers involved in the design and use of XAI<sup>d</sup> systems</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Nonexpert users preferred simple, intuitive explanations of AI decisions, using clear language, visual tools, and real-life examples. They focused on fairness, transparency, and the impact on daily life, needing straightforward charts and simplified explanations to build trust.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Morais et al [<xref ref-type="bibr" rid="ref33">33</xref>], 2023</td>
                <td>Brazil</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>Oncologists</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Visualization helps: experts found visual elements useful for identifying major and minor influencing features.</p>
                    </list-item>
                    <list-item>
                      <p>They also expressed a need for more detail: participants wanted more traceability to see how results are generated, enhancing confidence in decisions.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Helman et al [<xref ref-type="bibr" rid="ref34">34</xref>], 2023</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Explainability and integrability</td>
                <td>Doctors, nurse practitioners, and physician assistants</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Doctors primarily focused on several key aspects when using AI tools: analytic transparency, graphical explainability, the impact on clinical practice, the value of integrating dynamic patient data trends, decision weighting (how much to trust and balance AI outputs in real decisions), and display location—including usability and how the interface is viewed by patients and families.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Ghanvatkar and Rajan [<xref ref-type="bibr" rid="ref11">11</xref>], 2024</td>
                <td>Singapore</td>
                <td>Quantitative</td>
                <td>Explainability</td>
                <td>Clinicians</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>The integration of XGB<sup>e</sup> with SHAP<sup>f</sup>, as well as the combination of LR<sup>g</sup> with SHAP, showed high usefulness because of their strong conceptual explanations, with the XGB and SHAP combination performing best in prediction but lowest in fidelity. Usefulness scores also improved during neural network training, indicating better alignment between explanation importance and predictive power over time.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Kinney et al [<xref ref-type="bibr" rid="ref35">35</xref>], 2024</td>
                <td>Portugal</td>
                <td>Qualitative</td>
                <td>Explainability and integrability</td>
                <td>Doctors, educators, and students</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Transparency in sources: users need to know where AI obtains its information to trust it, similar to how students cite sources.</p>
                    </list-item>
                    <list-item>
                      <p>Impact on doctor-patient relationships: doctors fear AI could reduce personal interaction, mirroring issues with EHRs<sup>h</sup>.</p>
                    </list-item>
                    <list-item>
                      <p>Increased burnout: additional AI-driven tasks may increase physician stress and lead to uncritical reliance on AI suggestions.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Burgess et al [<xref ref-type="bibr" rid="ref36">36</xref>], 2023</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Endocrinology clinicians</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>The study proposes these design principles: (1) ensure algorithms are practical for clinical settings to avoid unrealistic insights; (2) allow clinicians to consider patient-specific factors and maintain control over model outputs; (3) avoid adding “research” tasks to patient visits; and (4) focus on aiding complex decisions in the workflow, not repeating known information.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Yoo et al [<xref ref-type="bibr" rid="ref37">37</xref>], 2023</td>
                <td>South Korea</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Medical and nursing staff in emergency departments and intensive care units of tertiary care hospitals</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Anticipated benefits: most participants believe medical AI can reduce decision-making time and handle repetitive tasks, easing workloads and improving efficiency.</p>
                    </list-item>
                    <list-item>
                      <p>Main concerns: worries include workflow disruptions, added tasks, reduced clinical autonomy, overreliance on algorithms, skill decline, alert fatigue, and the inability to integrate information beyond electronic records.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Schoonderwoerd et al [<xref ref-type="bibr" rid="ref38">38</xref>], 2021</td>
                <td>Netherlands</td>
                <td>Quantitative</td>
                <td>Explainability</td>
                <td>Pediatrician clinicians</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Diagnosis explanations are essential: clinicians agree that understanding how CDSS<sup>i</sup> arrives at a diagnosis is important for trust and decision-making.</p>
                    </list-item>
                    <list-item>
                      <p>Need for personalized explanations: while most information elements are seen as valuable, preferences vary, suggesting that explanations should be tailored to individual needs.</p>
                    </list-item>
                    <list-item>
                      <p>Balance detail and overreliance: key explanation elements include evidence used, supporting or contradicting data, certainty level, missing information, alternative diagnoses, and past performance—but too much detail may lead to blind trust in the system.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Hong et al [<xref ref-type="bibr" rid="ref39">39</xref>], 2020</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>Practitioners in various industries, such as health care, software companies, and social media</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Understanding model behavior: during validation, builders need to know why a model produces a specific output for a given case, especially when it performs unexpectedly. Methods such as LIME<sup>j</sup> and SHAP help provide these insights.</p>
                    </list-item>
                    <list-item>
                      <p>Feature importance analysis: builders assess model logic by examining feature importance, focusing not only on key features but also on less important ones to gain a complete understanding of decision-making.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Gu et al [<xref ref-type="bibr" rid="ref16">16</xref>], 2023</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Medical professionals in pathology</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Enhanced accuracy and efficiency: the xPath system improved diagnostic accuracy and efficiency, reducing workload and boosting confidence.</p>
                    </list-item>
                    <list-item>
                      <p>Traceable evidence for transparency: it provides a layered evidence chain (eg, heat maps and confidence scores), making AI diagnoses transparent and verifiable.</p>
                    </list-item>
                    <list-item>
                      <p>User-friendly design: the system aligns with pathologists’ workflow, supporting easy verification and adjustment of AI recommendations, thus enhancing usability and adoption.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Wenderott et al [<xref ref-type="bibr" rid="ref40">40</xref>], 2024</td>
                <td>Germany</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Radiologists</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>The key barriers to AI adoption are (1) workflow delays, (2) extra steps, and (3) inconsistent AI-CAD<sup>k</sup> performance. The key facilitators are (1) good self-organization and (2) software usability.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Verma et al [<xref ref-type="bibr" rid="ref41">41</xref>], 2023</td>
                <td>Switzerland</td>
                <td>Qualitative</td>
                <td>Explainability and integrability</td>
                <td>Clinicians involved in cancer care (large health care organizations)</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Integration challenges: integrating AI into clinical practice is difficult because of issues with data integration, ontologies, and generating actionable insights.</p>
                    </list-item>
                    <list-item>
                      <p>Trust and generalization: clinicians distrust “black-box” models, and AI performance varies across different populations, limiting widespread use.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Tonekaboni et al [<xref ref-type="bibr" rid="ref42">42</xref>], 2019</td>
                <td>Canada</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>Clinicians in intensive care units and emergency departments</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Transparency: doctors need to know the model’s context and limitations, such as missing patient information, to trust it even if accuracy is not perfect.</p>
                    </list-item>
                    <list-item>
                      <p>Feature explanation: clearly explaining the features used in decisions helps build trust and guides appropriate use in different patient groups.</p>
                    </list-item>
                    <list-item>
                      <p>Visualization: well-designed visualizations enhance understanding and support clinical reasoning.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Brennen [<xref ref-type="bibr" rid="ref43">43</xref>], 2020</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Explainability</td>
                <td>End users and policy makers</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Model debugging and understanding: XAI tools should help users understand model behavior (eg, using LIME or SHAP), but they often require advanced knowledge of ML.</p>
                    </list-item>
                    <list-item>
                      <p>Bias detection: tools should identify and explain systemic bias in models and provide context to assess fairness and reliability.</p>
                    </list-item>
                    <list-item>
                      <p>Building trust: clearly presenting the data and logic behind decisions helps users understand and trust AI systems.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Fogliato et al [<xref ref-type="bibr" rid="ref44">44</xref>], 2022</td>
                <td>America</td>
                <td>Quantitative</td>
                <td>Integrability</td>
                <td>Radiologists</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>One-stage workflow boosts AI reliance: participants more closely followed AI suggestions, especially on noncritical points.</p>
                    </list-item>
                    <list-item>
                      <p>AI outperforms but risks overtrust: AI performed better, but improvements were largely because of reliance on AI, even when incorrect.</p>
                    </list-item>
                    <list-item>
                      <p>Workflow impacts experience: 1-stage users felt increased confidence and speed; 2-stage users found it more complex and burdensome.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Salwei et al [<xref ref-type="bibr" rid="ref45">45</xref>], 2021</td>
                <td>America</td>
                <td>Qualitative</td>
                <td>Integrability</td>
                <td>Emergency physicians</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>The study identified 25 components for integrating a human factors–based CDSS<sup>l</sup> into emergency departments, organized into 4 dimensions: time (when the CDSS is used), flow (how it integrates into workflows), patient journey scope (which care stages it covers), and level (integration at individual, team, and organizational levels).</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>AI: artificial intelligence.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>AI-CDSS: artificial intelligence clinical decision support systems.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>ML: machine learning.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>XAI: explainable artificial intelligence.</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>XGB: extreme gradient boosting.</p>
            </fn>
            <fn id="table1fn6">
              <p><sup>f</sup>SHAP: Shapley additive explanations.</p>
            </fn>
            <fn id="table1fn7">
              <p><sup>g</sup>LR: logistic regression.</p>
            </fn>
            <fn id="table1fn8">
              <p><sup>h</sup>EHR: electronic health record.</p>
            </fn>
            <fn id="table1fn9">
              <p><sup>i</sup>CDSS: clinical decision support systems.</p>
            </fn>
            <fn id="table1fn10">
              <p><sup>j</sup>LIME: local interpretable model-agnostic explanations.</p>
            </fn>
            <fn id="table1fn11">
              <p><sup>k</sup>AI-CAD: artificial intelligence–based computer-aided detection.</p>
            </fn>
            <fn id="table1fn12">
              <p><sup>l</sup>CDSS: clinical decision support system.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Qualitative Research</title>
        <sec>
          <title>Dimensions of AI Explainability From the User’s Perspective</title>
          <sec>
            <title>Overview</title>
            <p>A total of 16 articles focusing on the explainability of AI in the medical field were included [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. According to the results of thematic analysis, HCPs are most concerned with postprocessing explainability, with 14 articles highlighting the necessity and importance of explanations provided in the postprocessing stage for HCPs [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. The second most discussed aspect is the doctors’ concern regarding model explainability [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. HCPs showed the least interest in preprocessing explainability [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. The themes and subthemes regarding AI explainability from the user perspective are shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>.</p>
            <fig id="figure2" position="float">
              <label>Figure 2</label>
              <caption>
                <p>Conceptual framework of artificial intelligence (AI) explainability from the perspective of health care professionals.</p>
              </caption>
              <graphic xlink:href="jmir_v27i1e73374_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
            </fig>
          </sec>
          <sec>
            <title>Postprocessing Explainability</title>
            <sec>
              <title>Overview</title>
              <p>Postprocessing explainability refers to the explanations provided after the AI system has made a decision or prediction. This stage focuses on clarifying the model’s output, helping HCPs understand how specific features or data points contributed to a particular decision. HCPs demonstrate the strongest interest in postprocessing explainability of AI, which could be divided into 2 dimensions: local explainability and global explainability. <italic>Local explainability</italic> refers to explanations for individual decisions or specific instances, helping HCPs understand how the model reaches a particular conclusion in a given situation. By contrast, global explainability refers to the HCPs’ understanding of how the model functions and its underlying decision logic. Comparatively, HCPs focus more on local explainability [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p>
            </sec>
            <sec>
              <title>Local Explainability</title>
              <p>On the basis of the current synthesis, local explainability is the most important aspect of explainability for HCPs, as highlighted by 12 studies, including (1) explanation of features and their importance for specific outputs, (2) certainty of output results, and (3) explanation based on similar cases.</p>
              <p>First, the features of AI are the most critical component of local explainability, with 11 studies [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>] highlighting that HCPs are concerned with which features were included and their importance in contributing to a specific AI-generated output. On the one hand, identifying which features were used in decision-making is fundamental for clinicians to build trust. By clearly showing the features used by the model, HCPs can verify their relevance, which enhances confidence in the model’s decisions. On the other hand, HCPs are also concerned with the extent to which a feature would impact an AI decision, and HCPs would construct trust if their perception of the importance of features was consistent with the results showing which features had the greatest impact on AI decisions [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], and visualization methods play a critical role in enabling HCPs to quickly grasp and understand how these features influence the predictions [<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. By providing clear visual cues such as color-coded severity indicators (eg, red, yellow, and green for high-, medium-, and low-risk categories, respectively) alongside numerical data, these visualizations allow HCPs to assess risk levels at a glance [<xref ref-type="bibr" rid="ref31">31</xref>]. 
In addition, the ease of interpreting visual elements helps in distinguishing major and minor influencing factors [<xref ref-type="bibr" rid="ref33">33</xref>], which enhances the accuracy and speed of decision-making in clinical settings. Moreover, personalized visualization designs are considered an effective way to enhance explainability [<xref ref-type="bibr" rid="ref32">32</xref>]. Visualization schemes customized to meet HCPs’ specific needs can further improve their understanding of the model’s decision-making process. See illustrative descriptions from Zheng et al [<xref ref-type="bibr" rid="ref31">31</xref>] and Hong et al [<xref ref-type="bibr" rid="ref39">39</xref>]:</p>
              <disp-quote>
                <p>Visual indications of severity, such as red, yellow, and green to define high, medium, and low-risk categories paired with a numerical indication were required.</p>
                <attrib>Zheng et al, 2024</attrib>
              </disp-quote>
              <disp-quote>
                <p>Visual elements are easy to interpret and identification of minor influencing features...most domain experts acknowledged that the visual elements are easy to interpret and were able to perform the identification of major/minor influencing features.</p>
                <attrib>Hong et al, 2020</attrib>
              </disp-quote>
              <p>Second, the certainty of output results also plays an important role in local explainability. Displaying the confidence or certainty of the model’s predictions provides HCPs with additional reference information, enabling them to better understand the model’s outputs and thereby increasing their trust in the model [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. For example, providing CIs can help HCPs more effectively assess the reliability of the predictions [<xref ref-type="bibr" rid="ref39">39</xref>]. See illustrative descriptions from Schoonderwoerd et al [<xref ref-type="bibr" rid="ref38">38</xref>] and Hong et al [<xref ref-type="bibr" rid="ref39">39</xref>]:</p>
              <disp-quote>
                <p>More specifically, clinicians stated that supporting- and counterevidence, and the certainty of the system will likely remain important in explanations, while information that is used to make the diagnosis, and the diagnosis in similar cases is likely to become less important over time.</p>
                <attrib>Schoonderwoerd et al, 2021</attrib>
              </disp-quote>
              <disp-quote>
                <p>Presenting certainty score on model performance or predictions is perceived by clinicians as a sort of explanation that complements the output result.</p>
                <attrib>Hong et al, 2020</attrib>
              </disp-quote>
              <p>Third, AI local explainability means the model’s ability to explain its decisions by offering examples of previous instances that are similar to the current case [<xref ref-type="bibr" rid="ref42">42</xref>]. This allows clinicians to interpret and understand how the model arrived at a decision based on prior cases with comparable features. It essentially helps in making the model’s predictions more transparent and interpretable through analogies to similar real-world examples. See illustrative description from Tonekaboni et al [<xref ref-type="bibr" rid="ref42">42</xref>]:</p>
              <disp-quote>
                <p>For example, in cases where an ML model is helping clinicians find a diagnosis for a patient, it is valuable to know the samples the model has previously seen. Clinicians view this as finding similar patients and believe that this kind of explanation can be only helpful in specific applications.</p>
                <attrib>Tonekaboni et al, 2019</attrib>
              </disp-quote>
              <p>In addition, the timing of explanations is a key consideration. Providing HCPs with excessive information may lead to information overload, potentially hindering their understanding of the system. For example, some participants noted that they do not want to verify every AI-generated result each time they use a clinical decision support tool. Instead, they only wish to delve into the underlying logic when the results are unexpected [<xref ref-type="bibr" rid="ref36">36</xref>]. This indicates that HCPs prefer the ability to choose to access more information as needed, rather than being overwhelmed by excessive or redundant details during the explanation process.</p>
              <p>A mixed method study confirmed the aforementioned results [<xref ref-type="bibr" rid="ref38">38</xref>]. Schoonderwoerd et al [<xref ref-type="bibr" rid="ref38">38</xref>] explored physician requirements for clinical support system explainability and surveyed 6 pediatricians, who rated the following aspects as very important in any scenario involving AI-CDSS use, namely, features and their importance, results certainty, features increasing results certainty, and the ability of AI to generalize results to similar situations (median importance ratings from doctors were high regardless of whether the doctor’s diagnosis and that of the computerized decision support system were consistent).</p>
            </sec>
            <sec>
              <title>Global Explainability</title>
              <p>Six studies highlighted the importance of global explainability for HCPs [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref46">46</xref>], including (1) decision logic and rules, (2) explanations in an easily understandable way, and (3) knowing when the model makes errors.</p>
              <p>First, explaining the decision logic and rules is a core part of understanding the overall functioning mechanism of AI models. This involves helping HCPs understand the entire decision-making process of the model, from input features to final outputs, including feature combinations, trade-offs, and the determination of decision boundaries (the dividing criteria set by medical AI). Such explanations are key to enhancing model transparency and enabling HCPs to grasp the overarching decision logic of the system [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. See illustrative description from Hong et al [<xref ref-type="bibr" rid="ref39">39</xref>]:</p>
              <disp-quote>
                <p>The majority of our participants desired better tools to help them understand the mechanism by which a model makes predictions; in particular regarding root cause analysis (P1), identification of decision boundaries (P3), and identification of a global structure to describe how a model works (P13).</p>
                <attrib>Hong et al, 2020</attrib>
              </disp-quote>
              <p>Second, explaining the model in a way that is easy for HCPs to understand is also an important approach to enhancing overall explainability [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. When the model’s explanations align with the HCPs’ cognitive models and logic, they serve as evidence to support decisions, which can greatly enhance the HCPs’ understanding and acceptance of AI model outputs. See illustrative description from Hong et al [<xref ref-type="bibr" rid="ref39">39</xref>]:</p>
              <disp-quote>
                <p>Explanations for model predictions can be used as evidence (P16, P18, P20) to corroborate a decision, when ML model and user’s mental model agree.</p>
                <attrib>Hong et al, 2020</attrib>
              </disp-quote>
              <p>Third, understanding when the model might make mistakes is another key aspect of global explainability. HCPs need to not only understand the normal decision-making logic of the model but also recognize the conditions under which the model may fail or generate incorrect decisions [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. Only when HCPs are fully aware of the model’s limitations can they use AI cautiously in practical applications, thereby improving decision accuracy and safety. For example, HCPs should be informed of potential risks, such as when the model fails to account for specific historical data or lacks certain critical information [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
            </sec>
          </sec>
          <sec>
            <title>Model Explainability</title>
            <p>From the user’s perspective, the explainability of the model itself can be discussed regarding model reliability and the structural explainability of the model.</p>
            <sec>
              <title>Model Reliability</title>
              <p>Model reliability refers to the ability of an AI tool to consistently produce accurate and dependable results over time. It is typically assessed using performance metrics, such as accuracy, specificity, and sensitivity, which evaluate how well the model performs in predicting outcomes [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. These metrics significantly influence clinicians’ initial adoption of the tools [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
            </sec>
            <sec>
              <title>Transparency of Model Structure</title>
              <p>Structural explainability, on the other hand, relates to how transparent and interpretable a model is in demonstrating the relationship between input features and the final output [<xref ref-type="bibr" rid="ref42">42</xref>]. AI models based on algorithms, such as decision trees or logistic regression, can clearly demonstrate how input features influence the final output, helping clinicians understand the model’s parameters and reasoning mechanisms [<xref ref-type="bibr" rid="ref46">46</xref>]. In contrast, “black-box” models, such as deep learning, while demonstrating superior performance in certain scenarios, have a complexity that makes it challenging for HCPs to understand their internal workings. See illustrative descriptions from Tonekaboni et al [<xref ref-type="bibr" rid="ref42">42</xref>] and Fischer et al [<xref ref-type="bibr" rid="ref46">46</xref>]:</p>
              <disp-quote>
                <p>Familiar metrics such as reliability, specificity, and sensitivity were important to the initial uptake of an AI tool, a critical factor for continued usage was whether the tool was repeatedly successful in prognosticating their patient’s condition in their personal experience.</p>
                <attrib>Tonekaboni et al, 2019</attrib>
              </disp-quote>
              <disp-quote>
                <p>If you know what that model is based on, it is not some mysterious black box where something comes out, but we as doctors know what those models are based on and what parameters are included. Then I can live with it not seeing the parameters for each prediction.</p>
                <attrib>Fischer et al, 2023</attrib>
              </disp-quote>
            </sec>
          </sec>
          <sec>
            <title>Preprocessing Explainability</title>
            <p>Preprocessing explainability refers to the transparency of AI systems when processing input data, including data sources and data preprocessing methods. Clinicians often need to understand what types of data AI systems are based on for predictions or diagnoses to evaluate the model’s applicability and reliability. This includes (1) transparency of data sources and (2) transparency of data processing.</p>
            <p>First, the transparency of data sources is the foundation of HCPs’ trust. A total of 3 studies indicated that HCPs pay attention to the data sources of AI systems [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. In the design of medical decision support systems, HCPs often want to know the origin of the data when they first encounter AI tools.</p>
            <p>Second, the lack of transparency in data processing may undermine HCPs’ trust in AI systems. When HCPs cannot clearly understand how input data are processed, they may become skeptical of the system. For example, some HCPs mentioned that if they are unaware of who processes the input data, how they are processed, or how they are stored, this uncertainty can lead to reduced trust in the AI system [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
          </sec>
        </sec>
        <sec>
          <title>Dimensions of AI Integrability From the User Perspective</title>
          <p>According to the thematic analysis, AI integrability can be understood from the following 3 dimensions: workflow adaptation, system compatibility, and usability (<xref rid="figure3" ref-type="fig">Figure 3</xref>).</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Conceptual framework of artificial intelligence (AI) integrability from the perspective of health care professionals. EHR: electronic health record.</p>
            </caption>
            <graphic xlink:href="jmir_v27i1e73374_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <sec>
            <title>Workflow Adaptation</title>
            <p>Workflow adaptation is a critical dimension of AI integrability, referring to the ability of AI systems to fit seamlessly into existing workflows without disrupting them, avoiding additional workload, and providing recommendations that meet practical needs [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. This includes (1) providing support at appropriate decision points, (2) moderate frequency of prompts and alerts, and (3) providing recommendations aligned with practical needs.</p>
            <p>First, for a workflow with well-integrated AI, the time points of AI assistance must not disrupt or interrupt doctors’ routines, increase workload, or extend time requirements. It is essential to identify which complex decision points require AI assistance, rather than providing information or data that doctors already know [<xref ref-type="bibr" rid="ref36">36</xref>]. This is especially problematic in high–patient-volume settings, where doctors are already under significant pressure. Workflow disruptions caused by AI systems can negatively impact various aspects of the diagnostic and treatment process [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. See illustrative description from Wenderott et al [<xref ref-type="bibr" rid="ref40">40</xref>]:</p>
            <disp-quote>
              <p>When using AI-CAD, Seven radiologists were concerned about potential time constraints associated with the software.</p>
              <attrib>Wenderott et al, 2024</attrib>
            </disp-quote>
            <p>Second, attention must be paid to the frequency of AI alerts and notifications. Frequent alerts and information prompts from AI systems may lead to alert fatigue among doctors, making them desensitized to genuinely critical alerts. This can also result in information overload, further impairing doctors’ decision-making abilities [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. See illustrative description from Zheng et al [<xref ref-type="bibr" rid="ref31">31</xref>]:</p>
            <disp-quote>
              <p>In-basket message was mentioned by many clinicians as a common type of active alarm. However, it is necessary to balance effective information delivery and alert fatigue as clinicians, especially physicians, receive various alarms and notifications from multiple channels in their daily work.</p>
              <attrib>Zheng et al, 2024</attrib>
            </disp-quote>
            <p>Third, 3 studies highlighted that it is crucial for AI outputs to align with the context of HCPs [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. AI recommendations must match the operational capacity of HCPs and health care facilities, especially in resource-limited community clinics. For instance, if an AI system suggests conducting laboratory tests that cannot be performed or prescribing medications that are unavailable, it may reduce clinicians’ trust in the system and negatively impact its practical effectiveness [<xref ref-type="bibr" rid="ref25">25</xref>]. Therefore, thorough local validation is essential when introducing AI systems to ensure they function appropriately within specific health care environments [<xref ref-type="bibr" rid="ref29">29</xref>]. See illustrative description from Wang et al [<xref ref-type="bibr" rid="ref22">22</xref>]:</p>
            <disp-quote>
              <p>In addition, since our research sites are first-tier community clinics, they are only capable of performing a limited number of laboratory examinations (e.g., none of the research sites have CT scan equipment). They also have very limited medication resources in stock. However, AI-CDSS would suggest a variety of laboratory tests, and treatment and medicine options, which clinicians often cannot prescribe. In this case, theses recommendations are often ignored by the clinician users.</p>
              <attrib>Wang et al, 2021</attrib>
            </disp-quote>
          </sec>
          <sec>
            <title>System Compatibility</title>
            <p>System compatibility is a critical dimension of AI integrability, particularly in the health care field, where it primarily refers to integration with electronic health records (EHRs). A total of 5 studies highlighted that many clinicians are willing to integrate AI systems with patient EHRs to provide more comprehensive and relevant information during clinical decision-making [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Integrating AI risk models into EHR systems can offer clinicians more valuable references, improving patient management while reducing repetitive tasks and minimizing information omissions in clinical workflows. See illustrative description from Kinney et al [<xref ref-type="bibr" rid="ref35">35</xref>]:</p>
            <disp-quote>
              <p>Physicians cited the diverse factors that impact a treatment plan that is not able to be captured in an electronic system as a reason it may not be helpful.</p>
              <attrib>Kinney et al, 2024</attrib>
            </disp-quote>
          </sec>
          <sec>
            <title>Usability</title>
            <p>Usability refers to the ease with which users can interact with and effectively use a system to achieve their goals [<xref ref-type="bibr" rid="ref47">47</xref>]. In the context of AI systems, usability is a key aspect, with 5 studies highlighting it as a critical factor that directly impacts the acceptance and effectiveness of the system in clinical settings [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p>
            <sec>
              <title>Simplicity of User Interface</title>
              <p>A user-friendly interface needs to be intuitive and easy to operate while providing timely and useful information without disrupting clinical workflows. The display and organization of interface functions are major dimensions of interface usability [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. For example, one study found that overly frequent and space-consuming pop-up designs in clinical decision support systems hindered doctors’ access to other important information, leading to a poor user experience [<xref ref-type="bibr" rid="ref22">22</xref>].</p>
            </sec>
            <sec>
              <title>Ease of Operation</title>
              <p>In addition, whether the system is easy for doctors to master directly affects its use. Clinicians tend to reject AI systems if they require significant time to learn [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. A mixed method study conducted in diabetes management surveyed HCPs’ attitudes toward AI [<xref ref-type="bibr" rid="ref23">23</xref>]. The results showed that 68% of participants considered usability (simple and easy operation) an important factor influencing their use. Another quantitative study, based on the technology acceptance model and diffusion of innovations theory, analyzed the key factors affecting the adoption of AI technologies among doctors and medical students, with 17.9% indicating the lack of user-friendly software and support systems as a barrier [<xref ref-type="bibr" rid="ref30">30</xref>]. See illustrative description from Wang et al [<xref ref-type="bibr" rid="ref22">22</xref>]:</p>
              <disp-quote>
                <p>A primary issue of AI-CDSS usability was that the system always pop up to occupy one-third of the screen, whenever the clinician opened a patient’s medical record in EHR. If the monitor’s screen size is small, the floating window of AI-CDSS may block the access to some EHR features (e.g., data fields). This frustrated many participants. To workaround this issue, clinicians had to minimize it while it was not in use.</p>
                <attrib>Wang et al, 2021</attrib>
              </disp-quote>
            </sec>
          </sec>
        </sec>
      </sec>
      <sec>
        <title>Quantitative Research</title>
        <p>This study included 3 quantitative and 2 mixed method studies, focusing on HCPs’ willingness to use AI, the key influencing factors, explainability needs, and integrability. Only 1 quantitative study addressed AI integrability. Owing to the limited number of quantitative studies, they primarily serve to complement and validate the qualitative analysis in this study. See <xref ref-type="table" rid="table2">Table 2</xref> for details.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Characteristics of the publications of quantitative data.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="200"/>
            <col width="180"/>
            <col width="100"/>
            <col width="160"/>
            <col width="170"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td>Study</td>
                <td>Research methods</td>
                <td>Sample size</td>
                <td>Participants</td>
                <td>Outcome variables</td>
                <td>Concerned dimensions of explainability or integrability</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Liaw et al [<xref ref-type="bibr" rid="ref23">23</xref>], 2023</td>
                <td>Semistructured interviews and surveys</td>
                <td>22</td>
                <td>Clinicians managing diabetes</td>
                <td>Factors influencing the adoption of the tool, perception of the tool’s usefulness, and ease of use</td>
                <td>Transparency, usability, and impact on clinic workflows need to be tailored to the demands and resources of clinics and communities.</td>
              </tr>
              <tr valign="top">
                <td>Schoonderwoerd et al [<xref ref-type="bibr" rid="ref38">38</xref>], 2021</td>
                <td>Domain analysis, interviews, surveys, and scenario experiment</td>
                <td>6</td>
                <td>Pediatrician clinicians</td>
                <td>Diagnosis, information they have used in their decision-making, and the importance ranking of different types of explanations in various contexts</td>
                <td>The information that is used to make a diagnosis, the information that supports the diagnosis, how certain the clinician is of the diagnosis, and the relevance of the information for their diagnosis</td>
              </tr>
              <tr valign="top">
                <td>Panagoulias et al [<xref ref-type="bibr" rid="ref30">30</xref>], 2023</td>
                <td>Survey</td>
                <td>39</td>
                <td>Medical personnel (including medical students and medical practitioners)</td>
                <td>Suggested level of explainability, knowledge of AI<sup>a</sup>, ways to better integrate AI, and AI concerns</td>
                <td>The overall system functions, user-friendly software, and impact on workflow</td>
              </tr>
              <tr valign="top">
                <td>Ghanvatkar and Rajan [<xref ref-type="bibr" rid="ref11">11</xref>], 2024</td>
                <td>Theoretical construction and case analysis</td>
                <td>—<sup>b</sup></td>
                <td>Clinicians</td>
                <td>Usefulness of AI explanations for clinicians</td>
                <td>Local explanations and global explanations</td>
              </tr>
              <tr valign="top">
                <td>Fogliato et al [<xref ref-type="bibr" rid="ref44">44</xref>], 2022</td>
                <td>Scenario experiment</td>
                <td>19</td>
                <td>Radiologists</td>
                <td>Anchoring effects; human-AI team diagnostic performance and agreement; time spent and confidence in decision-making; perceived usefulness of the AI</td>
                <td>Does not waste time and adds no additional workload.</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>AI: artificial intelligence.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>Not available.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>Two studies explored factors affecting physicians’ willingness to adopt AI, with a focus on explainability and ease of use. One mixed method study found that 77% of HCPs managing diabetes were willing to use AI, citing ease of use (68%) as a key factor [<xref ref-type="bibr" rid="ref23">23</xref>]. Another study revealed that 25.6% of participants identified a lack of understanding of underlying technology as a barrier [<xref ref-type="bibr" rid="ref30">30</xref>], which confirms the focus of HCPs on usability and explainability described in the previous section of the research.</p>
        <p>Two studies focused on explainability needs. One found that post hoc local explanations, such as those provided by logistic regression and Shapley additive explanations (SHAP), received higher usability scores from clinicians than model-level explainability [<xref ref-type="bibr" rid="ref11">11</xref>]. Another study found that clinicians rated diagnostic information, certainty, and related reasoning as very important, particularly when their diagnoses conflicted with AI recommendations [<xref ref-type="bibr" rid="ref38">38</xref>]. <italic>These quantitative results support and validate the qualitative findings that post hoc local explainability is crucial for HCPs.</italic></p>
        <p>One study examined AI integration into workflows, comparing its placement in different stages of decision-making. It found that AI support at the start of a diagnostic session increased participants’ confidence and perceived usefulness but also highlighted that poor integration could increase task complexity and workload [<xref ref-type="bibr" rid="ref44">44</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>To enhance HCPs’ trust and use of AI-CDSS in future real-world clinical settings, this study adopted a mixed systematic review approach to synthesize evidence regarding AI explainability and integrability from the HCPs’ perspective. To the best of our knowledge, this study is the first to systematically summarize the concept of “AI integrability” from the HCPs’ perspective. It refers to the ability of AI systems to be easily and seamlessly integrated into workflows, providing timely, appropriately scaled, and practically relevant prompts or recommendations at the right points, without requiring excessive effort from the HCPs. HCPs’ needs for AI integrability are primarily reflected in 3 aspects: system compatibility, usability, and workflow adaptation.</p>
        <p>Second, this study decodes the components of AI explainability based on HCPs’ lived experiences. It identifies that the core HCPs’ requirements of AI explainability can be divided into 3 stages, namely, data preprocessing, the AI model itself, and postprocessing explainability. Unlike the results from AI developers and researchers, the study found that general users (HCPs) are more focused on the explainability of the postprocessing stage, particularly local explainability, such as the importance of specific output features and the certainty of results. From the HCPs’ perspective, an explainable AI must clearly present data sources, processing workflows, model structures, decision mechanisms, and their rationales, using tools such as visualization and user-comprehensible language and logic to help HCPs understand and trust AI.</p>
      </sec>
      <sec>
        <title>Comparison With Existing Research</title>
        <p>This study is the first to systematically review AI integrability from a comprehensive perspective. Current discussions about easily integrable AI only sporadically mention its compatibility with other systems and its ability to integrate into HCPs’ workflows (eg, the location of the AI and when it provides assistance), but lack in-depth and systematic exploration [<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref50">50</xref>]. For instance, the study by Maleki Varnosfaderani and Forouzanfar [<xref ref-type="bibr" rid="ref51">51</xref>] discussed the possibility of integrating AI with medical practice but did not thoroughly examine the specific needs faced by HCPs during the integration process. A study from rural clinics in China reported various tensions between AI-CDSS design and the rural clinical environment, such as misalignment with local environments and workflows, technical limitations, and usability barriers [<xref ref-type="bibr" rid="ref22">22</xref>]. Another study concerning AI-CDSS in emergency departments identified integrability factors (eg, time, treatment processes, and mobility) through interviews with 12 emergency department doctors, but it was limited to a specific environment with a small sample size [<xref ref-type="bibr" rid="ref45">45</xref>], resulting in poor extrapolation capability.</p>
        <p>This study proposes, from the perspective of HCPs’ needs, that AI explainability should not only focus on technical transparency but also emphasize HCPs’ understanding and trust, particularly in clinical settings, where AI explanations should support HCPs in making more accurate and effective decisions. Existing research in explainable AI primarily focuses on algorithms, with related reviews mainly discussing taxonomies of explainability and technological innovations [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. For example, Markus et al [<xref ref-type="bibr" rid="ref53">53</xref>] proposed an explainability framework that mainly focuses on providing better tools for developers but largely explores explainability from an algorithmic perspective. Similarly, Amann et al [<xref ref-type="bibr" rid="ref14">14</xref>] highlighted ethical and technical issues in explainable medical AI, pointing out the multidimensional nature of explainability, but their research remains focused on algorithm optimization and technical compliance. This study emphasizes the user perspective, offering more practical guidance for the design and promotion of explainable medical AI. The study found that HCPs focus more on postprocessing local explainability, meaning how specific predictions made by the model can explain changes in the patient’s condition or decision-making basis. This aligns with Shin [<xref ref-type="bibr" rid="ref54">54</xref>], who emphasized that local explainability and causal relationships are key to user trust. A systematic review from medical and technical perspectives also supports this view [<xref ref-type="bibr" rid="ref55">55</xref>]. Unlike data experts, who focus on model and data-layer explainability [<xref ref-type="bibr" rid="ref25">25</xref>], users prioritize post hoc explainability. 
Some experimental research shows users (eg, doctors) have a higher understanding and acceptance of post hoc explanations, which are more actionable than traditional technical explanations [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. This preference stems from 3 key clinical needs. First, post hoc local explanations help clinicians understand AI predictions in the context of individual patient cases, enabling more personalized and relevant decision-making [<xref ref-type="bibr" rid="ref54">54</xref>]. Second, given their professional and legal responsibilities, doctors need to justify their choices based on understandable and traceable reasoning. Post hoc explainability provides the transparency required to assess whether AI outputs align with clinical guidelines and ethical standards [<xref ref-type="bibr" rid="ref53">53</xref>]. Third, in high-pressure clinical environments, HCPs prioritize usability over theoretical clarity. Local, case-specific explanations are more practical and immediately applicable, which enhances trust and facilitates integration into routine workflows [<xref ref-type="bibr" rid="ref58">58</xref>]. Thus, this study’s conclusion better matches real clinical scenarios, offering insights for developers to create AI-CDSS that meet the needs of HCPs.</p>
      </sec>
      <sec>
        <title>Challenges for Explainability and Integrability</title>
        <p>Despite these advances, significant challenges remain in achieving explainability and integrability of AI-CDSS in clinical practice. These challenges are as follows.</p>
        <sec>
          <title>Lack of Tailored Explainability Methods for HCPs</title>
          <p>A key challenge is the lack of explainability methods tailored to HCPs [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. HCPs focus on the explainability of the postprocessing stage, especially local explainability. To address this, post hoc explainability techniques such as local interpretable model-agnostic explanations (LIME) and SHAP [<xref ref-type="bibr" rid="ref61">61</xref>] explain decision-making in black-box models, helping HCPs understand how predictions are made based on input data. For instance, Alabi et al [<xref ref-type="bibr" rid="ref62">62</xref>] demonstrated the use of SHAP and LIME in prognostic modeling for nasopharyngeal carcinoma, highlighting their potential in clinical decision support. Simplifying output by avoiding technical jargon and using graphical explanations [<xref ref-type="bibr" rid="ref63">63</xref>] allows HCPs to adjust detail levels to avoid overload. Medical AI can also offer personalized explanations based on roles, preferences [<xref ref-type="bibr" rid="ref64">64</xref>], or feedback after explanations [<xref ref-type="bibr" rid="ref65">65</xref>].</p>
        </sec>
        <sec>
          <title>Dynamic Nature of AI Models Affecting Explanation Consistency</title>
          <p>A significant challenge in explainable AI for clinical use lies in the evolving nature of explanations as AI models are continuously updated. These updates—whether for improving performance, incorporating new data, or aligning with emerging medical knowledge—can change a model’s internal logic, rendering previously valid explanations obsolete or misleading [<xref ref-type="bibr" rid="ref66">66</xref>]. In clinical settings, where trust and transparency are paramount, outdated explanations may lead to incorrect interpretations or reduced confidence in AI recommendations. To address this, explainability methods must be adaptive—capable of automatically regenerating explanations following model updates, tracking changes over time, and surfacing the rationale behind those changes [<xref ref-type="bibr" rid="ref67">67</xref>]. Therefore, maintaining the temporal validity of explanations is as crucial as ensuring their initial explainability, especially as AI systems become increasingly dynamic and responsive to new clinical evidence.</p>
        </sec>
        <sec>
          <title>Limited Technical Compatibility With Existing Information Systems</title>
          <p>Compatibility with existing information systems is a major challenge [<xref ref-type="bibr" rid="ref68">68</xref>]. AI-CDSS often require large amounts of patient data to provide decision support, but if these data cannot be electronically retrieved, clinicians must manually input them, leading to frustration and abandonment [<xref ref-type="bibr" rid="ref69">69</xref>]. Integrability difficulties are also linked to the lack of semantic interoperability standards [<xref ref-type="bibr" rid="ref70">70</xref>]. To address this, standardized application programming interface and data format protocols should be developed to enable AI systems to automatically retrieve patient data from EHRs, reducing manual workload. In addition, implementing standards such as the Fast Healthcare Interoperability Resources and Unified Medical Language System can facilitate integrability with various patient information systems [<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref72">72</xref>].</p>
        </sec>
        <sec>
          <title>Complexity of AI Integrability in Clinical Settings</title>
          <p>While this review identifies key enablers of AI integrability—namely, system compatibility, usability, and workflow adaptation—it is important to emphasize that integration is rarely seamless in real-world clinical settings. These dimensions, although essential, do not guarantee smooth adoption. For example, perceptions of usability often vary among different clinical roles, leading to inconsistent engagement [<xref ref-type="bibr" rid="ref73">73</xref>]. Embedding AI tools into existing workflows can require significant adaptation, redefinition of tasks, and role negotiations. Even when systems are technically compatible, they may introduce new tensions, including resistance from clinicians or disruption to established routines [<xref ref-type="bibr" rid="ref22">22</xref>]. Thus, integrability should not be treated purely as a technical process, but as a complex challenge shaped by cultural norms, institutional readiness, and professional autonomy [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref74">74</xref>].</p>
          <p>To address this, frequent involvement of HCPs during system design, continuous feedback loops, and adaptation to local workflows are crucial. Methods such as human-computer interaction with expert input [<xref ref-type="bibr" rid="ref75">75</xref>] and consumer journey mapping [<xref ref-type="bibr" rid="ref76">76</xref>] have been used to enhance AI-CDSS integration. At the same time, it is necessary to develop a standardized diagnostic support framework that aligns AI with specific clinical needs [<xref ref-type="bibr" rid="ref77">77</xref>]. Another promising direction to enhance integrability is dynamic adaptation, where AI-CDSS adjust their level of support based on contextual factors such as patient volume, emergency status, or resource availability [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]. In high-pressure situations (eg, during emergencies or when clinician workload is high), the AI system could provide more proactive or autonomous recommendations. Conversely, during low-acuity periods, it could take a more supportive or background role, allowing clinicians greater control. Such adaptability can reduce disruption, improve acceptance, and ensure that AI interventions align with real-time clinical needs and capacities.</p>
        </sec>
        <sec>
          <title>Ethical Concerns</title>
          <p>In addition, the ethical implications of explainability and integrability should not be overlooked. Explainability is ethically significant in supporting informed consent, accountability, and clinicians’ ability to critically evaluate AI recommendations [<xref ref-type="bibr" rid="ref14">14</xref>]. When clinicians can understand how an AI system reaches its conclusions, they are better equipped to maintain professional autonomy and protect patient rights. Integrability also has ethical implications. If an AI system is not well integrated into clinical workflows, clinicians may not know when or how to use it properly. This can create confusion about who is responsible for decisions influenced by AI. For example, if an AI recommendation appears at the wrong time in the workflow or is difficult to interpret in context, a clinician might follow it without full understanding or ignore it when it should have been considered. In both cases, the boundaries of responsibility become blurred [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]. These issues highlight the necessity of designing AI systems that align not only with technical and operational requirements but also with core ethical principles, such as transparency, fairness, and trustworthiness in health care.</p>
        </sec>
      </sec>
      <sec>
        <title>Implementation Strategies Based on the Exploration, Preparation, Implementation, and Sustainment Framework</title>
        <p>As noted in the previous section, AI faces persistent challenges in explainability and system integration. These cannot be resolved through isolated interventions but require a structured, phased approach. The exploration, preparation, implementation, and sustainment framework offers a widely validated model for supporting health care technology adoption and enhancing clinician acceptance of AI systems [<xref ref-type="bibr" rid="ref80">80</xref>]. To systematically address these challenges, we draw on the exploration, preparation, implementation, and sustainment framework, providing structured strategies for each phase of implementation.</p>
        <p>In the exploration phase, institutions should identify clinical needs and collaborate with multidisciplinary teams (eg, physicians, nurses, and IT staff) to assess AI integration opportunities. Prioritizing explainability, especially alignment with clinical reasoning, is critical. Models supporting SHAP, LIME, or other visual local explanation tools are recommended, alongside qualitative feedback collection from end users [<xref ref-type="bibr" rid="ref81">81</xref>].</p>
        <p>The preparation phase focuses on resolving integration barriers and tailoring explanations for different roles. Multilevel explanation interfaces can accommodate varying expertise levels [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Close coordination with IT departments is essential to embed AI tools into existing EHR systems, minimizing manual input and workflow disruption [<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref72">72</xref>]. Organizational readiness and role-specific training are also key to successful adoption [<xref ref-type="bibr" rid="ref82">82</xref>].</p>
        <p>In the implementation phase, a phased deployment strategy helps minimize disruption and support gradual clinician adaptation. AI-CDSS tools can first target low-risk, supportive tasks (eg, risk alerts and abnormal laboratory flagging), then gradually expand to core decision-making such as diagnosis or treatment support. To ensure alignment with clinical needs, implementation should combine performance metrics (eg, alert response times and override rates) with user feedback (eg, satisfaction ratings and suggestion boxes). Regular log reviews and focus groups can surface usability issues and guide iterative improvement [<xref ref-type="bibr" rid="ref83">83</xref>].</p>
        <p>The sustainment phase focuses on the long-term integration of AI into clinical workflows. Continuous monitoring of system performance and user experience is essential to ensure sustained adoption [<xref ref-type="bibr" rid="ref84">84</xref>]. As models evolve, transparent update mechanisms—such as automatically generated explanation revisions and change logs—should be maintained to support clinician trust and promote continued engagement with the system.</p>
      </sec>
      <sec>
        <title>Strengths and Limitations</title>
        <p>This study offers a systematic exploration of AI integrability from the HCPs’ perspective, providing a conceptual framework to guide medical AI design and development. Unlike previous studies focusing on technical developers or researchers’ perspectives [<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref86">86</xref>], it emphasizes the needs of actual HCPs, such as physicians. It establishes an AI explainability framework based on their priorities in data preprocessing, model structure, and postprocessing. This approach facilitates the development of user-centered AI-CDSS, promoting its acceptance and use by HCPs.</p>
        <p>The limitation of this systematic review is the limited number of quantitative studies, which restricts quantitative analysis and statistical inference. Future research should include high-quality quantitative studies to validate and complement the conclusions.</p>
        <p>Although we used a comprehensive set of keywords, the decision not to use truncation (eg, an asterisk) may have led to the omission of some relevant studies. This choice was made to maintain specificity, but it may have limited the search breadth. To address this, we supplemented the search with citation tracking. This limitation is acknowledged in the review to clarify the scope of our search strategy.</p>
        <p>In addition, while emphasizing user perspectives, this review provides limited analysis of varying needs among different medical roles. This is partly because of the small number of eligible studies and the lack of detail regarding specific clinical tasks, settings, or user groups in many of the included papers. These limitations made it difficult to conduct deeper, context-sensitive analysis of HCPs’ perceptions of explainability and integrability. In this regard, we recommend that future research draw on implementation frameworks such as the Consolidated Framework for Implementation Research or Promoting Action on Research Implementation in Health Service framework to better account for the contextual and role-specific factors that shape HCPs’ experiences. These frameworks can support more nuanced analyses of clinical settings and tasks, guiding the development of AI tools that are better aligned with real-world practices.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In conclusion, the explainability and integrability of medical AI are key factors influencing its acceptance and use in clinical settings. On the basis of the user-centered conceptual framework proposed in this study, future AI design should focus on HCPs’ needs to enhance explainability and integrability, thereby promoting HCPs’ acceptance and use and improving its effectiveness in real-world clinical applications.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Search strategy for artificial intelligence (AI) explainability and for AI integrability from the user perspective.</p>
        <media xlink:href="jmir_v27i1e73374_app1.docx" xlink:title="DOCX File , 16 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) checklist.</p>
        <media xlink:href="jmir_v27i1e73374_app2.docx" xlink:title="DOCX File , 32 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AI-CDSS</term>
          <def>
            <p>artificial intelligence clinical decision support systems</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">EHR</term>
          <def>
            <p>electronic health record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">HCP</term>
          <def>
            <p>health care professional</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">LIME</term>
          <def>
            <p>local interpretable model-agnostic explanations</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">SHAP</term>
          <def>
            <p>Shapley additive explanations</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was supported by the Shenzhen Basic Research Program (Natural Science Foundation; JCYJ20240813115806009); the National Natural Science Program of China (project 72004066); the Humanities and Social Sciences Research Project of the Ministry of Education, China (24YJAZH086 and 24YJCZH284); the Knowledge Innovation Project of Wuhan (2023020201020471); the Teaching Research Project of Huazhong University of Science and Technology (2023146); the Teaching Ability Training Curriculum Development Project of Huazhong University of Science and Technology (202408); and the China Scholarship Council. The authors confirm that generative artificial intelligence tools were used only for English language polishing and not for drafting the original or revised versions of the manuscript, which were entirely generated by the authors.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The data used and analyzed in this study will be made available by the corresponding author upon reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>YL was responsible for original draft preparation, formal analysis, and software support. CL contributed to conceptualization, writing—review and editing, and methodology. JZ assisted with formal analysis and software development. CX and DW both contributed to conceptualization and supervision. All authors provided substantive contributions to the writing and revision of the manuscript and approved the final version before submission.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Beam</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Kohane</surname>
              <given-names>IS</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in healthcare</article-title>
          <source>Nat Biomed Eng</source>
          <year>2018</year>
          <month>10</month>
          <volume>2</volume>
          <issue>10</issue>
          <fpage>719</fpage>
          <lpage>31</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41551-018-0305-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41551-018-0305-z</pub-id>
          <pub-id pub-id-type="medline">31015651</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41551-018-0305-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Acceptance of clinical artificial intelligence among physicians and medical students: a systematic review with cross-sectional survey</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2022</year>
          <month>8</month>
          <day>31</day>
          <volume>9</volume>
          <fpage>990604</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36117979"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2022.990604</pub-id>
          <pub-id pub-id-type="medline">36117979</pub-id>
          <pub-id pub-id-type="pmcid">PMC9472134</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Delory</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Jeanmougin</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lariven</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aubert</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Peiffer-Smadja</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Boëlle</surname>
              <given-names>PY</given-names>
            </name>
            <name name-style="western">
              <surname>Bouvet</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lescure</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Le Bel</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A computerized decision support system (CDSS) for antibiotic prescription in primary care-Antibioclic: implementation, adoption and sustainable use in the era of extended antimicrobial resistance</article-title>
          <source>J Antimicrob Chemother</source>
          <year>2020</year>
          <month>08</month>
          <day>01</day>
          <volume>75</volume>
          <issue>8</issue>
          <fpage>2353</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1093/jac/dkaa167</pub-id>
          <pub-id pub-id-type="medline">32357226</pub-id>
          <pub-id pub-id-type="pii">5827803</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sambasivan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Esmaeilzadeh</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Nezakati</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Intention to adopt clinical decision support systems in a developing country: effect of physician's perceived professional autonomy, involvement and belief: a cross-sectional study</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2012</year>
          <month>12</month>
          <day>05</day>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>142</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/1472-6947-12-142"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/1472-6947-12-142</pub-id>
          <pub-id pub-id-type="medline">23216866</pub-id>
          <pub-id pub-id-type="pii">1472-6947-12-142</pub-id>
          <pub-id pub-id-type="pmcid">PMC3519751</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeng</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Tzeng</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Social influence on the use of clinical decision support systems: revisiting the unified theory of acceptance and use of technology by the fuzzy DEMATEL technique</article-title>
          <source>Comput Ind Eng</source>
          <year>2012</year>
          <month>4</month>
          <volume>62</volume>
          <issue>3</issue>
          <fpage>819</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cie.2011.12.016</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shibl</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lawley</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Debuse</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Factors influencing decision support system acceptance</article-title>
          <source>Decis Support Syst</source>
          <year>2013</year>
          <month>01</month>
          <volume>54</volume>
          <issue>2</issue>
          <fpage>953</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1016/j.dss.2012.09.018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Thornton</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wyatt</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and clinical decision support: clinicians' perspectives on trust, trustworthiness, and liability</article-title>
          <source>Med Law Rev</source>
          <year>2023</year>
          <month>11</month>
          <day>27</day>
          <volume>31</volume>
          <issue>4</issue>
          <fpage>501</fpage>
          <lpage>20</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37218368"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/medlaw/fwad013</pub-id>
          <pub-id pub-id-type="medline">37218368</pub-id>
          <pub-id pub-id-type="pii">7176027</pub-id>
          <pub-id pub-id-type="pmcid">PMC10681355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Burkart</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Huber</surname>
              <given-names>MF</given-names>
            </name>
          </person-group>
          <article-title>A survey on the explainability of supervised machine learning</article-title>
          <source>J Artif Intell Res</source>
          <year>2021</year>
          <month>01</month>
          <day>19</day>
          <volume>70</volume>
          <fpage>245</fpage>
          <lpage>317</lpage>
          <pub-id pub-id-type="doi">10.1613/jair.1.12228</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blezek</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Olson-Williams</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Missert</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Korfiatis</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>AI integration in the clinical workflow</article-title>
          <source>J Digit Imaging</source>
          <year>2021</year>
          <month>12</month>
          <day>22</day>
          <volume>34</volume>
          <issue>6</issue>
          <fpage>1435</fpage>
          <lpage>46</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34686923"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10278-021-00525-3</pub-id>
          <pub-id pub-id-type="medline">34686923</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10278-021-00525-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC8669074</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The practical implementation of artificial intelligence technologies in medicine</article-title>
          <source>Nat Med</source>
          <year>2019</year>
          <month>01</month>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>30</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30617336"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41591-018-0307-0</pub-id>
          <pub-id pub-id-type="medline">30617336</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-018-0307-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC6995276</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghanvatkar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rajan</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Evaluating explanations from AI algorithms for clinical decision-making: a social science-based approach</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2024</year>
          <month>07</month>
          <volume>28</volume>
          <issue>7</issue>
          <fpage>4269</fpage>
          <lpage>80</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2024.3393719</pub-id>
          <pub-id pub-id-type="medline">38662559</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hua</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Petrina</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Young</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Poon</surname>
              <given-names>SK</given-names>
            </name>
          </person-group>
          <article-title>Understanding the factors influencing acceptability of AI in medical imaging domains among healthcare professionals: A scoping review</article-title>
          <source>Artif Intell Med</source>
          <year>2024</year>
          <month>01</month>
          <volume>147</volume>
          <fpage>102698</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(23)00212-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2023.102698</pub-id>
          <pub-id pub-id-type="medline">38184343</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(23)00212-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Graziani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dutkiewicz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Calvaresi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Amorim</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Yordanova</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Vered</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nair</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Abreu</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Blanke</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pulignano</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>JO</given-names>
            </name>
            <name name-style="western">
              <surname>Lauwaert</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Reijers</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Depeursinge</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Andrearczyk</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A global taxonomy of interpretable AI: unifying the terminology for the technical and social sciences</article-title>
          <source>Artif Intell Rev</source>
          <year>2023</year>
          <month>09</month>
          <day>06</day>
          <volume>56</volume>
          <issue>4</issue>
          <fpage>3473</fpage>
          <lpage>504</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36092822"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10462-022-10256-8</pub-id>
          <pub-id pub-id-type="medline">36092822</pub-id>
          <pub-id pub-id-type="pii">10256</pub-id>
          <pub-id pub-id-type="pmcid">PMC9446618</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Blasimme</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Frey</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Madai</surname>
              <given-names>VI</given-names>
            </name>
            <collab>Precise4Q consortium</collab>
          </person-group>
          <article-title>Explainability for artificial intelligence in healthcare: a multidisciplinary perspective</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2020</year>
          <month>11</month>
          <day>30</day>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>310</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-020-01332-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id>
          <pub-id pub-id-type="medline">33256715</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-020-01332-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7706019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shortliffe</surname>
              <given-names>EH</given-names>
            </name>
            <name name-style="western">
              <surname>Sepúlveda</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Clinical decision support in the era of artificial intelligence</article-title>
          <source>JAMA</source>
          <year>2018</year>
          <month>12</month>
          <day>04</day>
          <volume>320</volume>
          <issue>21</issue>
          <fpage>2199</fpage>
          <lpage>200</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2018.17163</pub-id>
          <pub-id pub-id-type="medline">30398550</pub-id>
          <pub-id pub-id-type="pii">2713901</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>CK</given-names>
            </name>
            <name name-style="western">
              <surname>Magaki</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Khanlou</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Vinters</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>XR</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Haeri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>XA</given-names>
            </name>
          </person-group>
          <article-title>Improving workflow integration with xPath: design and evaluation of a human-AI diagnosis system in pathology</article-title>
          <source>ACM Trans Comput Hum Interact</source>
          <year>2023</year>
          <month>03</month>
          <day>17</day>
          <volume>30</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1145/3577011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mandl</surname>
              <given-names>KD</given-names>
            </name>
            <name name-style="western">
              <surname>Gottlieb</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mandel</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Integration of AI in healthcare requires an interoperable digital data ecosystem</article-title>
          <source>Nat Med</source>
          <year>2024</year>
          <month>03</month>
          <day>30</day>
          <volume>30</volume>
          <issue>3</issue>
          <fpage>631</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-023-02783-w</pub-id>
          <pub-id pub-id-type="medline">38291298</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-023-02783-w</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <article-title>Interoperability in healthcare</article-title>
          <source>Healthcare Information and Management Systems Society</source>
          <access-date>2025-05-09</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://legacy.himss.org/resources/interoperability-healthcare">https://legacy.himss.org/resources/interoperability-healthcare</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barredo Arrieta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Díaz-Rodríguez</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Del Ser</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bennetot</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tabik</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Barbado</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gil-Lopez</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Molina</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Benjamins</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chatila</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Herrera</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Explainable Artificial Intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI</article-title>
          <source>Inf Fusion</source>
          <year>2020</year>
          <month>06</month>
          <volume>58</volume>
          <fpage>82</fpage>
          <lpage>115</lpage>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2019.12.012</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sutton</surname>
              <given-names>RT</given-names>
            </name>
            <name name-style="western">
              <surname>Pincock</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Baumgart</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Sadowski</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Fedorak</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Kroeker</surname>
              <given-names>KI</given-names>
            </name>
          </person-group>
          <article-title>An overview of clinical decision support systems: benefits, risks, and strategies for success</article-title>
          <source>NPJ Digit Med</source>
          <year>2020</year>
          <volume>3</volume>
          <fpage>17</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-020-0221-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-020-0221-y</pub-id>
          <pub-id pub-id-type="medline">32047862</pub-id>
          <pub-id pub-id-type="pii">221</pub-id>
          <pub-id pub-id-type="pmcid">PMC7005290</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dowding</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Randell</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Foster</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lattimer</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Nurses' use of computerised clinical decision support systems: a case site analysis</article-title>
          <source>J Clin Nurs</source>
          <year>2009</year>
          <month>04</month>
          <day>05</day>
          <volume>18</volume>
          <issue>8</issue>
          <fpage>1159</fpage>
          <lpage>67</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1365-2702.2008.02607.x</pub-id>
          <pub-id pub-id-type="medline">19320785</pub-id>
          <pub-id pub-id-type="pii">JCN2607</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>“Brilliant AI doctor” in rural clinics: challenges in AI-powered clinical decision support system deployment</article-title>
          <source>Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems</source>
          <year>2021</year>
          <conf-name>CHI '21</conf-name>
          <conf-date>May 8-13, 2021</conf-date>
          <conf-loc>Yokohama, Japan</conf-loc>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3411764.3445432"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3411764.3445432</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liaw</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Ramos Silva</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Soltero</surname>
              <given-names>EG</given-names>
            </name>
            <name name-style="western">
              <surname>Krist</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stotts</surname>
              <given-names>AL</given-names>
            </name>
          </person-group>
          <article-title>An assessment of how clinicians and staff members use a diabetes artificial intelligence prediction tool: mixed methods study</article-title>
          <source>JMIR AI</source>
          <year>2023</year>
          <month>05</month>
          <day>29</day>
          <volume>2</volume>
          <fpage>e45032</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ai.jmir.org/2023/1/e45032/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/45032</pub-id>
          <pub-id pub-id-type="medline">38875578</pub-id>
          <pub-id pub-id-type="pii">v2i1e45032</pub-id>
          <pub-id pub-id-type="pmcid">PMC11041401</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dwivedi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dave</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Naik</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Singhal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Omer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Morgan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ranjan</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Explainable AI (XAI): core ideas, techniques, and solutions</article-title>
          <source>ACM Comput Surv</source>
          <year>2023</year>
          <month>01</month>
          <day>16</day>
          <volume>55</volume>
          <issue>9</issue>
          <fpage>1</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1145/3561048</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Abuhmed</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>El-Sappagh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Muhammad</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Alonso-Moral</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Confalonieri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Guidotti</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Del Ser</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Díaz-Rodríguez</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Herrera</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Explainable Artificial Intelligence (XAI): what we know and what is left to attain Trustworthy Artificial Intelligence</article-title>
          <source>Inf Fusion</source>
          <year>2023</year>
          <month>11</month>
          <volume>99</volume>
          <fpage>101805</fpage>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2023.101805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Explanation in artificial intelligence: insights from the social sciences</article-title>
          <source>Artif Intell</source>
          <year>2019</year>
          <month>02</month>
          <volume>267</volume>
          <fpage>1</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1016/j.artint.2018.07.007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Srinivasan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chander</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Explanation perspectives from the cognitive sciences—a survey</article-title>
          <source>Proceedings of the 29th International Conference on International Joint Conferences on Artificial Intelligence</source>
          <year>2020</year>
          <conf-name>IJCAI '20</conf-name>
          <conf-date>January 7-15, 2021</conf-date>
          <conf-loc>Yokohama, Japan</conf-loc>
          <fpage>4812</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/abs/10.5555/3491440.3492110"/>
          </comment>
          <pub-id pub-id-type="doi">10.24963/ijcai.2020/670</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pearson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Bath-Hextall</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Salmond</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Apostolo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kirkpatrick</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>A mixed-methods approach to systematic reviews</article-title>
          <source>Int J Evid Based Healthc</source>
          <year>2015</year>
          <month>09</month>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>121</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.1097/XEB.0000000000000052</pub-id>
          <pub-id pub-id-type="medline">26196082</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marco-Ruiz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hernández</surname>
              <given-names>MÁ</given-names>
            </name>
            <name name-style="western">
              <surname>Ngo</surname>
              <given-names>PD</given-names>
            </name>
            <name name-style="western">
              <surname>Makhlysheva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Svenning</surname>
              <given-names>TO</given-names>
            </name>
            <name name-style="western">
              <surname>Dyb</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chomutare</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Llatas</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Muñoz-Gama</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tayefi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A multinational study on artificial intelligence adoption: clinical implementers' perspectives</article-title>
          <source>Int J Med Inform</source>
          <year>2024</year>
          <month>04</month>
          <volume>184</volume>
          <fpage>105377</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1386-5056(24)00040-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2024.105377</pub-id>
          <pub-id pub-id-type="medline">38377725</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(24)00040-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Panagoulias</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Virvou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tsihrintzis</surname>
              <given-names>GA</given-names>
            </name>
          </person-group>
          <article-title>An empirical study concerning the impact of perceived usefulness and ease of use on the adoption of AI-empowered medical applications</article-title>
          <source>Proceedings of the 23rd International Conference on Bioinformatics and Bioengineering</source>
          <year>2023</year>
          <conf-name>BIBE '23</conf-name>
          <conf-date>December 4-6, 2023</conf-date>
          <conf-loc>Dayton, OH</conf-loc>
          <fpage>338</fpage>
          <lpage>45</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/10431843"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/bibe60311.2023.00062</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ohde</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Overgaard</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Brereton</surname>
              <given-names>TA</given-names>
            </name>
            <name name-style="western">
              <surname>Jose</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Peterson</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Juhn</surname>
              <given-names>YJ</given-names>
            </name>
          </person-group>
          <article-title>Clinical needs assessment of a machine learning-based asthma management tool: user-centered design approach</article-title>
          <source>JMIR Form Res</source>
          <year>2024</year>
          <month>01</month>
          <day>15</day>
          <volume>8</volume>
          <fpage>e45391</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2024/1/e45391/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/45391</pub-id>
          <pub-id pub-id-type="medline">38224482</pub-id>
          <pub-id pub-id-type="pii">v8i1e45391</pub-id>
          <pub-id pub-id-type="pmcid">PMC10825767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>Ringland</surname>
              <given-names>KE</given-names>
            </name>
          </person-group>
          <article-title>Designing accessible, explainable AI (XAI) experiences</article-title>
          <source>SIGACCESS Access Comput</source>
          <year>2020</year>
          <month>03</month>
          <day>02</day>
          <issue>125</issue>
          <fpage>1</fpage>
          <lpage>1</lpage>
          <pub-id pub-id-type="doi">10.1145/3386296.3386302</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morais</surname>
              <given-names>FL</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Dos Santos</surname>
              <given-names>PS</given-names>
            </name>
            <name name-style="western">
              <surname>Ribeiro</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>Do Explainable AI techniques effectively explain their rationale? A case study from the domain expert’s perspective</article-title>
          <source>Proceedings of the 26th International Conference on Computer Supported Cooperative Work in Design</source>
          <year>2023</year>
          <conf-name>CSCWD '23</conf-name>
          <conf-date>May 24-26, 2023</conf-date>
          <conf-loc>Rio de Janeiro, Brazil</conf-loc>
          <fpage>1569</fpage>
          <lpage>74</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/10152722"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/cscwd57460.2023.10152722</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Terry</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Pellathy</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hravnak</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>George</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Zaiti</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Clermont</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Engaging multidisciplinary clinical users in the design of an artificial intelligence-powered graphical user interface for intensive care unit instability decision support</article-title>
          <source>Appl Clin Inform</source>
          <year>2023</year>
          <month>08</month>
          <day>04</day>
          <volume>14</volume>
          <issue>4</issue>
          <fpage>789</fpage>
          <lpage>802</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.1055/s-0043-1775565"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0043-1775565</pub-id>
          <pub-id pub-id-type="medline">37793618</pub-id>
          <pub-id pub-id-type="pmcid">PMC10550364</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kinney</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Anastasiadou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Naranjo-Zolotov</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Santos</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Expectation management in AI: a framework for understanding stakeholder trust and acceptance of artificial intelligence systems</article-title>
          <source>Heliyon</source>
          <year>2024</year>
          <month>04</month>
          <day>15</day>
          <volume>10</volume>
          <issue>7</issue>
          <fpage>e28562</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2405-8440(24)04593-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e28562</pub-id>
          <pub-id pub-id-type="medline">38576546</pub-id>
          <pub-id pub-id-type="pii">S2405-8440(24)04593-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC10990870</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Burgess</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Jankovic</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Austin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kapuścińska</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Currie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Overhage</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Poole</surname>
              <given-names>ES</given-names>
            </name>
            <name name-style="western">
              <surname>Kaye</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Healthcare AI treatment decision support: design principles to enhance clinician adoption and trust</article-title>
          <source>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</source>
          <year>2023</year>
          <conf-name>CHI '23</conf-name>
          <conf-date>April 23-28, 2023</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <fpage>1</fpage>
          <lpage>19</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3544548.3581251"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3544548.3581251</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yoo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hur</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>WC</given-names>
            </name>
          </person-group>
          <article-title>Healthcare professionals' expectations of medical artificial intelligence and strategies for its clinical implementation: a qualitative study</article-title>
          <source>Healthc Inform Res</source>
          <year>2023</year>
          <month>01</month>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>64</fpage>
          <lpage>74</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36792102"/>
          </comment>
          <pub-id pub-id-type="doi">10.4258/hir.2023.29.1.64</pub-id>
          <pub-id pub-id-type="medline">36792102</pub-id>
          <pub-id pub-id-type="pii">hir.2023.29.1.64</pub-id>
          <pub-id pub-id-type="pmcid">PMC9932312</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schoonderwoerd</surname>
              <given-names>TA</given-names>
            </name>
            <name name-style="western">
              <surname>Jorritsma</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Neerincx</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>van den Bosch</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Human-centered XAI: developing design patterns for explanations of clinical decision support systems</article-title>
          <source>Int J Hum Comput Stud</source>
          <year>2021</year>
          <month>10</month>
          <volume>154</volume>
          <fpage>102684</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2021.102684</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Hullman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bertini</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Human factors in model interpretability: industry practices, challenges, and needs</article-title>
          <source>Proc ACM Hum Comput Interact</source>
          <year>2020</year>
          <month>05</month>
          <day>29</day>
          <volume>4</volume>
          <issue>CSCW1</issue>
          <fpage>1</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1145/3392878</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wenderott</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Krups</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Luetkens</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Weigl</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Radiologists' perspectives on the workflow integration of an artificial intelligence-based computer-aided detection system: a qualitative study</article-title>
          <source>Appl Ergon</source>
          <year>2024</year>
          <month>05</month>
          <volume>117</volume>
          <fpage>104243</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0003-6870(24)00020-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.apergo.2024.104243</pub-id>
          <pub-id pub-id-type="medline">38306741</pub-id>
          <pub-id pub-id-type="pii">S0003-6870(24)00020-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Verma</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mlynar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schaer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Reichenbach</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jreige</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Evéquoz</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Depeursinge</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Rethinking the role of AI with physicians in oncology: revealing perspectives from clinical and research workflows</article-title>
          <source>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</source>
          <year>2023</year>
          <conf-name>CHI '23</conf-name>
          <conf-date>April 23-28, 2023</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <fpage>22</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/fullHtml/10.1145/3544548.3581506"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3544548.3581506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tonekaboni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McCradden</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Goldenberg</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>What clinicians want: contextualizing explainable machine learning for clinical end use</article-title>
          <source>arXiv. preprint posted online on May 13, 2019</source>
          <year>2019</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1905.05134"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1905.05134</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brennen</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>What do people really want when they say they want "explainable AI?" we asked 60 stakeholders</article-title>
          <source>Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems</source>
          <year>2020</year>
          <conf-name>CHI EA '20</conf-name>
          <conf-date>April 25-30, 2020</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>1</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3334480.3383047"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3334480.3383047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fogliato</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chappidi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lungren</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Fisher</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fitzke</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Parkinson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Inkpen</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Nushi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Who goes first? Influences of human-AI workflow on decision making in clinical imaging</article-title>
          <source>Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency</source>
          <year>2022</year>
          <conf-name>FAccT '22</conf-name>
          <conf-date>June 21-24, 2022</conf-date>
          <conf-loc>Seoul, Republic of Korea</conf-loc>
          <fpage>1362</fpage>
          <lpage>74</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3531146.3533193"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3531146.3533193</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salwei</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Carayon</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hoonakker</surname>
              <given-names>PL</given-names>
            </name>
            <name name-style="western">
              <surname>Hundt</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Wiegmann</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pulia</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Patterson</surname>
              <given-names>BW</given-names>
            </name>
          </person-group>
          <article-title>Workflow integration analysis of a human factors-based clinical decision support in the emergency department</article-title>
          <source>Appl Ergon</source>
          <year>2021</year>
          <month>11</month>
          <volume>97</volume>
          <fpage>103498</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34182430"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.apergo.2021.103498</pub-id>
          <pub-id pub-id-type="medline">34182430</pub-id>
          <pub-id pub-id-type="pii">S0003-6870(21)00145-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC8474147</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rietveld</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Teunissen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hoogendoorn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bakker</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>What is the future of artificial intelligence in obstetrics? A qualitative study among healthcare professionals</article-title>
          <source>BMJ Open</source>
          <year>2023</year>
          <month>10</month>
          <day>24</day>
          <volume>13</volume>
          <issue>10</issue>
          <fpage>e076017</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmjopen.bmj.com/lookup/pmidlookup?view=long&#38;pmid=37879682"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjopen-2023-076017</pub-id>
          <pub-id pub-id-type="medline">37879682</pub-id>
          <pub-id pub-id-type="pii">bmjopen-2023-076017</pub-id>
          <pub-id pub-id-type="pmcid">PMC10603416</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="web">
          <article-title>ISO 9241-11: 2018 ergonomics of human-system interaction part 11: usability: definitions and concepts</article-title>
          <source>International Organization for Standardization</source>
          <access-date>2025-02-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://inen.isolutions.iso.org/obp/ui#iso:std:iso:9241:-11:ed-2:v1:en">https://inen.isolutions.iso.org/obp/ui#iso:std:iso:9241:-11:ed-2:v1:en</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cutillo</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Foschini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kundu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mackintosh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mandl</surname>
              <given-names>KD</given-names>
            </name>
            <collab>MI in Healthcare Workshop Working Group</collab>
          </person-group>
          <article-title>Machine intelligence in healthcare-perspectives on trustworthiness, explainability, usability, and transparency</article-title>
          <source>NPJ Digit Med</source>
          <year>2020</year>
          <month>03</month>
          <day>26</day>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>47</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-020-0254-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-020-0254-2</pub-id>
          <pub-id pub-id-type="medline">32258429</pub-id>
          <pub-id pub-id-type="pii">254</pub-id>
          <pub-id pub-id-type="pmcid">PMC7099019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Human-centered design and evaluation of AI-empowered clinical decision support systems: a systematic review</article-title>
          <source>Front Comput Sci</source>
          <year>2023</year>
          <month>6</month>
          <day>2</day>
          <volume>5</volume>
          <fpage>15</fpage>
          <pub-id pub-id-type="doi">10.3389/fcomp.2023.1187299</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mebrahtu</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Skyrme</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Randell</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Keenan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bloor</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Andre</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ledward</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Effects of computerised clinical decision support systems (CDSS) on nursing and allied health professional performance and patient outcomes: a systematic review of experimental and observational studies</article-title>
          <source>BMJ Open</source>
          <year>2021</year>
          <month>12</month>
          <day>15</day>
          <volume>11</volume>
          <issue>12</issue>
          <fpage>e053886</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmjopen.bmj.com/lookup/pmidlookup?view=long&#38;pmid=34911719"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjopen-2021-053886</pub-id>
          <pub-id pub-id-type="medline">34911719</pub-id>
          <pub-id pub-id-type="pii">bmjopen-2021-053886</pub-id>
          <pub-id pub-id-type="pmcid">PMC8679061</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maleki Varnosfaderani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Forouzanfar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The role of AI in hospitals and clinics: transforming healthcare in the 21st century</article-title>
          <source>Bioengineering (Basel)</source>
          <year>2024</year>
          <month>03</month>
          <day>29</day>
          <volume>11</volume>
          <issue>4</issue>
          <fpage>337</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bioengineering11040337"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bioengineering11040337</pub-id>
          <pub-id pub-id-type="medline">38671759</pub-id>
          <pub-id pub-id-type="pii">bioengineering11040337</pub-id>
          <pub-id pub-id-type="pmcid">PMC11047988</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Combi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Amico</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bellazzi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Zitnik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>A manifesto on explainability for artificial intelligence in medicine</article-title>
          <source>Artif Intell Med</source>
          <year>2022</year>
          <month>11</month>
          <volume>133</volume>
          <fpage>102423</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(22)00175-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2022.102423</pub-id>
          <pub-id pub-id-type="medline">36328669</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(22)00175-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Markus</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Kors</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Rijnbeek</surname>
              <given-names>PR</given-names>
            </name>
          </person-group>
          <article-title>The role of explainability in creating trustworthy artificial intelligence for health care: A comprehensive survey of the terminology, design choices, and evaluation strategies</article-title>
          <source>J Biomed Inform</source>
          <year>2021</year>
          <month>01</month>
          <volume>113</volume>
          <fpage>103655</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(20)30283-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2020.103655</pub-id>
          <pub-id pub-id-type="medline">33309898</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(20)30283-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The effects of explainability and causability on perception, trust, and acceptance: implications for explainable AI</article-title>
          <source>Int J Hum Comput Stud</source>
          <year>2021</year>
          <month>02</month>
          <volume>146</volume>
          <fpage>102551</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2020.102551</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xiong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lyu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Interpretability of clinical decision support systems based on artificial intelligence from technological and medical perspective: a systematic review</article-title>
          <source>J Healthc Eng</source>
          <year>2023</year>
          <month>02</month>
          <day>03</day>
          <volume>2023</volume>
          <issue>1</issue>
          <fpage>9919269</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2023/9919269"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2023/9919269</pub-id>
          <pub-id pub-id-type="medline">36776958</pub-id>
          <pub-id pub-id-type="pmcid">PMC9918364</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moradi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Samwald</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Post-hoc explanation of black-box classifiers using confident itemsets</article-title>
          <source>Expert Syst Appl</source>
          <year>2021</year>
          <month>03</month>
          <volume>165</volume>
          <fpage>113941</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2020.113941</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Abdul</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>BY</given-names>
            </name>
          </person-group>
          <article-title>Designing theory-driven user-centric explainable AI</article-title>
          <source>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems</source>
          <year>2019</year>
          <conf-name>CHI '19</conf-name>
          <conf-date>May 4-9, 2019</conf-date>
          <conf-loc>Glasgow, UK</conf-loc>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3290605.3300831"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3290605.3300831</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jacobs</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pradier</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>McCoy</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Perlis</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Doshi-Velez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Gajos</surname>
              <given-names>KZ</given-names>
            </name>
          </person-group>
          <article-title>How machine-learning recommendations influence clinician treatment selections: the example of the antidepressant selection</article-title>
          <source>Transl Psychiatry</source>
          <year>2021</year>
          <month>02</month>
          <day>04</day>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>108</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41398-021-01224-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41398-021-01224-x</pub-id>
          <pub-id pub-id-type="medline">33542191</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41398-021-01224-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC7862671</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Varshney</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Human-centered explainable AI (XAI): from algorithms to user experiences</article-title>
          <source>arXiv. Preprint posted online on October 20, 2021</source>
          <year>2021</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2110.10790"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2110.10790</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Steinfeld</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmerman</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Unremarkable AI: fitting intelligent decision support into critical, clinical decision-making processes</article-title>
          <source>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems</source>
          <year>2019</year>
          <conf-name>CHI '19</conf-name>
          <conf-date>May 4-9, 2019</conf-date>
          <conf-loc>Glasgow, UK</conf-loc>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3290605.3300468"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3290605.3300468</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salih</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Raisi-Estabragh</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Galazzo</surname>
              <given-names>IB</given-names>
            </name>
            <name name-style="western">
              <surname>Radeva</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Petersen</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Lekadir</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Menegaz</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>A perspective on explainable artificial intelligence methods: SHAP and LIME</article-title>
          <source>Adv Intell Syst</source>
          <year>2024</year>
          <month>06</month>
          <day>27</day>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>62</fpage>
          <pub-id pub-id-type="doi">10.1002/aisy.202400304</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alabi</surname>
              <given-names>RO</given-names>
            </name>
            <name name-style="western">
              <surname>Elmusrati</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leivo</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Almangush</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mäkitie</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Machine learning explainability in nasopharyngeal cancer survival using LIME and SHAP</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <month>06</month>
          <day>02</day>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>8984</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-35795-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-35795-0</pub-id>
          <pub-id pub-id-type="medline">37268685</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-35795-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10238539</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frasca</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>La Torre</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pravettoni</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cutica</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Explainable and interpretable artificial intelligence in medicine: a systematic bibliometric review</article-title>
          <source>Discov Artif Intell</source>
          <year>2024</year>
          <month>02</month>
          <day>27</day>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1007/s44163-024-00114-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Delitzas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chatzidimitriou</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Symeonidis</surname>
              <given-names>AL</given-names>
            </name>
          </person-group>
          <article-title>Calista: a deep learning-based system for understanding and evaluating website aesthetics</article-title>
          <source>Int J Hum Comput Stud</source>
          <year>2023</year>
          <month>07</month>
          <volume>175</volume>
          <fpage>103019</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2023.103019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Papadopoulos</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Soflano</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chaudy</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Adejo</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Connolly</surname>
              <given-names>TM</given-names>
            </name>
          </person-group>
          <article-title>A systematic review of technologies and standards used in the development of rule-based clinical decision support systems</article-title>
          <source>Health Technol</source>
          <year>2022</year>
          <month>05</month>
          <day>27</day>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>713</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.1007/s12553-022-00672-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>RV</given-names>
            </name>
            <name name-style="western">
              <surname>Malenica</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bishara</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hubbard</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Pirracchio</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Clinical artificial intelligence quality improvement: towards continual monitoring and updating of AI algorithms in healthcare</article-title>
          <source>NPJ Digit Med</source>
          <year>2022</year>
          <month>05</month>
          <day>31</day>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>66</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-022-00611-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-022-00611-y</pub-id>
          <pub-id pub-id-type="medline">35641814</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-022-00611-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC9156743</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Muschalik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fumagalli</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hammer</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hüllermeier</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Agnostic explanation of model change based on feature importance</article-title>
          <source>Künstl Intell</source>
          <year>2022</year>
          <month>07</month>
          <day>12</day>
          <volume>36</volume>
          <issue>3-4</issue>
          <fpage>211</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1007/s13218-022-00766-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prado</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Saeed</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Keller</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Vallez</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gregg</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Benini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Llewellynn</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ouerhani</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dahyot</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pazos</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Bonseyes AI pipeline—bringing AI to you: end-to-end integration of data, algorithms, and deployment tools?</article-title>
          <source>ACM Trans Internet Things</source>
          <year>2020</year>
          <month>08</month>
          <day>04</day>
          <volume>1</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>25</lpage>
          <pub-id pub-id-type="doi">10.1145/3403572</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaspers</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Smeulers</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vermeulen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Peute</surname>
              <given-names>LW</given-names>
            </name>
          </person-group>
          <article-title>Effects of clinical decision-support systems on practitioner performance and patient outcomes: a synthesis of high-quality systematic review findings</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2011</year>
          <month>05</month>
          <day>01</day>
          <volume>18</volume>
          <issue>3</issue>
          <fpage>327</fpage>
          <lpage>34</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/21422100"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/amiajnl-2011-000094</pub-id>
          <pub-id pub-id-type="medline">21422100</pub-id>
          <pub-id pub-id-type="pii">amiajnl-2011-000094</pub-id>
          <pub-id pub-id-type="pmcid">PMC3078663</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmadian</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>van Engen-Verheul</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bakhshi-Raiez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Peek</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cornet</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>de Keizer</surname>
              <given-names>NF</given-names>
            </name>
          </person-group>
          <article-title>The role of standardized data and terminological systems in computerized clinical decision support systems: literature review and survey</article-title>
          <source>Int J Med Inform</source>
          <year>2011</year>
          <month>02</month>
          <volume>80</volume>
          <issue>2</issue>
          <fpage>81</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2010.11.006</pub-id>
          <pub-id pub-id-type="medline">21168360</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(10)00226-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Achour</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Dojat</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rieux</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bierling</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lepage</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>A UMLS-based knowledge acquisition tool for rule-based clinical decision support system development</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2001</year>
          <month>07</month>
          <day>01</day>
          <volume>8</volume>
          <issue>4</issue>
          <fpage>351</fpage>
          <lpage>60</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://hal.archives-ouvertes.fr/inserm-00402405"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/jamia.2001.0080351</pub-id>
          <pub-id pub-id-type="medline">11418542</pub-id>
          <pub-id pub-id-type="pmcid">PMC130080</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vorisek</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Lehne</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Klopfenstein</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Mayer</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bartschke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Thun</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Fast Healthcare Interoperability Resources (FHIR) for interoperability in health research: systematic review</article-title>
          <source>JMIR Med Inform</source>
          <year>2022</year>
          <month>07</month>
          <day>19</day>
          <volume>10</volume>
          <issue>7</issue>
          <fpage>e35724</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2022/7/e35724/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/35724</pub-id>
          <pub-id pub-id-type="medline">35852842</pub-id>
          <pub-id pub-id-type="pii">v10i7e35724</pub-id>
          <pub-id pub-id-type="pmcid">PMC9346559</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Longoni</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bonezzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Morewedge</surname>
              <given-names>CK</given-names>
            </name>
          </person-group>
          <article-title>Resistance to medical artificial intelligence</article-title>
          <source>J Consum Res</source>
          <year>2019</year>
          <volume>46</volume>
          <issue>4</issue>
          <fpage>629</fpage>
          <lpage>50</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://academic.oup.com/jcr/article-abstract/46/4/629/5485292"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jcr/ucz013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shaikh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The long road ahead: navigating obstacles and building bridges for clinical integration of artificial intelligence technologies</article-title>
          <source>J Med Artif Intell</source>
          <year>2025</year>
          <month>03</month>
          <volume>8</volume>
          <fpage>7</fpage>
          <pub-id pub-id-type="doi">10.21037/jmai-24-148</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Siewiorek</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Smailagic</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bernardino</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bermúdez</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A human-AI collaborative approach for clinical decision making on rehabilitation assessment</article-title>
          <source>Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems</source>
          <year>2021</year>
          <conf-name>CHI '21</conf-name>
          <conf-date>May 8-13, 2021</conf-date>
          <conf-loc>Yokohama, Japan</conf-loc>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3411764.3445472"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3411764.3445472</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>LaMonica</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Davenport</surname>
              <given-names>TA</given-names>
            </name>
            <name name-style="western">
              <surname>Ottavio</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rowe</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Cross</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Iorfino</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>TA</given-names>
            </name>
            <name name-style="western">
              <surname>Easton</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Melsness</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hickie</surname>
              <given-names>IB</given-names>
            </name>
          </person-group>
          <article-title>Optimising the integration of technology-enabled solutions to enhance primary mental health care: a service mapping study</article-title>
          <source>BMC Health Serv Res</source>
          <year>2021</year>
          <month>01</month>
          <day>15</day>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>68</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmchealthservres.biomedcentral.com/articles/10.1186/s12913-021-06069-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12913-021-06069-0</pub-id>
          <pub-id pub-id-type="medline">33451328</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12913-021-06069-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC7811218</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goh</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gallo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hom</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Strong</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Weng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kerman</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cool</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Kanjee</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Parsons</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Ahuja</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Milstein</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Olson</surname>
              <given-names>APJ</given-names>
            </name>
            <name name-style="western">
              <surname>Rodman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Large language model influence on diagnostic reasoning: a randomized clinical trial</article-title>
          <source>JAMA Netw Open</source>
          <year>2024</year>
          <month>10</month>
          <day>01</day>
          <volume>7</volume>
          <issue>10</issue>
          <fpage>e2440969</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jamanetwork.com/journals/jamanetworkopen/fullarticle/10.1001/jamanetworkopen.2024.40969"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.40969</pub-id>
          <pub-id pub-id-type="medline">39466245</pub-id>
          <pub-id pub-id-type="pii">2825395</pub-id>
          <pub-id pub-id-type="pmcid">PMC11519755</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dapkins</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Prescott</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ladino</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Anderman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McCaleb</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Colella</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Gore</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fontil</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Szerencsy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Blecker</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A dynamic clinical decision support tool to improve primary care outcomes in a high-volume, low-resource setting</article-title>
          <source>NEJM Catal</source>
          <year>2024</year>
          <month>03</month>
          <day>20</day>
          <volume>5</volume>
          <issue>4</issue>
          <fpage>63</fpage>
          <pub-id pub-id-type="doi">10.1056/cat.23.0366</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Machado</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Burr</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cowls</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Taddeo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The ethics of AI in health care: a mapping review</article-title>
          <source>Soc Sci Med</source>
          <year>2020</year>
          <month>09</month>
          <volume>260</volume>
          <fpage>113172</fpage>
          <pub-id pub-id-type="doi">10.1016/j.socscimed.2020.113172</pub-id>
          <pub-id pub-id-type="medline">32702587</pub-id>
          <pub-id pub-id-type="pii">S0277-9536(20)30391-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moullin</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Dickson</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Stadnick</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Rabin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Aarons</surname>
              <given-names>GA</given-names>
            </name>
          </person-group>
          <article-title>Systematic review of the Exploration, Preparation, Implementation, Sustainment (EPIS) framework</article-title>
          <source>Implement Sci</source>
          <year>2019</year>
          <month>01</month>
          <day>05</day>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://implementationscience.biomedcentral.com/articles/10.1186/s13012-018-0842-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13012-018-0842-6</pub-id>
          <pub-id pub-id-type="medline">30611302</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13012-018-0842-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC6321673</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khedkar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gandhi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Shinde</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Subramanian</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Dash</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Acharya</surname>
              <given-names>BR</given-names>
            </name>
            <name name-style="western">
              <surname>Mittal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abraham</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kelemen</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep learning and explainable AI in healthcare using EHR</article-title>
          <source>Deep Learning Techniques for Biomedical and Health Informatics</source>
          <year>2020</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>129</fpage>
          <lpage>48</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abell</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Naicker</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rodwell</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Donovan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Baysari</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Blythe</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Parsons</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>McPhail</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Identifying barriers and facilitators to successful implementation of computerized clinical decision support systems in hospitals: a NASSS framework-informed scoping review</article-title>
          <source>Implement Sci</source>
          <year>2023</year>
          <month>07</month>
          <day>26</day>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>32</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://implementationscience.biomedcentral.com/articles/10.1186/s13012-023-01287-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13012-023-01287-y</pub-id>
          <pub-id pub-id-type="medline">37495997</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13012-023-01287-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC10373265</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kawamoto</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Houlihan</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Balas</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Lobach</surname>
              <given-names>DF</given-names>
            </name>
          </person-group>
          <article-title>Improving clinical practice using clinical decision support systems: a systematic review of trials to identify features critical to success</article-title>
          <source>BMJ</source>
          <year>2005</year>
          <month>04</month>
          <day>02</day>
          <volume>330</volume>
          <issue>7494</issue>
          <fpage>765</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/15767266"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.38398.500764.8F</pub-id>
          <pub-id pub-id-type="medline">15767266</pub-id>
          <pub-id pub-id-type="pii">bmj.38398.500764.8F</pub-id>
          <pub-id pub-id-type="pmcid">PMC555881</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Embí</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Matheny</surname>
              <given-names>ME</given-names>
            </name>
          </person-group>
          <article-title>Sustainable deployment of clinical prediction tools-a 360° approach to model maintenance</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2024</year>
          <month>04</month>
          <day>19</day>
          <volume>31</volume>
          <issue>5</issue>
          <fpage>1195</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38422379"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocae036</pub-id>
          <pub-id pub-id-type="medline">38422379</pub-id>
          <pub-id pub-id-type="pii">7616485</pub-id>
          <pub-id pub-id-type="pmcid">PMC11031208</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abdul</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vermeulen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>BY</given-names>
            </name>
            <name name-style="western">
              <surname>Kankanhalli</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Trends and trajectories for explainable, accountable and intelligible systems: an HCI research agenda</article-title>
          <source>Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems</source>
          <year>2018</year>
          <conf-name>CHI '18</conf-name>
          <conf-date>April 21-26, 2018</conf-date>
          <conf-loc>Montreal, QC</conf-loc>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3173574.3174156"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3173574.3174156</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Biemann</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Pattichis</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Kell</surname>
              <given-names>DB</given-names>
            </name>
          </person-group>
          <article-title>What do we need to build explainable AI systems for the medical domain?</article-title>
          <source>arXiv. Preprint posted online on December 28, 2017</source>
          <year>2017</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1712.09923v1"/>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
