<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e71236</article-id>
      <article-id pub-id-type="pmid">40455564</article-id>
      <article-id pub-id-type="doi">10.2196/71236</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Trust, Trustworthiness, and the Future of Medical AI: Outcomes of an Interdisciplinary Expert Workshop</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Schlicker</surname>
            <given-names>Nadine</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Fitzek</surname>
            <given-names>Sebastian</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Goisauf</surname>
            <given-names>Melanie</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of ELSI Services and Research</institution>
            <institution>Biobanking and Biomolecular Resources Research Infrastructure Consortium</institution>
            <addr-line>Neue Stiftingtalstrasse 2/B/6</addr-line>
            <addr-line>Graz, 8010</addr-line>
            <country>Austria</country>
            <phone>43 664 88 72 18 73</phone>
            <email>melanie.goisauf@bbmri-eric.eu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3909-8071</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Cano Abadía</surname>
            <given-names>Mónica</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7726-9222</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Akyüz</surname>
            <given-names>Kaya</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2444-2095</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Bobowicz</surname>
            <given-names>Maciej</given-names>
          </name>
          <degrees>Dr med</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3608-1960</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Buyx</surname>
            <given-names>Alena</given-names>
          </name>
          <degrees>Prof Dr</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5726-7633</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Colussi</surname>
            <given-names>Ilaria</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1250-114X</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Fritzsche</surname>
            <given-names>Marie-Christine</given-names>
          </name>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8056-2462</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Lekadir</surname>
            <given-names>Karim</given-names>
          </name>
          <degrees>Prof Dr</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9456-1612</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Marttinen</surname>
            <given-names>Pekka</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff6" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7078-7927</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Mayrhofer</surname>
            <given-names>Michaela Th</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6932-0473</ext-link>
        </contrib>
        <contrib id="contrib11" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Meszaros</surname>
            <given-names>Janos</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff7" ref-type="aff">7</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2899-4268</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of ELSI Services and Research</institution>
        <institution>Biobanking and Biomolecular Resources Research Infrastructure Consortium</institution>
        <addr-line>Graz</addr-line>
        <country>Austria</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>2nd Department of Radiology</institution>
        <institution>Gdańsk Medical University</institution>
        <addr-line>Gdansk</addr-line>
        <country>Poland</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Institute of History and Ethics in Medicine</institution>
        <institution>TUM School of Medicine and Health</institution>
        <institution>Technical University of Munich</institution>
        <addr-line>Munich</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Science, Technology and Society (STS)</institution>
        <institution>School of Social Sciences and Technology</institution>
        <institution>Technical University of Munich</institution>
        <addr-line>Munich</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Institució Catalana de Recerca i Estudis Avançats (ICREA)</institution>
        <institution>Department of Mathematics and Computer Science</institution>
        <institution>Universitat de Barcelona</institution>
        <addr-line>Barcelona</addr-line>
        <country>Spain</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Department of Computer Science</institution>
        <institution>Aalto University</institution>
        <addr-line>Espoo</addr-line>
        <country>Finland</country>
      </aff>
      <aff id="aff7">
        <label>7</label>
        <institution>Division of Clinical Pharmacology and Pharmacotherapy</institution>
        <institution>Department of Pharmaceutical and Pharmacological Sciences</institution>
        <institution>KU Leuven</institution>
        <addr-line>Leuven</addr-line>
        <country>Belgium</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Melanie Goisauf <email>melanie.goisauf@bbmri-eric.eu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>2</day>
        <month>6</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e71236</elocation-id>
      <history>
        <date date-type="received">
          <day>13</day>
          <month>1</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>16</day>
          <month>3</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>4</day>
          <month>4</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>21</day>
          <month>4</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Melanie Goisauf, Mónica Cano Abadía, Kaya Akyüz, Maciej Bobowicz, Alena Buyx, Ilaria Colussi, Marie-Christine Fritzsche, Karim Lekadir, Pekka Marttinen, Michaela Th Mayrhofer, Janos Meszaros. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 02.06.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e71236" xlink:type="simple"/>
      <abstract>
        <p>Trustworthiness has become a key concept for the ethical development and application of artificial intelligence (AI) in medicine. Various guidelines have formulated key principles, such as fairness, robustness, and explainability, as essential components to achieve trustworthy AI. However, conceptualizations of trustworthy AI often emphasize technical requirements and computational solutions, frequently overlooking broader aspects of fairness and potential biases. These include not only algorithmic bias but also human, institutional, social, and societal factors, which are critical to foster AI systems that are both ethically sound and socially responsible. This viewpoint article presents an interdisciplinary approach to analyzing trust in AI and trustworthy AI within the medical context, focusing on (1) social sciences and humanities conceptualizations and legal perspectives on trust and (2) their implications for trustworthy AI in health care. It focuses on real-world challenges in medicine that are often underrepresented in theoretical discussions to propose a more practice-oriented understanding. Insights were gathered from an interdisciplinary workshop with experts from various disciplines involved in the development and application of medical AI, particularly in oncological imaging and genomics, complemented by theoretical approaches related to trust in AI. Results emphasize that, beyond common issues of bias and fairness, knowledge and human involvement are essential for trustworthy AI. Stakeholder engagement throughout the AI life cycle emerged as crucial, supporting a human- and multicentered framework for trustworthy AI implementation. Findings emphasize that trust in medical AI depends on providing meaningful, user-oriented information and balancing knowledge with acceptable uncertainty. Experts highlighted the importance of confidence in the tool's functionality, specifically that it performs as expected. 
Trustworthiness was shown to be not a feature but rather a relational process, involving humans, their expertise, and the broader social or institutional contexts in which AI tools operate. Trust is dynamic, shaped by interactions among individuals, technologies, and institutions, and ultimately centers on people rather than tools alone. Tools are evaluated based on reliability and credibility, yet trust fundamentally relies on human connections. The article underscores the importance of developing AI tools that are not only technically sound but also ethically robust and broadly accepted by end users, contributing to more effective and equitable AI-mediated health care. Findings highlight that building AI trustworthiness in health care requires a human-centered, multistakeholder approach with diverse and inclusive engagement. To promote equity, we recommend that AI development teams involve all relevant stakeholders at every stage of the AI life cycle—from conception and technical development to clinical validation and real-world deployment.</p>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>trust</kwd>
        <kwd>trustworthy AI</kwd>
        <kwd>medicine</kwd>
        <kwd>ethics of AI</kwd>
        <kwd>interdisciplinarity</kwd>
        <kwd>stakeholder engagement</kwd>
        <kwd>human-centered AI</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Trustworthiness has become a key requirement in the development and application of ethical artificial intelligence (AI). This is highlighted by the European Commission High-Level Expert Group on AI (AI HLEG) Guidelines for Trustworthy AI [<xref ref-type="bibr" rid="ref1">1</xref>] and emphasized by the World Health Organization (WHO) [<xref ref-type="bibr" rid="ref2">2</xref>] for effective AI integration in health care. In addition, cross-cutting initiatives such as FUTURE-AI (Fairness, Universality, Traceability, Usability, Robustness, and Explainability–artificial intelligence) aim to guide AI developments toward trustworthiness, focusing on principles of fairness, universality, traceability, usability, robustness, and explainability [<xref ref-type="bibr" rid="ref3">3</xref>]. The discourse on trustworthy AI often focuses on defining the conditions under which it can be achieved. Scholars are developing practical approaches for realizing trustworthy AI through guidelines or regulations [<xref ref-type="bibr" rid="ref4">4</xref>], and some institutions even offer certification possibilities (eg, IEEE CertifAIEd). Such efforts neglect that ethical AI cannot be guaranteed solely by following principles [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>], or that trustworthy AI cannot be defined solely in terms of technical conditions and solutions [<xref ref-type="bibr" rid="ref7">7</xref>], such as fairness objectives [<xref ref-type="bibr" rid="ref8">8</xref>]. Indeed, some authors have challenged the idea of achieving trustworthy AI by merely meeting technical criteria and have argued that trusting in AI is based on more human than technical aspects [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Moreover, guidelines on the implementation of “trustworthy AI” in medicine lack consensus on what defines a user’s trust in AI [<xref ref-type="bibr" rid="ref11">11</xref>].</p>
      <p>The significance of the discourses around trust in AI and the trustworthiness of AI within the scientific discussion on ethical AI is rarely reflected in its conceptualization [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>], which is crucial as AI “can increase systemic risks of harm, raise the possibility of errors with severe consequences, and amplify complex ethical and societal issues” [<xref ref-type="bibr" rid="ref14">14</xref>]. In scientific literature, the concepts of “trust” and “trustworthiness” are often used interchangeably, despite repeated efforts to define them [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. A more thorough analysis is needed on the situatedness of trust in medical AI and the trustworthiness of medical AI, given that trust plays a critical role in situations of risk, vulnerability, and uncertainty, circumstances frequently encountered in the medical context. In this context, trust needs to be understood as a complex relational concept that involves several trustor-trustee relationships, such as trust in technology, institutions, and persons, for example, scientists who trust each other, patients who trust scientists, and health care professionals [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>].</p>
      <p>Trustworthiness of medical AI also depends on how certain challenges are being managed: for instance, biases in the training data that lead to biases in algorithms, lack of transparency or explainability in how an AI system decides, securing sensitive data such as medical data, as well as ensuring human oversight and continuous monitoring [<xref ref-type="bibr" rid="ref19">19</xref>]. Against the backdrop of evidence showing that algorithms employed in health care can encode, reinforce, and exacerbate existing inequalities within the health care system [<xref ref-type="bibr" rid="ref20">20</xref>], which poses a particular risk to vulnerable patients [<xref ref-type="bibr" rid="ref21">21</xref>], identifying and mitigating biases, especially racial and gender biases [<xref ref-type="bibr" rid="ref22">22</xref>], is key for trust [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Failure to do so could erode public trust in health systems and hinder the adoption of AI systems in health care [<xref ref-type="bibr" rid="ref25">25</xref>]. Consequently, addressing bias must go beyond the prevailing focus on computational factors and fairness of machine learning algorithms and must take all forms of potential bias into account, including human, institutional, and societal factors [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
      <p>This viewpoint article presents an interdisciplinary approach to analyzing trust in AI and trustworthy AI within the medical context and proposes a practice-oriented conceptualization. Adopting a multidisciplinary understanding of trust and trustworthiness, it builds on the outcomes of a workshop that brought together experts from various disciplines involved in the development and application of medical AI, particularly in oncological imaging and genomics. The aim was to address analytical gaps and explore the concepts of trust and trustworthiness. This interdisciplinary and contextualized standpoint informs the analysis of trust and trustworthiness in medical AI from knowledge production to technology development and use. In doing so, the article puts an emphasis on the social and legal conceptualizations of trust and their implications for trustworthy AI in medicine.</p>
    </sec>
    <sec>
      <title>Trust in Humanities and Social Sciences</title>
      <p>While there is a plethora of literature on trust and trustworthiness of AI [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref27">27</xref>] and in medicine in particular [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>], a deeper conceptualization of the terms used is needed for specific practices in medicine and health care.</p>
      <p>Across different philosophical approaches (eg, virtue ethics or deontological ethics), trust is considered interpersonal and relational, involving risk and moral responsiveness to patients’ vulnerability. Carter [<xref ref-type="bibr" rid="ref31">31</xref>] notes that illness induces feelings of vulnerability in patients and caregivers, highlighting the need for a mature exploration of trust, distinct from related concepts like reliance, confidence, or faith. Baier [<xref ref-type="bibr" rid="ref32">32</xref>] defines trust as a mix of reliance, confidence, and dependence. Reliance involves trusting another’s competence, while trust involves depending on their goodwill. Baier asserts that everyone is interdependent, relying on others for care. This aligns with more recent literature, which emphasizes our constitutive vulnerability and interdependence [<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref36">36</xref>]. Baier [<xref ref-type="bibr" rid="ref32">32</xref>] also highlights the role of power relations, stressing that morality requires trust and consideration of potential exploitation in unequal power dynamics.</p>
      <p>Sociological perspectives on trust, particularly those of Giddens and Luhmann [<xref ref-type="bibr" rid="ref37">37</xref>], explore the dynamics of institutional and interpersonal trust. Institutional trust, such as in the health care system, is influenced by trust in its representatives, like doctors. Giddens emphasizes the role of the health care professional’s appearance and professionalism in shaping patient expectations and legitimizing the medical system. He asserts that trust in “flesh-and-blood” representatives informs trust in the system and is necessary to manage partial understanding due to uncertainty. Trust is not needed in situations with complete knowledge. Luhmann, however, views trust as a medium that reduces social complexity, enabling interactions within and by the system. He distinguishes between trust and confidence: trust involves past experiences and perceived risks, whereas confidence relies solely on expectation without considering alternatives [<xref ref-type="bibr" rid="ref37">37</xref>].</p>
      <p>Trust has been a central topic in the social sciences’ examination of science and knowledge production, emphasizing the interplay between individuals and social institutions. Understanding trust requires recognizing its foundation in social relations and exploring its interactive nature, including the role of technology and the importance of tacit, embodied, and situated knowledge [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. These elements incorporate social, practical, human, and bodily factors into the comprehension of knowledge production and mutual trust.</p>
      <p>Science and technology studies (STS) conceptualize trust in relational terms, particularly focusing on the publics of science and technology. A significant shift has been the critique of the “deficit model,” which assumes public mistrust stems from a lack of understanding [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. This critique has led to a transformation in building trust through communicative practices, moving from one-directional communication to models of engagement that recognize the diversity of publics. Despite this, critics note the persistent reemergence of the deficit model even in newer efforts [<xref ref-type="bibr" rid="ref42">42</xref>]. Concerns about emerging technologies’ risks and uncertainties highlight the need to reflect on modernity’s inherent crises and their relation to policymaking, accountability, transparency, and expertise [<xref ref-type="bibr" rid="ref43">43</xref>]. STS scholars emphasize the importance of considering various forms of expertise, including lay expertise [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. Cases of public acceptance or rejection of technologies like nuclear energy, nanotechnology, and genetically modified organisms provide insights into contemporary AI developments [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>]. One-way communication does not automatically increase public trust; technologies often carry risks not immediately apparent to technologists, necessitating reflexivity [<xref ref-type="bibr" rid="ref48">48</xref>] and a global perspective [<xref ref-type="bibr" rid="ref49">49</xref>]. Hence, building trust would require transparent, adaptable governance systems and strong trust relations between individuals and institutions, respecting the variety of expertise.</p>
      <p>In examining the intersection of trust and AI within medicine, it may be relevant to translate insights into trust offered by the humanities and social sciences into this emerging field. <xref ref-type="table" rid="table1">Table 1</xref> presents a synthesis of classical approaches to trust outlined above, highlighting their relevance and application of AI in health care.</p>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>Translation of selected classical approaches to trust in the humanities and social sciences into the field of AI<sup>a</sup> and medicine.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="350"/>
          <col width="650"/>
          <thead>
            <tr valign="top">
              <td>Approach</td>
              <td>Translation</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Trust as a relational and moral concept (Carter [<xref ref-type="bibr" rid="ref31">31</xref>], Baier [<xref ref-type="bibr" rid="ref32">32</xref>])</td>
              <td>Trust in AI is viewed as a relationship in which ethical behavior, transparency, and accountability are essential. Trust is not only about the system’s performance but also about the intentions and actions of those who design and manage AI systems.</td>
            </tr>
            <tr valign="top">
              <td>Trust involving risk and moral responsiveness (Pellegrino &#38; Thomasma [<xref ref-type="bibr" rid="ref50">50</xref>], Emanuel &#38; Emanuel [<xref ref-type="bibr" rid="ref51">51</xref>])</td>
              <td>Trust in AI involves stakeholders assuming risks, such as data privacy risks or misdiagnoses, with the expectation that AI systems will function ethically, respecting user autonomy and promoting well-being.</td>
            </tr>
            <tr valign="top">
              <td>Foundational trust in doctor-patient relationships (Kittay &#38; Meyers [<xref ref-type="bibr" rid="ref52">52</xref>])</td>
              <td>In AI-enhanced health care, trust is crucial for integrating AI tools into patient care, affecting how patients perceive and cooperate with AI-driven diagnostics and treatment plans.</td>
            </tr>
            <tr valign="top">
              <td>Institutional trust (Giddens, Luhmann in Meyer et al [<xref ref-type="bibr" rid="ref37">37</xref>])</td>
              <td>Trust in AI within institutions depends on the trustworthiness of those who deploy and manage AI systems, influencing public and professional trust in the technology’s utility and safety.</td>
            </tr>
            <tr valign="top">
              <td>Dynamic trust shaped by social relations, technology, and experience (Collins [<xref ref-type="bibr" rid="ref39">39</xref>], Haraway [<xref ref-type="bibr" rid="ref38">38</xref>], Meyer et al [<xref ref-type="bibr" rid="ref37">37</xref>])</td>
              <td>Trust in AI is dynamic, shaped by ongoing interactions between users, developers, and AI systems. This includes how technology adapts to social expectations and how it is embedded in social practices.</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table1fn1">
            <p><sup>a</sup>AI: artificial intelligence.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
    </sec>
    <sec>
      <title>Legal Perspectives on Trust and Trustworthiness</title>
      <p>In legal studies, the concept of trust is often overlooked [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>] except for “trust law” in common law systems, which involves a fiduciary relationship where a “trustor” transfers property or rights to a trustee [<xref ref-type="bibr" rid="ref55">55</xref>]. “Good faith” and “due diligence” embody elements of trust, implying confidence in a person or entity. For example, in contract law, good faith is essential, presuming honest and fair dealings between parties. Thus, trust is implicitly part of good faith. When trust is violated, liability rules restore balance [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>].</p>
      <p>Some authors argue that the law substitutes for trust [<xref ref-type="bibr" rid="ref58">58</xref>] or emerges where trust is lacking [<xref ref-type="bibr" rid="ref59">59</xref>]. Law’s coercive and controlling nature can crowd out trust, especially “personal trust” in individuals or firms, which could mean that legislation cannot effectively promote trust. Instead, the law can define and establish rules for trustworthy behaviors. Greco [<xref ref-type="bibr" rid="ref60">60</xref>] challenges the idea that law and trust are incompatible, arguing that law can play a constructive role in fostering trust and serving as a bridge between citizens and institutions, thereby generating and sustaining social trust. In other words, the law not only imposes rules but also helps people feel safe, respected, and thus more inclined to trust the system in which they live. From a legal philosophy perspective, trust underpins the law’s existence: human relationships, which the law regulates, rely on trust. Sanctions rebuild violated trust, and rules emerge from trust, with the expectation of compliance. Thus, trust is foundational to the law.</p>
      <p>Like trust, trustworthiness is not inherently a legal concept. In the context of AI, the European Union (EU) has emphasized trustworthiness, specifically through the concept of “trustworthy AI” rather than focusing on “trust” itself. The Ethics Guidelines for Trustworthy AI by the AI HLEG [<xref ref-type="bibr" rid="ref1">1</xref>] define trustworthy AI as being “lawful, ethical, and robust.” Smuha et al [<xref ref-type="bibr" rid="ref61">61</xref>] identify three pillars of “Legally Trustworthy AI”: (1) responsibility allocation, where the regulation appropriately assigns accountability for the harms and wrongs resulting from AI systems; (2) a consistent legal framework, where the regulation establishes and maintains a unified legal structure accompanied by effective and legitimate enforcement mechanisms to secure and uphold the rule of law; and (3) democratic deliberation, where the regulation places democratic discussion at its center, ensuring public participation and other information rights. These pillars were crucial when the EU AI Act was drafted, aiming to establish a robust legal framework to foster the development of secure, trustworthy, and ethical AI. The EU AI Act, which entered into force on August 1, 2024, establishes a comprehensive regulatory framework for AI within the EU [<xref ref-type="bibr" rid="ref62">62</xref>]. The Act highlights that “in the health sector where the stakes for life and health are particularly high, increasingly sophisticated diagnostics systems and systems supporting human decisions should be reliable and accurate” (Recital 47). The AI Act takes a human-centric approach and recalls the guidelines for trustworthy AI by the AI HLEG (see Recital 27), namely: human agency and oversight, technical robustness and safety, privacy and data governance, transparency, diversity, nondiscrimination and fairness, societal and environmental well-being, and accountability.
It states that these principles should be applied in the design and use of AI models and should serve as a basis for the drafting of codes of conduct under the Regulation.</p>
      <p>The AI Act mentions “trust” or “trustworthiness” 29 times, underscoring the significance of building trust in AI technologies. The health sector is significantly impacted by the EU AI Act, particularly due to the inclusion of AI systems. Moreover, the AI Act states that AI systems classified as medical devices under Medical Device Regulation (MDR) [<xref ref-type="bibr" rid="ref63">63</xref>] or In Vitro Device Regulation [<xref ref-type="bibr" rid="ref64">64</xref>]—and thus those AI systems used as medical devices—must comply with both the AI Act and existing medical device regulations. There are exemptions only for AI systems used exclusively for scientific research and premarket product development. In light of the AI Act, therefore, it seems that most commercial AI-enabled medical devices on the market used in radiology, being classified as Class IIa and up, under the MDR [<xref ref-type="bibr" rid="ref65">65</xref>], are classified as high risk under the AI Act [<xref ref-type="bibr" rid="ref66">66</xref>].</p>
      <p>These developments and the prominence that “trust” and “trustworthiness” have achieved in law and policy, while being scarcely defined, highlight the need for a comprehensive analysis, particularly from an interdisciplinary and practical perspective. Toward this end, this paper advances the discussion by examining how the EU AI Act gives legal meaning to the otherwise vague concept of “trustworthy AI.” It highlights how trust is not only a technical or ethical aspiration, but a legal objective shaped by rules on accountability, transparency, and human oversight, especially in health care settings.</p>
    </sec>
    <sec>
      <title>Synthesizing Interdisciplinary Knowledge</title>
      <p>To make better sense of issues around ethical AI as well as understandings and conditions of trust and trustworthy AI in medicine, we organized a 2-day workshop with 14 academic experts in data science, law, medicine, ethics, social science, and philosophy, which took place on May 5-6, 2022, in Berlin, Germany. The workshop participants were researchers representing four large European projects (EuCanImage, INTERVENE, Bigpicture, and BIOMAP) and other experts who contributed perspectives from philosophical and policy research. The focus was on how to implement the ethics of AI, drawing from experiences in oncological imaging and genomics. The workshop combined presentations and discussions on ethical, legal, societal, technical, and clinical aspects of medical AI.</p>
      <p>Inspired by focus group and stakeholder engagement methods [<xref ref-type="bibr" rid="ref67">67</xref>-<xref ref-type="bibr" rid="ref69">69</xref>], the workshop synthesized interdisciplinary inputs on explainability and trustworthiness of AI solutions, clinical applications in cancer imaging, polygenic risk score generation and use, biases in training and calibration of AI models, and algorithmic impact assessment, as well as moderated discussions between the experts. The workshop discussions were recorded based on the informed consent of all participants, and key discussion points were put on digital sticky notes and related to each other on a digital mural during the workshop.</p>
      <p>To deepen these discussions, the workshop participants were invited to partake in an online questionnaire consisting of five open-ended questions to share further thoughts and more specific insights into their field of expertise. Since trust and trustworthiness emerged as a crucial topic in the workshop discussions, the questions were on field-specific definitions of trust in the context of AI and on criteria for trustworthy AI applications, for trusting relationships involving AI, and for trustworthiness as a valuable principle.</p>
      <p>Aspects of trustworthy AI in medicine were also guiding the analysis of the workshop discussions and questionnaire responses. The analysis team (MG, KA, MCA) interpreted the workshop discussions and questionnaire answers in several meetings and conducted thematic analysis [<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>] to unpack the requirements, conditions, and challenges around trustworthy AI that are presented in the following sections of this article.</p>
      <p>While topics like bias and fairness—often central and technically-focused within AI ethics discussions—have received considerable attention, we aim here to highlight equally critical yet more socially and practically oriented themes, specifically the importance of knowledge and human involvement. These dimensions, as detailed by practitioners, offer deeper insights into real-world challenges within the medical field.</p>
    </sec>
    <sec>
      <title>The More We Know, the More We Trust?</title>
      <p>In the scientific discourse on AI, the trustworthiness of an AI system is closely tied to the requirement that the decision-making process must be transparent, explainable, and understandable, to allow medical professionals to make informed decisions [<xref ref-type="bibr" rid="ref72">72</xref>]. This should include that AI systems communicate their limitations, for example, their inability to provide an accurate answer for a given patient or, in general, having a greater accuracy for certain patient groups, as well as include proper uncertainty quantification for their results [<xref ref-type="bibr" rid="ref73">73</xref>]. This allows doctors to assess the appropriateness of using the AI tool for specific patients.</p>
      <p>The need for transparency resonates with the fact that medical knowledge production inherently involves a degree of uncertainty. In scientific knowledge production, organized skepticism is part of the ethos of science [<xref ref-type="bibr" rid="ref74">74</xref>]; however, the temporalities of clinical practice and scientific knowledge production differ. For AI, providing comprehensive explanations of internal processes may often be unrealistic [<xref ref-type="bibr" rid="ref75">75</xref>]. Besides, striving for more explainable AI systems could reduce their efficacy, depending on the explainability techniques applied. For example, if a model is designed to be explainable from the outset using a simple model structure, like a linear additive model, its predictive performance could be compromised. On the other hand, even a simple model based on causal relationships verified through careful interpretation and domain knowledge may be robust and generalizable [<xref ref-type="bibr" rid="ref76">76</xref>], but could be difficult to achieve for complex data types such as images or text. Hence, the pursuit of trustworthy AI raises questions about the balance between transparency and complexity.</p>
      <p>Workshop outcomes indicate that what makes a difference to the trustworthiness of medical AI systems is not necessarily more, but more meaningful, situation- and user-oriented information as well as a good balance between knowledge and acceptable uncertainty. Experts highlighted that it is essential to ensure that the AI tool functions exactly as it is expected. Rather than detailed insights into the inner workings of an AI system, clarifying what knowledge is needed by which actors and areas of application has been emphasized. For instance, in the clinical context, it was deemed essential to know how the AI tool was built, eg, the type of dataset that was used to train the algorithm, its limitations and biases, the accuracy of the performance, but also the uncertainty of the outcome.</p>
      <p>User-oriented knowledge takes several information needs of different stakeholders into account, such as developers, users, and patients, and these vary. For instance, developers benefit from multiple explainability techniques to identify biases to make sure that the predictions are not based on spurious correlations in the training data. In this respect, medical doctors might benefit from highlighting the part of the x-ray image that indicates the presence of cancer. From the patient’s perspective, explainability is of interest primarily through the attending physician’s judgment and experience. Patients may trust their physician more than they rely on the explainability of the AI system.</p>
      <p>The requirement for specific types of knowledge reflects the relational aspects associated with explainability and AI trustworthiness. Several quantitative and qualitative explainability methods are used in AI development to understand why and how AI systems make specific predictions [<xref ref-type="bibr" rid="ref77">77</xref>]. However, it is crucial to involve physicians during the tool development and testing stages in a human-in-the-loop manner. As domain experts, physicians use explainability tools to verify the accuracy of system predictions based on correct premises.</p>
      <p>Explainability methods are essential for AI-based software as a medical device. Daily operation of the tool does not necessarily require explainability, though it becomes useful if medical professionals disagree with its predictions. In such cases, explainability algorithms help verify the prediction premises. Local visualization methods like heat maps or attention maps, supported by uncertainty metrics, are commonly used in image-based analysis as they are easier for nondevelopers to understand. Clinicians can then make informed decisions on whether to accept or reject the predictions based on their knowledge and judgment. However, few doctors are currently aware of how to review the technical aspects of AI tools. Therefore, it is essential that all information in training materials is presented in an understandable way, using methods and metrics that are familiar to health care professionals, and that professional associations are involved as institutional stakeholders.</p>
      <p>Medical AI technologies should include alert systems to warn users when cases fall “out of scope” or outside the tool’s “intended use,” ensuring the tool is not applied to patients it was not designed for, thereby preventing potential harm. For instance, if an AI system for breast cancer screening was primarily trained on elderly patients with low-density breasts, its performance may be inferior for young patients with high-density breasts. Doctors should be informed of these limitations during training and should be alerted when the system is used in these cases. In this regard, AI tools in oncological imaging must meet the AI Act’s high-risk requirements, including transparency about dataset limitations, essential for building trust among clinicians. Similarly, genomics tools such as polygenic risk scores [<xref ref-type="bibr" rid="ref13">13</xref>] and biomarkers based on multiomics data [<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref79">79</xref>] raise concerns about bias and generalizability, and in line with the obligations for documentation and human oversight of the AI Act, these risks should be addressed, and trustworthy use in clinical practice should be supported.</p>
      <p>The additional training for medical professionals in the use of AI should include methods for quick and reliable assessment of tools in daily practice. This will enhance technical literacy in health care, demystify AI’s potential, and clarify the tools’ technical capabilities, thereby increasing trust. Nonetheless, the required training should be sufficient for safe use but limited in terms of attention and time consumption. AI-driven software, as another set of tools in daily practice, should integrate seamlessly into workflows without causing major interruptions. Doctors should be able to focus on patient care and medical knowledge rather than learning AI’s inner workings. Therefore, adequate education and training at the time of AI-tool implementation, along with clear documentation, explainability, uncertainty metrics, and alert systems, are essential for building trust in AI solutions among health care professionals and other users.</p>
    </sec>
    <sec>
      <title>Situating Humans: Human-in-the-Loop, Human in the Center, Humans in Loops of Trust</title>
      <p>Approaches from the ethics of AI perspective emphasize that AI should involve human features, that humans need to be in the loop of AI processes, or that humans need to have the last word in decision-making to ensure the trustworthiness of an AI system [<xref ref-type="bibr" rid="ref80">80</xref>]. According to the EU AI Act, AI should be human-centric, and thus, it should involve human-in-the-loop approaches that ensure the involvement of humans in all stages of medical AI development and testing. As mentioned earlier, trust is related to several interpersonal relationships and institutional conditions, and in this case, also to automated or autonomous technologies. Therefore, rather than asking <italic>how</italic> humans can be integrated into the AI loop, the situatedness of humans in terms of where and when within the entanglement of social-technological and institutional relations in connection to trust should receive further attention.</p>
      <p>Workshop outcomes emphasize that a tool’s trustworthiness is established through trust in humans (and their expertise), such as developers or physicians, and relationships. Moreover, trusting relationships are framed by trust in the social system or institutional framework in which it is embedded. Therefore, trust can be considered a dynamic process involving several actors and interactions among individuals, technologies, and institutions. Tackling issues around trust in medical AI necessitates emphasizing its multifaceted character, situatedness, and contextuality. In this regard, trust evolves as a dynamic process, spanning machine reliability, human relationships, and broader scientific skepticism. In addition, it also has a temporal dimension, as past experiences significantly influence perceptions of AI reliability in medicine.</p>
      <p>In all its complex multidimensionality, which was discussed during the workshop, trust places humans at the center with the claim that we trust people, not just tools. Tools are judged on their reliability and credibility. For instance, an AI tool is credible and reliable when the system functions in a way that the individual expects and expectations themselves are cascading, considering that the designer, producer, the user (eg, medical doctor or the patient) may have different expectations that can be tied to each other as well as their own situatedness. The integration of AI into real-world medical settings relies on its promised functionality, such as accurate disease prediction or classification. This includes defining features, protocols, and patient-centered considerations during the design and implementation phases, including quality control after implementation. Ultimately, trust is built by humans, such as developers and physicians, who collaborate in increasingly multidisciplinary and international teams, and it is also reenacted in settings that cannot be completely predefined and restricted.</p>
      <p>Various examples of AI tools used in clinical settings allow insights into how the human is situated. For instance, against a rapidly expanding plethora of AI tools, the clearance of these by the Food and Drug Administration in the US context relies on categorization of tools as low-risk and high-risk [<xref ref-type="bibr" rid="ref81">81</xref>]. In the clinical context, however, many factors impact how risks are understood in practice, such as whether the tool is used to assist with a process under the clinician’s complete control, such as a tool assisting a cardiologist with drawing the contours of the heart for consultation. On the contrary, there are also cases where the clinician must assume the reliability of tools, where institutional structures, such as health ministries and researchers, have already considered potential biases of the datasets that informed the algorithm, as well as during the entire development, validation, and auditing process. While the algorithmic impact assessment is expected to be performed by other experts before tools make it to clinical practice, the consequences of the opposite could be drastic for the patients and doctors: for instance, a tool that does not consider the breast glandular tissue of individuals according to varying lifestyles and demographics may directly impact the outcome of diagnosis or intervention.</p>
      <p>The integration of AI tools into human medical professionals’ practices presents a challenge as AI systems and medical professionals are intrinsically different in terms of the registers that they rely on. AI systems rely on mathematical rules, functions, and statistical algorithms, enabling their performance to be assessed with mathematical calculability. However, a major concern regarding the credibility of AI in medicine is the accurate translation of these mathematical capabilities into meaningful clinical value. One aspect that is relevant here is accuracy. The possible expectation for health care professionals to verify all AI-driven recommendations as an additional task to their daily routine to avoid harm in some patients, rather than only those flagged as potentially erroneous, would pose a significant impediment to the trustworthiness and widespread adoption of these technologies as standalone tools without human oversight.</p>
      <p>The balance between sensitivity and specificity is particularly challenging in medical domains, such as oncology. Screening tools, like those used for breast cancer detection, where the prevalence of cancer in the examined samples is low, require high sensitivity to ensure that all potential cases are identified, even at the expense of a higher rate of false positives. The sensitivity of the AI tool should at least match, if not exceed, the average sensitivity of radiologists in national screening programs [<xref ref-type="bibr" rid="ref82">82</xref>]. In such scenarios, the false-positive results can be addressed through subsequent verification procedures, such as biopsies. While the cost of a false positive result may be less harmful than missing the cancer, it can still have a significant impact on patients receiving misleading information [<xref ref-type="bibr" rid="ref83">83</xref>], exemplifying the multidimensionality of AI’s use. Conversely, in applications, such as those aimed at distinguishing small liver tumors from other lesions, high specificity is essential for early and accurate diagnosis, enabling timely treatment and improved patient outcomes.</p>
      <p>The question, therefore, is: what level of accuracy and precision would be deemed “good enough” for specific clinical applications to foster trust and drive the wider adoption of AI tools in medicine? Addressing this challenge requires human understanding and judgment of the trade-offs among sensitivity, specificity, and the practical implications for health care professionals and patients.</p>
    </sec>
    <sec>
      <title>Conclusions</title>
      <p>This article centers on a synthesis of multidisciplinary discussions on trustworthy AI during an expert workshop, which allows us to underscore the complexities surrounding this topic in the evolving landscape of medical technology. The findings highlighted the nexus of knowledge on trust, emphasizing the situatedness of the human. Thus, conceptualizing and building trustworthiness in AI requires a comprehensive, multifaceted approach.</p>
      <p>Trust and trustworthiness are not legal concepts. However, trustworthiness can be a feature of an AI system, and trust can be a purpose and effect of a law. Such trust can be reached by providing standards for trustworthy AI, including (1) transparent rules on roles, responsibilities, and procedures for AI development and by enforcing those rules through liability norms as well as through clear consequences for violation of duties; (2) quality and security features for AI tools; and (3) opportunities for public participation and debate. Clear, well-implemented, and effectively enforced laws contribute to public trust. Therefore, AI laws should prioritize defining roles, procedures, responsibilities, and liabilities alongside establishing efficient systems for control and enforcement. For example, the EU AI Act focuses on trustworthiness to foster trust through regulation [<xref ref-type="bibr" rid="ref84">84</xref>]. Our article demonstrates that scholars from various domains converge on the understanding that achieving trust in AI is complex and involves individuals, making this legal expectation a desirable mission reflected in global AI regulations.</p>
      <p>Contrary to the “move fast, break things” innovation maxim, setting high standards for quality and security is essential to maintaining the reliability and integrity of AI tools. Trust in AI is not an inherent feature, but a belief held by users. Trust involves the procedures, steps, and individuals behind the creation, use, and maintenance of AI tools, including developers, health care professionals, and update teams. Therefore, AI developers must build reliable systems to earn public trust and uphold their reputation. User trust relies on the AI’s trustworthiness, which depends on the transparency and verifiability of its development processes. To incorporate elements of human oversight by design, developers, for instance, strive more and more to visualize explainability in user interfaces when users interact with AI [<xref ref-type="bibr" rid="ref85">85</xref>].</p>
      <p>Historically, medicine embraced the authority of individual experts. In the 20th century, the concept of the “eminent expert” was replaced by evidence-based medicine (EBM) [<xref ref-type="bibr" rid="ref86">86</xref>], which relies on scientific evidence produced by the broader scientific community. EBM establishes trust in new diagnostic and treatment methods. Similarly, the credibility of medical AI tools will grow with increasing good-quality evidence and defined accuracy and precision levels needed for specific tasks.</p>
      <p>Researchers in the field of ethics of AI in medicine must strive for accuracy and precision by providing clear definitions for concepts, such as trustworthiness or trust in specific contexts, and situating them within broader societal issues. It is crucial to analyze the complex relationship between trustworthiness, trust, and explainability [<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref88">88</xref>] and to find out what kind of explanations are required for specific situations and applications in medicine to adapt procedures accordingly. Interdisciplinary research, involving social scientists and clinicians, is crucial to incorporating clinical concepts [<xref ref-type="bibr" rid="ref89">89</xref>]. This interdisciplinary approach involves a layered understanding, where ethical, societal, and legal issues from AI and clinical applications add further risks and complexities when combined [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
      <p>Experts highlighted that trust in AI systems is bolstered not only by understanding how these tools operate but also by ensuring a balance between knowledge and acceptable uncertainty. Users need confidence that AI functions as intended.</p>
      <p>Explainability can also play a role in the users’ and patients’ right to know, which affects their autonomy and agency to make informed decisions [<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref91">91</xref>]. Transparency is crucial; however, our findings highlight the importance of providing context-specific, stakeholder-relevant information. Failure to uphold this right can result in epistemic injustices [<xref ref-type="bibr" rid="ref92">92</xref>-<xref ref-type="bibr" rid="ref95">95</xref>], especially for marginalized groups, who may be denied knowledge that may affect their rights and well-being. Upholding the right to know helps prevent epistemic injustices and enhances patients’ ability to comprehend the rationale behind diagnoses, treatment recommendations, and the use of AI tools in their care.</p>
      <p>Our findings emphasize that establishing AI trustworthiness in health care requires a robust, human-centered, multistakeholder approach. Historically, AI tools have been predominantly engineered with limited input beyond technical development teams, often with minimal involvement from health care professionals. Documented real-world examples that fulfill all the requirements for trustworthy AI discussed in this article are scarce, and most scientific research focuses on evaluating some dimensions of trustworthiness, for example, by providing methodology to assess AI tools in diverse populations [<xref ref-type="bibr" rid="ref96">96</xref>]. It is worth noting that major private AI companies do report involving ethics and safety teams as part of their development, with parallels to our discussion, including techniques such as the use of external red-teaming to ensure privacy and safety, stakeholder engagement, and frameworks to assess biases and other social and ethical risks (for instance, Google DeepMind’s three-layered framework for evaluating the social and ethical risks of AI systems). However, the fact that many of the state-of-the-art AI models still remain black-box in terms of their inner workings and training data poses a challenge to their wider use in health care. In this regard, several engagement techniques can be used to collect adequate insights and continuous feedback from stakeholders. It is crucial that engagement activities are diverse and inclusive to ensure the AI tool is designed for all and to promote equity in AI-mediated health care.</p>
      <p>Engaging only stakeholders with high education and digital proficiency may result in tools that are inaccessible to individuals with low digital literacy, which may impact health outcomes [<xref ref-type="bibr" rid="ref97">97</xref>]. Including vulnerable groups and minorities in AI design and development can help develop tools that are respectful of diverse needs and contexts. Hence, we recommend that AI development teams should involve all relevant stakeholders at all stages of the AI life cycle, that is, conception, technical development, clinical validation, and real-world deployment (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>).</p>
      <boxed-text id="box1" position="float">
        <title>Authors’ recommendations for involving relevant stakeholders across all stages of the AI (artificial intelligence) life cycle.</title>
        <p>The authors provide recommendations for involving relevant stakeholders at each stage of the AI life cycle, emphasizing continuous engagement to ensure transparency, accountability, and inclusiveness throughout the development and deployment of AI systems.</p>
        <p><bold>AI design phase</bold>:</p>
        <p>Depending on the AI application, a range of health care professionals, such as general practitioners, specialists (domain experts), health care managers, nurses, and technicians, should be engaged for requirements elicitation. This includes defining the intended use of AI tools, clinical endpoints, success criteria, and the specific requirements for trustworthiness and transparency. Furthermore, clinicians can help specify the most adequate approaches for explainability, the types of explanations needed, and the conditions under which alerts or warnings should be issued.</p>
        <p>Beyond clinicians, patient engagement is vital for identifying user needs, preferences, and potential barriers to trust and adherence to AI-mediated care. Ethicists and social scientists should also play a crucial role during conception, especially for anticipating the application-specific ethical and social impacts of the AI tools, such as misalignments with fundamental rights, effects on deskilling, changes in power relationships, and alterations in human behaviors as AI is integrated into care settings. Considering the approaches of trustworthy AI in <xref ref-type="table" rid="table1">Table 1</xref>, they can also apply qualitative research methods to examine the intentions and actions of the AI developers, understand the AI-mediated doctor-patient relationship, and assess the trustworthiness and perspectives of the institutions involved.</p>
        <p><bold>AI development phase</bold>:</p>
        <p>At this stage, the AI team should focus on translating the stakeholder-defined requirements and identified risks into development strategies, including mitigation measures. This includes compiling diverse and representative training datasets, employing machine learning methods that minimize potential biases, and developing AI-human interfaces that enhance user interaction and comprehension of the AI system. It is also important to ensure that the AI tool’s technical development considers existing care models for seamless integration into real-world practice, thereby adding value without disrupting established workflows. During the development phase, it is important that stakeholders continue to be engaged, so they can monitor the technical developments and provide continuous feedback on the AI tool’s anticipated level of trustworthiness.</p>
        <p><bold>AI validation phase</bold>:</p>
        <p>During the validation phase, it is important to assess the AI tool’s trustworthiness across multiple dimensions, including robustness under real-world conditions, level of transparency and explainability, fairness concerning diverse groups, usability in practice, and ethical and social compliance. This phase should continue to engage all stakeholders, including social scientists to evaluate the socio-behavioral implications of the AI tools on end users, such as whether the tools and their explanations enhance or diminish user confidence and trust, affect users’ ability to retain judgment when using the AI tool, and improve or degrade doctor-patient relationships.</p>
        <p>
          <bold>AI deployment phase:</bold>
        </p>
        <p>Once the tool is validated, certified, and deployed, a multistakeholder team must continue to monitor its performance and impact in real-world settings. This includes conducting periodic evaluations and audits to identify any performance degradation or emerging ethical issues, implementing logging systems to enhance traceability and accountability, and ensuring robust human oversight mechanisms are in place. These steps are crucial for maintaining user trust and ensuring that human autonomy is respected, demonstrating that there is adequate governance surrounding the use, maintenance, and oversight of the AI tool.</p>
      </boxed-text>
      <p>By adhering to a human-centered, multicentered framework, AI development teams can create tools that are not only technically efficient but also ethically sound and broadly accepted by all relevant users, groups, and institutions. This inclusive approach ensures that AI systems are developed with an in-depth understanding of the various contexts in which they will operate, leading to more effective and equitable AI-mediated health care.</p>
      <p>Recent years have witnessed a surge of foundation models, that is, AI models trained with massive data and computational resources, which can also solve medical problems out-of-the-box. Though not the focus of this article, the aspects of trust discussed here can also be relevant when pretraining foundation models or fine-tuning them for deployment in health care use cases with additional local data. Studies focusing on the trustworthiness of foundation models have started to emerge [<xref ref-type="bibr" rid="ref98">98</xref>], and we expect more work in this direction in the future.</p>
      <p>Finally, future research should examine how the legal notion of “trustworthy AI” under the EU AI Act is interpreted and implemented across different member states, especially in clinical contexts. Comparative legal analysis could reveal how national competent authorities enforce trust-related obligations, such as human oversight or transparency in medical AI systems. Furthermore, empirical studies are needed to assess whether regulatory compliance translates into perceived trust among clinicians and patients. For instance, guidelines and recommendations, whether they have been established top down by policy makers or bottom up by the scientific community and practitioners, need to be tested to see if they are the effective and practical governance tools they are intended to be. This requires an interdisciplinary effort by asking if their uptake by individuals as well as institutions is meaningful in practice. Consider, for example, the assessment of the added value of the inclusion of patient and citizen groups in the design, validation, and deployment phases of AI systems and the development of standardized frameworks for measuring trust across stakeholders and exploring how such trust is cultivated or undermined over time. These also involve evaluating the various roles of stakeholders and experts in these processes, particularly with regard to responsibility, distribution of roles, and power. To determine whether trust-building is effective, new methodologies will be required and should go beyond the quantitative measurement of key performance indicators and allow for thorough qualitative assessments that enable agile management.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AI HLEG</term>
          <def>
            <p>High-Level Expert Group on Artificial Intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">EBM</term>
          <def>
            <p>evidence-based medicine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EU</term>
          <def>
            <p>European Union</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">FUTURE-AI</term>
          <def>
            <p>Fairness, Universality, Traceability, Usability, Robustness, and Explainability–artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MDR</term>
          <def>
            <p>Medical Device Regulation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">STS</term>
          <def>
            <p>science and technology studies</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">WHO</term>
          <def>
            <p>World Health Organization</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was supported by the European Union’s Horizon 2020 projects EuCanImage (grant agreement No 952103) and INTERVENE (grant agreement No 101016775), as well as by the Innovative Medicines Initiative 2 Joint Undertaking project Bigpicture (grant agreement No 945358) and BIOMAP, which has received funding from the Innovative Medicines Initiative 2 Joint Undertaking (JU; grant agreement No 821511). The JU receives support from the European Union’s Horizon 2020 research and innovation program and EFPIA. This publication reflects only the author’s view and the JU is not responsible for any use that may be made of the information it contains. PM received funding from the Research Council of Finland (Finnish Center for Artificial Intelligence FCAI, and grants 352986, 358246, NextGenerationEU). Where authors are identified as personnel of the Biobanking and BioMolecular resources Research Infrastructure (BBMRI-ERIC), the authors alone are responsible for the views expressed in this article, and they do not necessarily represent the decisions, policy, or views of BBMRI-ERIC. Special thanks to Verena Borecký for proofreading the article.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Ethics guidelines for trustworthy AI</article-title>
          <source>European Commission</source>
          <year>2019</year>
          <access-date>2021-12-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai">https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>Ethics and governance of artificial intelligence for health: WHO guidance</article-title>
          <source>World Health Organization</source>
          <year>2021</year>
          <access-date>2024-10-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://iris.who.int/bitstream/handle/10665/341996/9789240029200-eng.pdf?sequence=1">https://iris.who.int/bitstream/handle/10665/341996/9789240029200-eng.pdf?sequence=1</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lekadir</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Porras</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare</article-title>
          <source>BMJ</source>
          <year>2025</year>
          <volume>388</volume>
          <fpage>r340</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bmj.com/lookup/pmidlookup?view=long&#38;pmid=39961614"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.r340</pub-id>
          <pub-id pub-id-type="medline">39961614</pub-id>
          <pub-id pub-id-type="pmcid">PMC11832024</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Serban</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Blom</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hoos</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Visser</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Practices for engineering trustworthy machine learning applications</article-title>
          <source>2021 IEEE/ACM 1st Workshop on AI Engineering - Software Engineering for AI (WAIN)</source>
          <year>2021</year>
          <publisher-loc>Piscataway, NJ</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>97</fpage>
          <lpage>100</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mittelstadt</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Principles alone cannot guarantee ethical AI</article-title>
          <source>Nat Mach Intell</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>11</issue>
          <fpage>501</fpage>
          <lpage>507</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-019-0114-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cowls</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Taddeo</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>How to design AI for social good: seven essential factors</article-title>
          <source>Ethics, Governance, and Policies in Artificial Intelligence</source>
          <year>2021</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>125</fpage>
          <lpage>151</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hagendorff</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The ethics of AI ethics: an evaluation of guidelines</article-title>
          <source>Minds Mach</source>
          <year>2020</year>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>99</fpage>
          <lpage>120</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s11023-020-09517-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11023-020-09517-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zink</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Fair regression for health care spending</article-title>
          <source>Biometrics</source>
          <year>2020</year>
          <volume>76</volume>
          <issue>3</issue>
          <fpage>973</fpage>
          <lpage>982</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31860120"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/biom.13206</pub-id>
          <pub-id pub-id-type="medline">31860120</pub-id>
          <pub-id pub-id-type="pmcid">PMC7540596</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DeCamp</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tilburt</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Why we cannot trust artificial intelligence in medicine</article-title>
          <source>Lancet Digital Health</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>8</issue>
          <fpage>e390</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/S2589-7500(19)30197-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(19)30197-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hatherley</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Limits of trust in medical AI</article-title>
          <source>J Med Ethics</source>
          <year>2020</year>
          <volume>46</volume>
          <issue>7</issue>
          <fpage>478</fpage>
          <lpage>481</lpage>
          <pub-id pub-id-type="doi">10.1136/medethics-2019-105935</pub-id>
          <pub-id pub-id-type="medline">32220870</pub-id>
          <pub-id pub-id-type="pii">medethics-2019-105935</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gille</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jobin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>What we talk about when we talk about trust: Theory of trust for AI in healthcare</article-title>
          <source>Intell Based Med</source>
          <year>2020</year>
          <volume>1-2</volume>
          <fpage>100001</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.ibmed.2020.100001"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ibmed.2020.100001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goisauf</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cano Abadía</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Ethics of AI in radiology: a review of ethical and societal implications</article-title>
          <source>Front Big Data</source>
          <year>2022</year>
          <volume>5</volume>
          <issue>850383</issue>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35910490"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdata.2022.850383</pub-id>
          <pub-id pub-id-type="medline">35910490</pub-id>
          <pub-id pub-id-type="pmcid">PMC9329694</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fritzsche</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Akyüz</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cano Abadía</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McLennan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Marttinen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mayrhofer</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Buyx</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Ethical layering in AI-driven polygenic risk scores-new complexities, new challenges</article-title>
          <source>Front Genet</source>
          <year>2023</year>
          <volume>14</volume>
          <fpage>1098439</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36816027"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fgene.2023.1098439</pub-id>
          <pub-id pub-id-type="medline">36816027</pub-id>
          <pub-id pub-id-type="pii">1098439</pub-id>
          <pub-id pub-id-type="pmcid">PMC9933509</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Geis</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Brady</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Spencer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ranschaert</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Jaremko</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Langer</surname>
              <given-names>SG</given-names>
            </name>
            <name name-style="western">
              <surname>Kitts</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Birch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shields</surname>
              <given-names>WF</given-names>
            </name>
            <name name-style="western">
              <surname>van den Hoven van Genderen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kotter</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gichoya</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Cook</surname>
              <given-names>TS</given-names>
            </name>
            <name name-style="western">
              <surname>Morgan</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Safdar</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Kohli</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Ethics of artificial intelligence in radiology: summary of the joint European and North American multisociety statement</article-title>
          <source>Can Assoc Radiol J</source>
          <year>2019</year>
          <volume>70</volume>
          <issue>4</issue>
          <fpage>329</fpage>
          <lpage>334</lpage>
          <pub-id pub-id-type="doi">10.1016/j.carj.2019.08.010</pub-id>
          <pub-id pub-id-type="medline">31585825</pub-id>
          <pub-id pub-id-type="pii">S0846-5371(19)30130-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hawley</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Trust, distrust and commitment</article-title>
          <source>Noûs</source>
          <year>2012</year>
          <volume>48</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>20</lpage>
          <pub-id pub-id-type="doi">10.1111/nous.12000</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Trustworthiness</article-title>
          <source>Ethics</source>
          <year>2012</year>
          <volume>123</volume>
          <issue>1</issue>
          <fpage>61</fpage>
          <lpage>85</lpage>
          <pub-id pub-id-type="doi">10.1086/667838</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wyatt</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Harris</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Illness online: Self-reported data and questions of trust in medical and social research</article-title>
          <source>Theory Cult Soc</source>
          <year>2013</year>
          <volume>30</volume>
          <issue>4</issue>
          <fpage>131</fpage>
          <lpage>150</lpage>
          <pub-id pub-id-type="doi">10.1177/0263276413485900</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bijker</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Sauerwein</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Bijker</surname>
              <given-names>WE</given-names>
            </name>
          </person-group>
          <article-title>Controlled human malaria infection trials: how tandems of trust and control construct scientific knowledge</article-title>
          <source>Soc Stud Sci</source>
          <year>2016</year>
          <volume>46</volume>
          <issue>1</issue>
          <fpage>56</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1177/0306312715619784</pub-id>
          <pub-id pub-id-type="medline">26983172</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Werner-Felmayer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Minari</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schicktanz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Raz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sharon</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Data-intensive medicine and healthcare: ethical and social implications in the era of artificial intelligence and automated decision making</article-title>
          <source>Frontiers in Genetics</source>
          <year>2023</year>
          <publisher-loc>Lausanne</publisher-loc>
          <publisher-name>Frontiers Media SA</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Owens</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Those designing healthcare algorithms must become actively anti-racist</article-title>
          <source>Nat Med</source>
          <year>2020</year>
          <volume>26</volume>
          <issue>9</issue>
          <fpage>1327</fpage>
          <lpage>1328</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32908272"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41591-020-1020-3</pub-id>
          <pub-id pub-id-type="medline">32908272</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-020-1020-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC7810137</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Quinn</surname>
              <given-names>TP</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobs</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Senadeera</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Coghlan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The three ghosts of medical AI: can the black-box present deliver?</article-title>
          <source>Artif Intell Med</source>
          <year>2022</year>
          <volume>124</volume>
          <fpage>102158</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.artmed.2021.102158"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2021.102158</pub-id>
          <pub-id pub-id-type="medline">34511267</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(21)00151-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rasheed</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Qayyum</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ghaly</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Fuqaha</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Razi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Qadir</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Explainable, trustworthy, and ethical machine learning for healthcare: a survey</article-title>
          <source>Comput Biol Med</source>
          <year>2022</year>
          <volume>149</volume>
          <fpage>106043</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0010-4825(22)00756-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106043</pub-id>
          <pub-id pub-id-type="medline">36115302</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(22)00756-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Cellini</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Charpignon</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Dee</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Dernoncourt</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Eber</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>WG</given-names>
            </name>
            <name name-style="western">
              <surname>Moukheiber</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Schirmer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Situ</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Paguio</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wawira</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>S</given-names>
            </name>
            <collab>for MIT Critical Data</collab>
          </person-group>
          <article-title>Sources of bias in artificial intelligence that perpetuate healthcare disparities-a global review</article-title>
          <source>PLOS Digit Health</source>
          <year>2022</year>
          <volume>1</volume>
          <issue>3</issue>
          <fpage>e0000022</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812532"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000022</pub-id>
          <pub-id pub-id-type="medline">36812532</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-21-00034</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931338</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seyyed-Kalantari</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>McDermott</surname>
              <given-names>MBA</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>IY</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Underdiagnosis bias of artificial intelligence algorithms applied to chest radiographs in under-served patient populations</article-title>
          <source>Nat Med</source>
          <year>2021</year>
          <volume>27</volume>
          <issue>12</issue>
          <fpage>2176</fpage>
          <lpage>2182</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34893776"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41591-021-01595-0</pub-id>
          <pub-id pub-id-type="medline">34893776</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-021-01595-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC8674135</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DeCamp</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lindvall</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Latent bias and the implementation of artificial intelligence in medicine</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2020</year>
          <volume>27</volume>
          <issue>12</issue>
          <fpage>2020</fpage>
          <lpage>2023</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32574353"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocaa094</pub-id>
          <pub-id pub-id-type="medline">32574353</pub-id>
          <pub-id pub-id-type="pii">5859726</pub-id>
          <pub-id pub-id-type="pmcid">PMC7727353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Vassilev</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Greene</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Perine</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Burt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Towards a Standard for Identifying and Managing Bias in Artificial Intelligence</article-title>
          <source>NIST Special Publication 1270</source>
          <year>2022</year>
          <access-date>2024-11-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.dwt.com/-/media/files/blogs/artificial-intelligence-law-advisor/2022/03/nist-sp-1270--identifying-and-managing-bias-in-ai.pdf">https://www.dwt.com/-/media/files/blogs/artificial-intelligence-law-advisor/2022/03/nist-sp-1270--identifying-and-managing-bias-in-ai.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simion</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kelp</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Trustworthy artificial intelligence</article-title>
          <source>AJPH</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>8</fpage>
          <pub-id pub-id-type="doi">10.1007/s44204-023-00063-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Starke</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Misplaced trust and distrust: how not to engage with medical artificial intelligence</article-title>
          <source>Camb Q Healthc Ethics</source>
          <year>2024</year>
          <volume>33</volume>
          <issue>3</issue>
          <fpage>360</fpage>
          <lpage>369</lpage>
          <pub-id pub-id-type="doi">10.1017/s0963180122000445</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grote</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Trustworthy medical AI systems need to know when they don't know</article-title>
          <source>J Med Ethics</source>
          <year>2021</year>
          <volume>47</volume>
          <fpage>337</fpage>
          <lpage>338</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1136/medethics-2021-107463"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/medethics-2021-107463</pub-id>
          <pub-id pub-id-type="medline">33849959</pub-id>
          <pub-id pub-id-type="pii">medethics-2021-107463</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grote</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Berens</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>On the ethics of algorithmic decision-making in healthcare</article-title>
          <source>J Med Ethics</source>
          <year>2020</year>
          <volume>46</volume>
          <issue>3</issue>
          <fpage>205</fpage>
          <lpage>211</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://jme.bmj.com/lookup/pmidlookup?view=long&#38;pmid=31748206"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/medethics-2019-105586</pub-id>
          <pub-id pub-id-type="medline">31748206</pub-id>
          <pub-id pub-id-type="pii">medethics-2019-105586</pub-id>
          <pub-id pub-id-type="pmcid">PMC7042960</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Trust, power, and vulnerability: a discourse on helping in nursing</article-title>
          <source>Nurs Clin North Am</source>
          <year>2009</year>
          <volume>44</volume>
          <issue>4</issue>
          <fpage>393</fpage>
          <lpage>405</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/19850176/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cnur.2009.07.012</pub-id>
          <pub-id pub-id-type="medline">19850176</pub-id>
          <pub-id pub-id-type="pii">S0029-6465(09)00055-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baier</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Trust and antitrust</article-title>
          <source>Ethics</source>
          <year>1986</year>
          <volume>96</volume>
          <issue>2</issue>
          <fpage>231</fpage>
          <lpage>260</lpage>
          <pub-id pub-id-type="doi">10.1086/292745</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Butler</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>Frames of War: When is Life Grievable?</source>
          <year>2016</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Verso</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Butler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gambetti</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Sabsay</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <source>Vulnerability in Resistance</source>
          <year>2016</year>
          <publisher-loc>Durham, NC</publisher-loc>
          <publisher-name>Duke University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ferrarese</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Vulnerability and critical theory</article-title>
          <source>BRP Crit Theory</source>
          <year>2016</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>88</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1163/24519529-12340002"/>
          </comment>
          <pub-id pub-id-type="doi">10.1163/24519529-12340002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mackenzie</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Mackenzie</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Dodds</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Vulnerability: New Essays in Ethics Feminist Philosophy</source>
          <year>2014</year>
          <publisher-loc>England</publisher-loc>
          <publisher-name>Oxford University Press</publisher-name>
          <fpage>33</fpage>
          <lpage>59</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ward</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Coveney</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Trust in the health system: an analysis and extension of the social theories of Giddens and Luhmann</article-title>
          <source>Health Sociol Rev</source>
          <year>2008</year>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>177</fpage>
          <lpage>186</lpage>
          <pub-id pub-id-type="doi">10.5172/hesr.451.17.2.177</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haraway</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>Situated knowledges: the science question in feminism and the privilege of partial perspective</article-title>
          <source>Fem Stud</source>
          <year>1988</year>
          <volume>14</volume>
          <issue>3</issue>
          <fpage>575</fpage>
          <lpage>599</lpage>
          <pub-id pub-id-type="doi">10.2307/3178066</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>HM</given-names>
            </name>
          </person-group>
          <article-title>The TEA set: Tacit knowledge and scientific networks</article-title>
          <source>Science Studies</source>
          <year>1974</year>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>165</fpage>
          <lpage>185</lpage>
          <pub-id pub-id-type="doi">10.1177/030631277400400203</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wynne</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Public uptake of science: a case for institutional reflexivity</article-title>
          <source>Public Underst Sci</source>
          <year>1993</year>
          <volume>2</volume>
          <issue>4</issue>
          <fpage>321</fpage>
          <lpage>337</lpage>
          <pub-id pub-id-type="doi">10.1088/0963-6625/2/4/003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Irwin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wynne</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <source>Misunderstanding science?: The Public Reconstruction of Science and Technology</source>
          <year>1996</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Cambridge University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wynne</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Public engagement as a means of restoring public trust in science--hitting the notes, but missing the music?</article-title>
          <source>Community Genet</source>
          <year>2006</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>211</fpage>
          <lpage>220</lpage>
          <pub-id pub-id-type="doi">10.1159/000092659</pub-id>
          <pub-id pub-id-type="medline">16741352</pub-id>
          <pub-id pub-id-type="pii">92659</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <source>World at Risk</source>
          <year>2009</year>
          <publisher-loc>Cambridge</publisher-loc>
          <publisher-name>Polity</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wynne</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Misunderstood misunderstanding: social identities and public uptake of science</article-title>
          <source>Public Underst Sci</source>
          <year>1992</year>
          <volume>1</volume>
          <issue>3</issue>
          <fpage>281</fpage>
          <lpage>304</lpage>
          <pub-id pub-id-type="doi">10.1088/0963-6625/1/3/004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hilgartner</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Science on Stage: Expert Advice as Public Drama</source>
          <year>2000</year>
          <publisher-loc>Stanford</publisher-loc>
          <publisher-name>Stanford University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Felt</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Jasanoff</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SH</given-names>
            </name>
          </person-group>
          <article-title>Keeping technologies out: Sociotechnical imaginaries and the formation of Austria's technopolitical identity</article-title>
          <source>Dreamscapes of Modernity: Sociotechnical Imaginaries and the Fabrication of Power</source>
          <year>2015</year>
          <publisher-loc>Chicago</publisher-loc>
          <publisher-name>University of Chicago Press</publisher-name>
          <fpage>103</fpage>
          <lpage>125</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rogers-Hayden</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pidgeon</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Moving engagement “upstream”? Nanotechnologies and the royal society and royal academy of engineering's inquiry</article-title>
          <source>Public Underst Sci</source>
          <year>2007</year>
          <volume>16</volume>
          <issue>3</issue>
          <fpage>345</fpage>
          <lpage>364</lpage>
          <pub-id pub-id-type="doi">10.1177/0963662506076141</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jasanoff</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Technologies of humility: citizen participation in governing science</article-title>
          <source>Minerva</source>
          <year>2003</year>
          <volume>41</volume>
          <issue>3</issue>
          <fpage>223</fpage>
          <lpage>244</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1025557512320</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jasanoff</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hurlbut</surname>
              <given-names>JB</given-names>
            </name>
          </person-group>
          <article-title>A global observatory for gene editing</article-title>
          <source>Nature</source>
          <year>2018</year>
          <volume>555</volume>
          <issue>7697</issue>
          <fpage>435</fpage>
          <lpage>437</lpage>
          <pub-id pub-id-type="doi">10.1038/d41586-018-03270-w</pub-id>
          <pub-id pub-id-type="medline">29565415</pub-id>
          <pub-id pub-id-type="pii">d41586-018-03270-w</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pellegrino</surname>
              <given-names>ED</given-names>
            </name>
            <name name-style="western">
              <surname>Thomasma</surname>
              <given-names>DC</given-names>
            </name>
          </person-group>
          <article-title>The conflict between autonomy and beneficence in medical ethics: proposal for a resolution</article-title>
          <source>J Contemp Health Law Policy</source>
          <year>1987</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>23</fpage>
          <lpage>46</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://scholarship.law.edu/jchlp/vol3/iss1/5"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Emanuel</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Emanuel</surname>
              <given-names>LL</given-names>
            </name>
          </person-group>
          <article-title>Four models of the physician-patient relationship</article-title>
          <source>JAMA</source>
          <year>1992</year>
          <volume>267</volume>
          <issue>16</issue>
          <fpage>2221</fpage>
          <lpage>2226</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.1992.03480160079038</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Kittay</surname>
              <given-names>EF</given-names>
            </name>
            <name name-style="western">
              <surname>Meyers</surname>
              <given-names>DT</given-names>
            </name>
          </person-group>
          <source>Women and Moral Theory</source>
          <year>1987</year>
          <publisher-loc>Totowa, NJ</publisher-loc>
          <publisher-name>Rowman &#38; Littlefield</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cotterrell</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Trusting in law: legal and moral concepts of trust</article-title>
          <source>Curr Leg Probl</source>
          <year>1993</year>
          <volume>46</volume>
          <issue>2</issue>
          <fpage>75</fpage>
          <lpage>95</lpage>
          <pub-id pub-id-type="doi">10.1093/clp/46.part_2.75</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cross</surname>
              <given-names>FB</given-names>
            </name>
          </person-group>
          <article-title>Law and trust</article-title>
          <source>Georgetown Law Journal</source>
          <year>2004</year>
          <volume>93</volume>
          <issue>5</issue>
          <fpage>1457</fpage>
          <lpage>1545</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/298545726_Law_and_trust"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="web">
          <article-title>Trusts</article-title>
          <source>Faculty of Law, University of Oxford</source>
          <access-date>2024-08-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.law.ox.ac.uk/content/trusts">https://www.law.ox.ac.uk/content/trusts</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Powell</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Good faith in contracts</article-title>
          <source>Curr Leg Probl</source>
          <year>1956</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>16</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1093/clp/9.1.16</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brownsword</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hird</surname>
              <given-names>NJ</given-names>
            </name>
            <name name-style="western">
              <surname>Howells</surname>
              <given-names>GG</given-names>
            </name>
          </person-group>
          <source>Good Faith in Contract: Concept and Context</source>
          <year>1999</year>
          <publisher-loc>Dartmouth</publisher-loc>
          <publisher-name>Ashgate</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ribstein</surname>
              <given-names>LE</given-names>
            </name>
          </person-group>
          <article-title>Law v. Trust</article-title>
          <source>SSRN</source>
          <year>2001</year>
          <volume>81</volume>
          <issue>3</issue>
          <fpage>553</fpage>
          <lpage>590</lpage>
          <pub-id pub-id-type="doi">10.2139/ssrn.247224</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hult</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Creating trust by means of legislation – a conceptual analysis and critical discussion</article-title>
          <source>Theory Pract Legis</source>
          <year>2018</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1080/20508840.2018.1434934</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greco</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>La Legge Della Fiducia: alle Radici del Diritto</source>
          <year>2021</year>
          <publisher-loc>Roma</publisher-loc>
          <publisher-name>Laterza</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smuha</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed-Rengers</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Harkens</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>MacLaren</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Piselli</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yeung</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>How the EU can achieve legally trustworthy AI: a response to the European Commission's proposal for an artificial intelligence act</article-title>
          <source>SSRN. Preprint posted online August 5,</source>
          <year>2021</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ssrn.com/abstract=3899991"/>
          </comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.3899991</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="web">
          <article-title>Corrigendum</article-title>
          <source>European Parliament</source>
          <year>2024</year>
          <access-date>2024-08-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.europarl.europa.eu/doceo/document/TA-9-2024-0138-FNL-COR01_EN.pdf">https://www.europarl.europa.eu/doceo/document/TA-9-2024-0138-FNL-COR01_EN.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="web">
          <article-title>EUR-Lex: access to European Union law</article-title>
          <source>European Union</source>
          <year>2024</year>
          <access-date>2024-09-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX%3A32017R0745">https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX%3A32017R0745</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="web">
          <article-title>EUR-Lex: access to European Union law</article-title>
          <source>European Union</source>
          <access-date>2024-09-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://eur-lex.europa.eu/eli/reg/2017/746/oj">https://eur-lex.europa.eu/eli/reg/2017/746/oj</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="web">
          <article-title>Products</article-title>
          <source>Radiology Health AI Register</source>
          <access-date>2024-09-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://radiology.healthairegister.com/products/">https://radiology.healthairegister.com/products/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Busch</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kather</surname>
              <given-names>JN</given-names>
            </name>
            <name name-style="western">
              <surname>Johner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Moser</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Bressem</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Navigating the European Union Artificial Intelligence Act for healthcare</article-title>
          <source>NPJ Digit Med</source>
          <year>2024</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>210</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-024-01213-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-024-01213-6</pub-id>
          <pub-id pub-id-type="medline">39134637</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-024-01213-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC11319791</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murtagh</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Minion</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Turner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Blell</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ochieng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Murtagh</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Roberts</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Butters</surname>
              <given-names>OW</given-names>
            </name>
            <name name-style="western">
              <surname>Burton</surname>
              <given-names>PR</given-names>
            </name>
          </person-group>
          <article-title>The ECOUTER methodology for stakeholder engagement in translational research</article-title>
          <source>BMC Med Ethics</source>
          <year>2017</year>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedethics.biomedcentral.com/articles/10.1186/s12910-017-0167-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12910-017-0167-z</pub-id>
          <pub-id pub-id-type="medline">28376776</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12910-017-0167-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC5379503</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bloor</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Frankland</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Robson</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <source>Focus Groups in Social Research</source>
          <year>2002</year>
          <publisher-loc>Thousand Oaks</publisher-loc>
          <publisher-name>Sage</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Butters</surname>
              <given-names>OW</given-names>
            </name>
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Minion</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Turner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Murtagh</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Digital methodology to implement the ECOUTER engagement process</article-title>
          <source>F1000Res</source>
          <year>2016</year>
          <volume>5</volume>
          <fpage>1307</fpage>
          <pub-id pub-id-type="doi">10.12688/f1000research.8786.1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Clarke</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>What can "thematic analysis" offer health and wellbeing researchers?</article-title>
          <source>Int J Qual Stud Health Well-being</source>
          <year>2014</year>
          <volume>9</volume>
          <fpage>26152</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.tandfonline.com/doi/10.3402/qhw.v9.26152?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.3402/qhw.v9.26152</pub-id>
          <pub-id pub-id-type="medline">25326092</pub-id>
          <pub-id pub-id-type="pii">26152</pub-id>
          <pub-id pub-id-type="pmcid">PMC4201665</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Terry</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hayfield</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Clarke</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Willig</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Stainton</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Thematic analysis</article-title>
          <source>The SAGE Handbook of Qualitative Research in Psychology</source>
          <year>2017</year>
          <publisher-loc>Thousand Oaks</publisher-loc>
          <publisher-name>Sage</publisher-name>
          <fpage>17</fpage>
          <lpage>37</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Markus</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kors</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rijnbeek</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The role of explainability in creating trustworthy artificial intelligence for health care: a comprehensive survey of the terminology, design choices, and evaluation strategies</article-title>
          <source>J Biomed Inform</source>
          <year>2021</year>
          <volume>113</volume>
          <fpage>103655</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(20)30283-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2020.103655</pub-id>
          <pub-id pub-id-type="medline">33309898</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(20)30283-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abdar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pourpanah</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rezazadegan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ghavamzadeh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fieguth</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Khosravi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Acharya</surname>
              <given-names>UR</given-names>
            </name>
            <name name-style="western">
              <surname>Makarenkov</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Nahavandi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A review of uncertainty quantification in deep learning: techniques, applications and challenges</article-title>
          <source>Inf Fusion</source>
          <year>2021</year>
          <volume>76</volume>
          <fpage>243</fpage>
          <lpage>297</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.inffus.2021.05.008"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2021.05.008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Merton</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>The normative structure of science</article-title>
          <source>The sociology of science: Theoretical and empirical investigations</source>
          <year>1973</year>
          <publisher-loc>Chicago</publisher-loc>
          <publisher-name>University of Chicago Press</publisher-name>
          <fpage>267</fpage>
          <lpage>278</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bjerring</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Busch</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and patient-centered decision-making</article-title>
          <source>Philos Technol</source>
          <year>2020</year>
          <volume>34</volume>
          <issue>2</issue>
          <fpage>349</fpage>
          <lpage>371</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-019-00391-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Janzing</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Schölkopf</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <source>Elements of Causal Inference: Foundations and Learning Algorithms</source>
          <year>2017</year>
          <publisher-loc>Cambridge, Massachusetts</publisher-loc>
          <publisher-name>The MIT Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Champendal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dos Reis</surname>
              <given-names>CS</given-names>
            </name>
          </person-group>
          <article-title>A scoping review of interpretability and explainability concerning artificial intelligence methods in medical imaging</article-title>
          <source>Eur J Radiol</source>
          <year>2023</year>
          <volume>169</volume>
          <fpage>111159</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0720-048X(23)00473-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ejrad.2023.111159</pub-id>
          <pub-id pub-id-type="medline">37976760</pub-id>
          <pub-id pub-id-type="pii">S0720-048X(23)00473-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hangel</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Buyx</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fritzsche</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The interrelation of scientific, ethical, and translational challenges for precision medicine with multimodal biomarkers - a qualitative expert interview study in dermatology research</article-title>
          <source>Heliyon</source>
          <year>2024</year>
          <volume>10</volume>
          <issue>13</issue>
          <fpage>e31723</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2405-8440(24)07754-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e31723</pub-id>
          <pub-id pub-id-type="medline">39040296</pub-id>
          <pub-id pub-id-type="pii">S2405-8440(24)07754-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC11260963</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fritzsche</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Buyx</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Hangel</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Mapping ethical and social aspects of biomarker research and its application in atopic dermatitis and psoriasis: a systematic review of reason</article-title>
          <source>J Eur Acad Dermatol Venereol</source>
          <year>2022</year>
          <volume>36</volume>
          <issue>8</issue>
          <fpage>1201</fpage>
          <lpage>1213</lpage>
          <pub-id pub-id-type="doi">10.1111/jdv.18128</pub-id>
          <pub-id pub-id-type="medline">35366351</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="web">
          <article-title>High-level expert group on Artificial Intelligence</article-title>
          <source>European Commission</source>
          <year>2019</year>
          <access-date>2022-11-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ec.europa.eu/futurium/en/system/files/ged/ai_hleg_policy_and_investment_recommendations.pdf">https://ec.europa.eu/futurium/en/system/files/ged/ai_hleg_policy_and_investment_recommendations.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Muehlematter</surname>
              <given-names>UJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bluethgen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vokinger</surname>
              <given-names>KN</given-names>
            </name>
          </person-group>
          <article-title>FDA-cleared artificial intelligence and machine learning-based medical devices and their 510(k) predicate networks</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>9</issue>
          <fpage>e618</fpage>
          <lpage>e626</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2589-7500(23)00126-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00126-7</pub-id>
          <pub-id pub-id-type="medline">37625896</pub-id>
          <pub-id pub-id-type="pii">S2589-7500(23)00126-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lehman</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Arao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sprague</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Buist</surname>
              <given-names>DSM</given-names>
            </name>
            <name name-style="western">
              <surname>Kerlikowske</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Henderson</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Onega</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tosteson</surname>
              <given-names>ANA</given-names>
            </name>
            <name name-style="western">
              <surname>Rauscher</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Miglioretti</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>National performance benchmarks for modern screening digital mammography: update from the breast cancer surveillance consortium</article-title>
          <source>Radiology</source>
          <year>2017</year>
          <volume>283</volume>
          <issue>1</issue>
          <fpage>49</fpage>
          <lpage>58</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/27918707"/>
          </comment>
          <pub-id pub-id-type="doi">10.1148/radiol.2016161174</pub-id>
          <pub-id pub-id-type="medline">27918707</pub-id>
          <pub-id pub-id-type="pmcid">PMC5375631</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rasmussen</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Siersma</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Malmqvist</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Brodersen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Psychosocial consequences of false positives in the Danish lung cancer CT screening trial: a nested matched cohort study</article-title>
          <source>BMJ Open</source>
          <year>2020</year>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>e034682</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmjopen.bmj.com/lookup/pmidlookup?view=long&#38;pmid=32503869"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjopen-2019-034682</pub-id>
          <pub-id pub-id-type="medline">32503869</pub-id>
          <pub-id pub-id-type="pii">bmjopen-2019-034682</pub-id>
          <pub-id pub-id-type="pmcid">PMC7279658</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tamò-Larrieux</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Guitton</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mayer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lutz</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Regulating for trust: can law establish trust in artificial intelligence?</article-title>
          <source>Regul Gov</source>
          <year>2023</year>
          <volume>18</volume>
          <issue>3</issue>
          <fpage>780</fpage>
          <lpage>801</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/rego.12568"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/rego.12568</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zatloukal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Is human oversight to AI systems still possible?</article-title>
          <source>N Biotechnol</source>
          <year>2025</year>
          <volume>85</volume>
          <fpage>59</fpage>
          <lpage>62</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1871-6784(24)00563-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.nbt.2024.12.003</pub-id>
          <pub-id pub-id-type="medline">39675423</pub-id>
          <pub-id pub-id-type="pii">S1871-6784(24)00563-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>Evidence-Based Medicine Working Group</collab>
          </person-group>
          <article-title>Evidence-based medicine. A new approach to teaching the practice of medicine</article-title>
          <source>JAMA</source>
          <year>1992</year>
          <volume>268</volume>
          <issue>17</issue>
          <fpage>2420</fpage>
          <lpage>2425</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.1992.03490170092032</pub-id>
          <pub-id pub-id-type="medline">1404801</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ferrario</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Loi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Viganò</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>In AI we trust incrementally: a multi-layer model of trust to analyze human-artificial intelligence interactions</article-title>
          <source>Philos Technol</source>
          <year>2019</year>
          <volume>33</volume>
          <issue>3</issue>
          <fpage>523</fpage>
          <lpage>539</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-019-00378-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arbelaez Ossa</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Starke</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lorenzini</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vogt</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Shaw</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Elger</surname>
              <given-names>BS</given-names>
            </name>
          </person-group>
          <article-title>Re-focusing explainability in medicine</article-title>
          <source>Digit Health</source>
          <year>2022</year>
          <volume>8</volume>
          <fpage>20552076221074488</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076221074488?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub++0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076221074488</pub-id>
          <pub-id pub-id-type="medline">35173981</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076221074488</pub-id>
          <pub-id pub-id-type="pmcid">PMC8841907</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lekadir</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>Alejandro F</given-names>
            </name>
            <name name-style="western">
              <surname>Porras</surname>
              <given-names>Antonio R</given-names>
            </name>
            <name name-style="western">
              <surname>Glocker</surname>
              <given-names>Ben</given-names>
            </name>
            <name name-style="western">
              <surname>Cintas</surname>
              <given-names>Celia</given-names>
            </name>
            <name name-style="western">
              <surname>Langlotz</surname>
              <given-names>Curtis P</given-names>
            </name>
            <name name-style="western">
              <surname>Weicken</surname>
              <given-names>Eva</given-names>
            </name>
            <name name-style="western">
              <surname>Asselbergs</surname>
              <given-names>Folkert W</given-names>
            </name>
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>Fred</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>Gary S</given-names>
            </name>
            <name name-style="western">
              <surname>Kaissis</surname>
              <given-names>Georgios</given-names>
            </name>
            <name name-style="western">
              <surname>Tsakou</surname>
              <given-names>Gianna</given-names>
            </name>
            <name name-style="western">
              <surname>Buvat</surname>
              <given-names>Irène</given-names>
            </name>
            <name name-style="western">
              <surname>Kalpathy-Cramer</surname>
              <given-names>Jayashree</given-names>
            </name>
            <name name-style="western">
              <surname>Mongan</surname>
              <given-names>John</given-names>
            </name>
            <name name-style="western">
              <surname>Schnabel</surname>
              <given-names>Julia A</given-names>
            </name>
            <name name-style="western">
              <surname>Kushibar</surname>
              <given-names>Kaisar</given-names>
            </name>
            <name name-style="western">
              <surname>Riklund</surname>
              <given-names>Katrine</given-names>
            </name>
            <name name-style="western">
              <surname>Marias</surname>
              <given-names>Kostas</given-names>
            </name>
            <name name-style="western">
              <surname>Amugongo</surname>
              <given-names>Lameck M</given-names>
            </name>
            <name name-style="western">
              <surname>Fromont</surname>
              <given-names>Lauren A</given-names>
            </name>
            <name name-style="western">
              <surname>Maier-Hein</surname>
              <given-names>Lena</given-names>
            </name>
            <name name-style="western">
              <surname>Cerdá-Alberich</surname>
              <given-names>Leonor</given-names>
            </name>
            <name name-style="western">
              <surname>Martí-Bonmatí</surname>
              <given-names>Luis</given-names>
            </name>
            <name name-style="western">
              <surname>Cardoso</surname>
              <given-names>M Jorge</given-names>
            </name>
            <name name-style="western">
              <surname>Bobowicz</surname>
              <given-names>Maciej</given-names>
            </name>
            <name name-style="western">
              <surname>Shabani</surname>
              <given-names>Mahsa</given-names>
            </name>
            <name name-style="western">
              <surname>Tsiknakis</surname>
              <given-names>Manolis</given-names>
            </name>
            <name name-style="western">
              <surname>Zuluaga</surname>
              <given-names>Maria A</given-names>
            </name>
            <name name-style="western">
              <surname>Fritzsche</surname>
              <given-names>Marie-Christine</given-names>
            </name>
            <name name-style="western">
              <surname>Camacho</surname>
              <given-names>Marina</given-names>
            </name>
            <name name-style="western">
              <surname>Linguraru</surname>
              <given-names>Marius George</given-names>
            </name>
            <name name-style="western">
              <surname>Wenzel</surname>
              <given-names>Markus</given-names>
            </name>
            <name name-style="western">
              <surname>De Bruijne</surname>
              <given-names>Marleen</given-names>
            </name>
            <name name-style="western">
              <surname>Tolsgaard</surname>
              <given-names>Martin G</given-names>
            </name>
            <name name-style="western">
              <surname>Goisauf</surname>
              <given-names>Melanie</given-names>
            </name>
            <name name-style="western">
              <surname>Cano Abadía</surname>
              <given-names>Mónica</given-names>
            </name>
            <name name-style="western">
              <surname>Papanikolaou</surname>
              <given-names>Nikolaos</given-names>
            </name>
            <name name-style="western">
              <surname>Lazrak</surname>
              <given-names>Noussair</given-names>
            </name>
            <name name-style="western">
              <surname>Pujol</surname>
              <given-names>Oriol</given-names>
            </name>
            <name name-style="western">
              <surname>Osuala</surname>
              <given-names>Richard</given-names>
            </name>
            <name name-style="western">
              <surname>Napel</surname>
              <given-names>Sandy</given-names>
            </name>
            <name name-style="western">
              <surname>Colantonio</surname>
              <given-names>Sara</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>Smriti</given-names>
            </name>
            <name name-style="western">
              <surname>Klein</surname>
              <given-names>Stefan</given-names>
            </name>
            <name name-style="western">
              <surname>Aussó</surname>
              <given-names>Susanna</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>Wendy A</given-names>
            </name>
            <name name-style="western">
              <surname>Salahuddin</surname>
              <given-names>Zohaib</given-names>
            </name>
            <name name-style="western">
              <surname>Starmans</surname>
              <given-names>Martijn P A</given-names>
            </name>
            <collab>FUTURE-AI Consortium</collab>
          </person-group>
          <article-title>FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare</article-title>
          <source>BMJ</source>
          <year>2025</year>
          <month>02</month>
          <day>05</day>
          <volume>388</volume>
          <fpage>e081554</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bmj.com/lookup/pmidlookup?view=long&#38;pmid=39909534"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj-2024-081554</pub-id>
          <pub-id pub-id-type="medline">39909534</pub-id>
          <pub-id pub-id-type="pmcid">PMC11795397</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giesen</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The patient's right to know--a comparative law perspective</article-title>
          <source>Med Law</source>
          <year>1993</year>
          <volume>12</volume>
          <issue>6-8</issue>
          <fpage>553</fpage>
          <lpage>565</lpage>
          <pub-id pub-id-type="medline">8183063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Inkeroinen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Virtanen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Stolt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leino-Kilpi</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Patients' right to know: a scoping review</article-title>
          <source>J Clin Nurs</source>
          <year>2023</year>
          <volume>32</volume>
          <issue>15-16</issue>
          <fpage>4311</fpage>
          <lpage>4324</lpage>
          <pub-id pub-id-type="doi">10.1111/jocn.16603</pub-id>
          <pub-id pub-id-type="medline">36550593</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fricker</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Kidd</surname>
              <given-names>IJ</given-names>
            </name>
            <name name-style="western">
              <surname>Medina</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pohlhaus</surname>
              <given-names>GJ</given-names>
            </name>
          </person-group>
          <article-title>Evolving concepts of epistemic injustice</article-title>
          <source>The Routledge handbook of epistemic injustice</source>
          <year>2017</year>
          <publisher-loc>United Kingdom</publisher-loc>
          <publisher-name>Routledge</publisher-name>
          <fpage>53</fpage>
          <lpage>60</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carel</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kidd</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Epistemic injustice in healthcare: a philosophial analysis</article-title>
          <source>Med Health Care Philos</source>
          <year>2014</year>
          <volume>17</volume>
          <issue>4</issue>
          <fpage>529</fpage>
          <lpage>540</lpage>
          <pub-id pub-id-type="doi">10.1007/s11019-014-9560-2</pub-id>
          <pub-id pub-id-type="medline">24740808</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pozzi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Durán</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>From ethics to epistemology and back again: informativeness and epistemic injustice in explanatory medical machine learning</article-title>
          <source>AI &#38; Soc</source>
          <year>2024</year>
          <volume>40</volume>
          <issue>2</issue>
          <fpage>299</fpage>
          <lpage>310</lpage>
          <pub-id pub-id-type="doi">10.1007/s00146-024-01875-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Perdomo Reyes</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Injusticia epistémica y reproducción de sesgos de género en la inteligencia artificial</article-title>
          <source>Rev Iberoam Cienc Tecnol Soc</source>
          <year>2024</year>
          <volume>19</volume>
          <issue>56</issue>
          <fpage>89</fpage>
          <lpage>100</lpage>
          <pub-id pub-id-type="doi">10.52712/issn.1850-0013-555</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fajtl</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Welikala</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Barman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chambers</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bolter</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Olvera-Barrios</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shakespeare</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Egan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Owen</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Tufail</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rudnicka</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>Trustworthy evaluation of clinical AI for analysis of medical images in diverse populations</article-title>
          <source>NEJM AI</source>
          <year>2024</year>
          <volume>1</volume>
          <issue>9</issue>
          <pub-id pub-id-type="doi">10.1056/aioa2400353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kemp</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Trigg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Beatty</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dhillon</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Maeder</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>PAH</given-names>
            </name>
            <name name-style="western">
              <surname>Koczwara</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Health literacy, digital health literacy and the implementation of digital health technologies in cancer care: the need for a strategic approach</article-title>
          <source>Health Promot J Austr</source>
          <year>2021</year>
          <volume>32 Suppl 1</volume>
          <fpage>104</fpage>
          <lpage>114</lpage>
          <pub-id pub-id-type="doi">10.1002/hpja.387</pub-id>
          <pub-id pub-id-type="medline">32681656</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rezai</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dou</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>A survey on trustworthiness in foundation models for medical image analysis</article-title>
          <source>arXiv:2407.15851</source>
          <year>2024</year>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
