<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e83407</article-id><article-id pub-id-type="doi">10.2196/83407</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Anticipating Moral and Economic Considerations, Opportunities, and Potential Frictions for AI in Medical Imaging: Multistakeholder Cocreation Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Schilder</surname><given-names>Martin Bastiaan</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Keyser</surname><given-names>Alexandra</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>van Hees</surname><given-names>Susan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Sbrizzi</surname><given-names>Alessandro</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Boon</surname><given-names>Wouter Pieter Christiaan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Computational Imaging Group for MR Therapy and Diagnostics, Center for Image Sciences, University Medical Center Utrecht</institution><addr-line>Heidelberglaan 100</addr-line><addr-line>Utrecht</addr-line><country>The Netherlands</country></aff><aff id="aff2"><institution>Faculty of Geosciences, Copernicus Institute of Sustainable Development, Utrecht University</institution><addr-line>Utrecht</addr-line><country>The Netherlands</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Stone</surname><given-names>Alicia</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Zhang</surname><given-names>Jun</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Shingru</surname><given-names>Pratik</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Martin Bastiaan Schilder, MSc, Computational Imaging Group for MR Therapy and Diagnostics, Center for Image Sciences, University Medical Center Utrecht, Heidelberglaan 100, Utrecht, 3508GA, The Netherlands, 31 887569270; <email>m.b.schilder-2@umcutrecht.nl</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>25</day><month>2</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e83407</elocation-id><history><date date-type="received"><day>02</day><month>09</month><year>2025</year></date><date 
date-type="accepted"><day>31</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Martin Bastiaan Schilder, Alexandra Keyser, Susan van Hees, Alessandro Sbrizzi, Wouter Pieter Christiaan Boon. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 25.2.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e83407"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI) promises to significantly impact daily radiology practices. Numerous studies have already been conducted that anticipate this potentially disruptive innovation. So far, these studies have mainly focused on single topics, such as &#x201C;trust,&#x201D; or investigating perspectives of single stakeholder groups, such as &#x201C;radiologists.&#x201D;</p></sec><sec><title>Objective</title><p>This study aims to explore future directions for AI in radiology by incorporating perspectives of a heterogeneous group of stakeholders on a broad spectrum of moral and economic topics. 
It also aims to cocreate and reflect with a broad range of stakeholders on viable implementation scenarios for scalable AI applications in radiology in the Netherlands, thereby identifying potential opportunities and frictions, with a focus on moral and economic considerations.</p></sec><sec sec-type="methods"><title>Methods</title><p>To inform the workshop design, a nonsystematic narrative literature search was performed to deepen our understanding of key moral and economic considerations at play in the field of radiology and AI. Workshop participants, representing a wide range of actors including radiologists, innovators, and patient representatives, were selected using purposive sampling. Data were collected in a cocreation workshop. In 3 subsequent rounds, mixed over 3 breakout groups, a total of 17 participants were asked to (1) map what they considered important moral and economic considerations, (2) envision possible future scenarios for AI in radiology, and (3) discuss opportunities, frictions, and routes to success. Transcribed recordings were coded and cross-checked.</p></sec><sec sec-type="results"><title>Results</title><p>Workshop participants envision future AI-driven scenarios, ranging from extramural imaging departments for increased accessibility to health care, to multimodal data integration for human-centered AI-enhanced diagnostics. 
Seven themes emerge from the discussions during the workshop: (1) trust and efficiency of AI technologies, (2) responsibilities in clinical decision-making when AI is involved, (3) diagnosis as a one-off versus an iterative process, (4) regulations as a requirement or a restriction, (5) economic benefits or drawbacks, (6) trade-off between amount of information required and patient privacy, and (7) environmental considerations.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Reflecting on the 7 emerging themes, we identify three overarching topics: (1) human-AI collaboration and trust, (2) governance, regulation, and ethical safeguards, and (3) value creation and sustainability. These topics highlight the need to balance technological advancements with ethical responsibility, institutional accountability, and societal benefit. They also underscore the importance of designing AI systems that not only perform well but are also trusted and aligned with clinical workflows and patient values. These overarching themes offer a lens through which future research and policy can navigate the complex interplay between innovation, regulation, and real-world implementation. Future research is needed to validate the generalizability of the results across various countries and health care settings.</p></sec></abstract><kwd-group><kwd>cocreation</kwd><kwd>stakeholder perspectives</kwd><kwd>radiology</kwd><kwd>artificial intelligence</kwd><kwd>responsible research and innovation</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>As health care systems worldwide face increasing demand and complexity, the integration of advanced technologies such as artificial intelligence (AI) is gaining momentum. 
Many clinical experts and sector specialists consider AI to have transformative potential for the field of radiology [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. They argue, for instance, that AI offers the potential to enhance diagnostic accuracy, to support early disease detection, and to personalize patient care through applications such as image segmentation, anomaly detection, and predictive modeling [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. These applications could automate routine tasks and uncover subtle patterns that can be missed by the human eye [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref8">8</xref>] and help radiologists to focus more on complex clinical decisions such as diagnosing intricate disease presentations [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>There is a growing body of literature discussing the promises of AI in radiology from different perspectives. From a moral perspective, many scholars propose AI to contribute to safer, more patient-centered care by reducing diagnostic errors and unnecessary interventions [<xref ref-type="bibr" rid="ref9">9</xref>]. Economically, proponents argue that AI can contribute to streamlining radiology workflows by improving productivity, shortening turnaround times, and lowering health care costs through early detection of disease and more efficient resource allocation [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. 
Alongside these promises, scholars warn that we should remain sensitive to risks and challenges, such as algorithm opacity and profit-driven motives, created by an emerging role of AI in health care [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>Despite these promises, much of the existing research on the role of AI in radiology remains limited in scope. So far, various studies have explored AI in radiology, mostly from a single perspective, such as trust or acceptance [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Moreover, existing research has primarily explored these perspectives within only a single stakeholder group or a limited range of groups, such as radiologists, innovators, or patients [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. These studies also have predominantly applied retrospective methods, such as surveys and questionnaires [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. Although these earlier studies offer valuable insights, there is a need for research broadening the perspectives and stakeholders. Adopting a broader approach enables the examination of interactions among stakeholders while situating their perspectives within the wider set of trade-offs and contextual factors.</p></sec><sec id="s1-2"><title>Objective</title><p>The study aimed to cocreate and reflect with a broad range of stakeholders on viable implementation scenarios for scalable AI applications in radiology in the Netherlands, thereby identifying potential opportunities and frictions, with a focus on moral and economic considerations. 
Drawing on insights from responsible innovation, we endeavored to include a wide variety of views in an early stage of the emergence of a technology, anticipating potential implications and frictions, aiming to increase chances for successful innovations [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. The inclusion of varied perspectives, especially those of patients [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>], has been widely advocated as essential to shaping relevant research agendas and outcomes [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. Early-stage engagement should increase opportunities for aligning technology development not only with clinical needs and regulatory requirements but also with current experiences and care practices, anticipating expectations and underlying values [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref33">33</xref>], and answering questions regarding commercialization [<xref ref-type="bibr" rid="ref34">34</xref>]. Engaging stakeholders encourages iterative feedback, increasing the chance for AI systems to evolve alongside real-world needs and cultivate shared ownership of the innovation process [<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>During a cocreation workshop, stakeholders were asked to build concrete implementation scenarios for AI in imagined future intra- and extramural settings and to reflect on expected opportunities and potential frictions created by moral and economic considerations. 
The forward-looking, participatory approach that we used contributed to a more comprehensive understanding of possible future pathways, which we present in the &#x201C;Results&#x201D; section. Subsequently, we describe the 7 themes that emerged from these scenarios, touching on various moral and economic considerations, involving among others the role of the human (radiologist and patients), as well as issues related to regulations, economic considerations, and the environmental impact. Finally, the &#x201C;Discussion&#x201D; section further reflects on these themes, subsequently concluding with 3 key implications for futures of AI in radiology.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>Our research approach reflects starting points from responsible innovation. We started with a nonsystematic narrative literature search to identify existing topics and gaps in the scientific discourse. These topics served as input for the cocreation workshop in which future scenarios for AI in radiology were developed. In the workshop, we took an explorative, anticipatory, and cocreation approach, in which stakeholders were allowed to raise and reflect on interconnected topics, rather than addressing these topics in isolation.</p></sec><sec id="s2-2"><title>Identification of Workshop Topics</title><p>To gain an initial understanding and to provide the research team with context of the recurring topics and stakeholders in the field of AI, radiology, and business models, we conducted a nonsystematic narrative literature search aimed at identifying key topics and trends in the field [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. In light of the fact that we aimed for a broad group of topics and stakeholders, we were a priori interested in both moral and economic considerations. 
Resultingly, our approach included keyword-based searches in primarily PubMed and Google Scholar using search terms such as &#x201C;Radiology,&#x201D; &#x201C;AI,&#x201D; &#x201C;Transparency,&#x201D; and &#x201C;Value proposition.&#x201D; These terms were iteratively refined as understanding of the topic evolved and, in the end, also included &#x201C;Algorithmic fairness&#x201D; and &#x201C;Revenue streams&#x201D; as search terms, as is common practice in a narrative review [<xref ref-type="bibr" rid="ref36">36</xref>]. We also used forward and backward snowballing from key publications to capture additional influential works. In addition, we browsed the digital archives of leading journals in the field, including <italic>Journal of Medical Internet Research</italic> (and relevant sister journals), <italic>European Radiology</italic>, and <italic>Insights into Imaging</italic>. This flexible and exploratory search strategy suited our objective of mapping conceptual developments and identifying recurrent topics across the literature rather than providing an exhaustive or systematic synthesis. <xref ref-type="other" rid="box1">Textbox 1</xref> provides the main findings of this review.</p><boxed-text id="box1"><title> Topics covering moral and economic considerations coming from the literature search.</title><p>In the literature search, we focused on artificial intelligence (AI), radiology, and business models, and both moral and economic considerations came forward. Radiologists increasingly rely on digital tools, including AI-based tools, for various functions [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. 
This raises moral and economic concerns related to discussions about, for example, trust between the patient and physician [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>], supervision and accountability in clinical decision-making [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>], lacking or unclear reimbursement structures [<xref ref-type="bibr" rid="ref42">42</xref>], and high upfront costs [<xref ref-type="bibr" rid="ref43">43</xref>]. In these discussions, some recurring topics came forward.</p><p>Trust is often mentioned in literature as playing a critical role in the field of radiology. Where, traditionally, trust between a patient and a physician was deemed important, another dimension is added with the introduction of AI, and the definition of trust is increasingly broadened to both the clinicians&#x2019; and patients&#x2019; trust in the technology&#x2019;s performance and reliability [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref49">49</xref>]. However, trust is a complex concept that relates to interconnected discussions on, among others, fairness, safety, explainability, and transparency [<xref ref-type="bibr" rid="ref50">50</xref>]. To increase fairness and safeguard vulnerable populations, managing risks is essential [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. AI models often reflect biases inherent to insufficiently diverse training datasets [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. 
Resultingly, model performance can deteriorate if applied to a different population. Explainability and transparency are fundamental to building trust in AI systems [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>] and can be considered key fairness strategies to mitigate bias and improve accountability [<xref ref-type="bibr" rid="ref30">30</xref>]. Here, the distinction is that interpretability means that a radiologist can understand and make sense of the outputs of the AI system, whereas explainability means that the AI system also provides the radiologist with a rationale or reasoning behind the results [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>Despite their importance, many AI models suffer from the &#x201C;black box problem,&#x201D; where decision-making processes can be opaque even to developers [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. Obstruction of error identification through a lack of transparency compromises trust and accountability [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. The General Data Protection Regulation mandates transparency and explainability in automated decision-making, but many systems struggle to meet this standard [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. On the topic of data, while the General Data Protection Regulation, for instance, grants patients ownership over personal data through, for example, obliging data users to obtain informed consent, uncertainties persist regarding data reuse and consent withdrawal [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>]. 
Ownership thus means that a person or legal entity has the full and inherent right to decide what happens with the data without requiring consent from anyone else. By contrast, controls refer to the delegated ability to use or manage data within boundaries agreed upon by the owner, granted through informed consent. Developers, users, and health care providers must navigate complex questions about responsible data storage and use [<xref ref-type="bibr" rid="ref49">49</xref>]. Transparency in data management could further support trust, yet proprietary algorithms and cross-disciplinary misalignments between developers and health care practitioners create significant challenges [<xref ref-type="bibr" rid="ref58">58</xref>]. It has therefore been advocated that radiologists must develop new skills to effectively collaborate with AI, which requires additional training [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>] and potentially a shift in roles and responsibilities [<xref ref-type="bibr" rid="ref61">61</xref>]. Concerns about overreliance on AI systems, loss of diagnostic expertise, and shifts in job satisfaction are frequently noted [<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref64">64</xref>]. 
Additionally, unresolved issues in assigning accountability for AI-assisted errors complicate the integration into clinical workflows, raising questions about liability and autonomy among developers, users, and insurers [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>].</p><p>Business models for AI applications in radiology outlined in the literature reviewed include perpetual licensing, pay-per-use, and subscription-based models [<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref66">66</xref>], with most companies offering subscription or hybrid pricing models [<xref ref-type="bibr" rid="ref65">65</xref>]. Deployment and pricing strategies have not yet converged to a preferred or market-dominant standard, and most vendors offer multiple options. Literature discusses the shift of software as a medical device from product-centric to platform-based models [<xref ref-type="bibr" rid="ref67">67</xref>]. This idea of platformization could potentially facilitate easier (vendor-neutral) compatibility and integrability within existing hospital infrastructure [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref69">69</xref>]. In essence, all businesses in the medical software industry operate in a highly dynamic environment with constant challenges, including disruptive innovation, strict regulations, sustainability concerns, and uncertainties [<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. To navigate this landscape and mitigate risk, a more dynamic business model has been proposed, tailored to the specific demands of the software as medical device domain. Long-term stability of business models can be achieved through diversified revenue streams and optimizing operational efficiency, regardless of market conditions [<xref ref-type="bibr" rid="ref71">71</xref>]. 
Hospitals also seek ways to fund AI-supported services. Some sources point to missing reimbursement structures or high upfront costs, hindering adoption [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. There is, however, a need for more research on sustainable business models for hospitals that balance adoption costs with strong value propositions and reimbursement frameworks [<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref73">73</xref>] and value assessment through real-world clinical validation [<xref ref-type="bibr" rid="ref74">74</xref>].</p></boxed-text><p>The insights from this nonsystematic narrative literature search were synthesized into topics, prompts, and discussion points, which served as input for the cocreation workshop, and they were used to create workshop canvasses. The workshop canvasses can be found in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref>. These canvases guided participant engagement and helped ensure that the workshop addressed research-relevant issues by suggesting conversation topics, while remaining flexible for participant input.</p></sec><sec id="s2-3"><title>Workshop Design and Participants</title><p>Since it is argued that integration of AI into radiology may provide a transformative opportunity for health care delivery, we further explored opportunities for AI in radiology in a multistakeholder workshop. Workshop participants were selected using purposive sampling to ensure the inclusion of individuals relevant to the research objectives. We aimed to include participants from diverse stakeholder backgrounds. As a point of departure, participants from an earlier workshop with a diverse panel of stakeholder backgrounds [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref61">61</xref>] were approached first. 
In addition, we explicitly looked for expertise and relevant types of stakeholders that were missing, were underrepresented, and regarded as important in relation to the workshop&#x2019;s themes. This search included reaching out to contacts in the authors&#x2019; own network, for example, through LinkedIn and institutional websites. Prospective participants were also allowed to suggest relevant candidates from their own network. The patient representatives were members of the hospital patient council and were approached by hospital staff for participation. Participation was voluntary aside from a compensation for travel-related expenses. Potential selection bias was balanced by having participants representing very different perspectives.</p><p>In the workshop, patient representatives (n=2), radiologists (n=2), laboratory technicians (n=2), a technology developer (n=1), clinical researchers (n=2), industry employees (imaging hardware company employee: n=2; medical software company employee: n=1), a funding body employee (n=1), a clinical physicist (n=1), a business developer (n=1), and social scientists (n=2) participated, totaling 17 participants. The sessions were moderated by the researchers (MBS, SH, and WB). Next to discussing from their expert point of view, all participants were also invited to reflect on questions from a general citizen&#x2019;s point of view, allowing for flexible conversations between the participants.</p></sec><sec id="s2-4"><title>Ethical Considerations</title><p>The research protocol was assessed and exempted from medical ethical review by the Medical Ethics Board of University Medical Center Utrecht (Declaration METC no. 22&#x2010;475/DB, d.d. 1 March 2022) [<xref ref-type="bibr" rid="ref75">75</xref>]. 
This exemption was determined in accordance with the Dutch Medical Research Involving Human Subjects Act and the definitions and guidance provided by the Central Committee on Research Involving Human Subjects [<xref ref-type="bibr" rid="ref76">76</xref>]. All participants gave written and verbal informed consent prior to the workshop and had the opportunity to react to a written workshop report after the workshop. In the analysis, the data were coded, meaning that participants received a study number. In this paper, we refer to the participants with that same code, although sometimes referring to their respective stakeholder background. Patient representatives received compensation for travel-related expenses and were offered a voucher of &#x20AC;25 (approximately US $27 at the time of the workshop).</p></sec><sec id="s2-5"><title>Data Collection</title><p>The cocreation workshop took place in June 2024 and lasted 3 hours. Participants were asked in three rounds to (1) map and reflect on moral and economic considerations they considered important when thinking about the field of radiology and AI, (2) cocreate concrete scenarios, envisioning plausible futures for radiology with AI in the not-too-distant future (the year 2040), to elaborate on the broader implications of their considerations to the field, and (3) think about opportunities and potential frictions related to these scenarios and how to deal with or prevent them.</p><p>The 15-year time frame was chosen to allow participants to envision a future where significant transformation could happen, while still being close enough to make the ideas feel actionable. We asked our participants to envision scenarios <italic>postadoption</italic> of AI in radiology, intentionally prompting the workshop participants not to focus on pragmatic topics, such as hardware requirements, because our aim was to explore strategic and value-driven perspectives rather than operational constraints. 
We wanted participants to think beyond immediate technical hurdles and envision how AI could reshape workflows, roles, and patient care once adoption challenges have been resolved. In 3 breakout groups, each with 5 or 6 participants, scenarios were cocreated and discussed. In round 1, the participants were asked to discuss their respective considerations in groups of 2 or 3 participants. For rounds 2 and 3, we provided the participants with a worksheet canvas to guide the discussion. The questions on the canvasses were loosely based on the topics we found in our literature search (<xref ref-type="other" rid="box1">Textbox 1</xref>). The canvasses can be found in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref>. We explicitly told participants that these sheets could be used as inspiration for conversations, and if other ideas came to mind, that they were free to explore them. No further prompts were provided, except for those related to time management of the workshop. If questions on interpretation of the canvasses came forward, the session moderators answered the question neutrally and aimed not to steer the discussion in a certain direction. In case of dissent between the participants, the session moderators did not intervene, as disagreements between the participants could potentially reveal interesting frictions between various stakeholder backgrounds. All discussions were audio recorded, the filled-out worksheet canvasses were stored, and notes were taken by the researchers (MBS, SH, and WB). A more elaborate quotebook is provided in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p></sec><sec id="s2-6"><title>Data Analysis</title><p>The workshop was held in Dutch. All audio data were automatically transcribed using Amberscript (Amberscript BV), reread, checked, and corrected where necessary. 
Transcripts were then automatically translated to English using Microsoft Word (Microsoft Corporation), again reread, checked, and corrected where necessary. The transcripts were independently coded. MBS and SH, as native Dutch speakers, coded the original transcripts in order to stay close to the original meaning, and AK, as a native English speaker, coded the translated transcripts. The codes were subsequently compared and refined using qualitative analysis software NVivo (version 14; Lumivero). Data were analyzed using a semi-inductive coding approach, guided by predefined categories, but remaining flexible to the identification of emergent codes as new themes arose from the data. The initial codes were related to the topics we identified in the literature search and that we described in <xref ref-type="other" rid="box1">Textbox 1</xref>, namely, function, performance, supervision or autonomy, responsibility, explainability, transparency, training and job roles, algorithm performance, user and patient security, data management, accountability, bias, value assessment, business model aspects, value proposition, funding, platformization, and compatibility and integrability. Additionally, during the analysis, we found that &#x201C;patient communication&#x201D; was emerging as a new and recurring topic. This code was subsequently added to the codebook. Our analysis focused on moral and economic considerations, opportunities, and frictions, which materialized in the scenarios. To conform with scientific rigor, consistency between the researchers was ensured, as the researchers discussed their findings after initially independently coding the transcripts. That is, these peer debriefing sessions aided in ensuring that our interpretations were firmly grounded in the data. 
During these peer debriefing sessions, the researchers systematically compared their coding decisions for each transcript, focusing on whether the chosen codes accurately captured the meaning of the data and whether the interpretations were consistent across researchers. They reviewed the rationale behind each assigned code, discussed eventual discrepancies, and clarified ambiguous segments. Disagreements were openly discussed, and alternative perspectives were considered before reaching a shared interpretation, ensuring that the final coding reflected consensus. This iterative dialogue served to refine the coding framework and enhance the credibility of the analysis. Furthermore, a workshop report inspired by a preliminary analysis of the data was shared with the participants, providing them with an opportunity to disagree with the researchers&#x2019; interpretation of the data or add thoughts that came after the workshop.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Cocreation Workshop</title><p>In this section, we share the findings, drawing on insights from the multistakeholder cocreation workshop. Many topics came forward, including questions regarding sharing or balancing responsibilities between humans and technology, power and influence in relation to data, ownership, access and trust, and questions regarding costs and payments. Developing and reflecting on the scenarios helped identify expectations. Different stakeholders brought forward different perspectives in the discussions. For example, in groups that included a patient representative, the importance of having a human radiologist involved came forward.</p><p>Most participants shared the expectation that between now and 2040 much will change and probably quite rapidly, referring to fast developments surrounding general purpose AI tools such as ChatGPT. 
Participants in our multistakeholder workshop considered in their scenario either intramural improvement of radiology processes or the extramural screening and early detection of disease. This resulted in 3 scenarios sketched out in the next paragraphs: AI copilot, scanner on tour, and Alzheimer disease (AD) screening. We present each scenario following a similar structure; first, we report what participants believe could be a good use case of AI (<italic>materialization</italic>), after which we share their <italic>moral and economic considerations</italic>, <italic>opportunities,</italic> and <italic>frictions</italic>. As a way of analysis, we highlight emerging themes from the scenarios.</p></sec><sec id="s3-2"><title>The 3 Scenarios</title><sec id="s3-2-1"><title>Scenario 1. Partner in Diagnosis: Envisioning AI as the Radiologist&#x2019;s Trusted Copilot</title><sec id="s3-2-1-1"><title>Materialization</title><p>Participants envisioned that in 2040 AI would be integrated into every hospital&#x2019;s radiology department, acting as a &#x201C;copilot&#x201D; to assist radiologists. Radiologists may grow from an executing role to a more supervisory role, sometimes required only to approve the AI&#x2019;s suggestion. In this scenario, AI can aid in the noninvasive characterization of a tumor, enriching radiological findings on computed tomography or magnetic resonance imaging (MRI) with a description of histology and tumor biology without requiring surgical procedures, such as biopsies. 
Or, in case a rich collection of patient data is already available, AI can integrate such multimodal data:</p><disp-quote><p>...also the integration with biomarkers, with pathology with those [foundational models], and the EHR [Electronic Health Record], gives a much more complete picture than just with those images.&#x201D; The workshop participant continued, arguing that despite all clinicians already working together in the same system, she expects AI to be an enabler of actual integration: &#x201C;(...) I think that integration into AI of all those different things will also become very strong.</p><attrib>g3, s6</attrib></disp-quote><p>AI in this use case supports the radiologist with additional insights that can be used for improved diagnosis or treatment response monitoring. According to the participants, the added value can be demonstrated in independent clinical studies in which researchers, clinicians, and ethical evaluation boards should have their usual role. Explicating added value incentivizes medical professionals to include AI in guidelines and insurance companies to reimburse its use. AI can subsequently be used to collect relevant patient data and provide comprehensive interpretations, whereas the radiologist retains ultimate responsibility for diagnosis and treatment decisions.</p><p>Participants thought that software developers should be in the lead in setting up a quality management system. They can supply their product, for example, through a pay-per-view business model, which is integrated into Picture Archiving and Communication System or electronic health record along with other vendors&#x2019; products. Users were regarded as being in the lead for evaluating real-world, postmarket applications. 
Although data have the potential to be part of business models, the participants thought that patients should retain ownership of their data, emphasizing that these data should be anonymous and nontraceable.</p></sec><sec id="s3-2-1-2"><title>Moral and Economic Considerations</title><p>Participants deemed AI in radiology necessary to support the radiologist and underlined its potential, considering the hours spent on, for example, follow-up MRI scans without visible changes to the imaged pathology. Requirements formulated for this scenario were as follows: (1) The treating physician always remains responsible for the patient&#x2019;s health. Since human experts should always remain in control, physicians remain responsible for communicating with patients. (2) AI should benefit the patients and lead to improved health, while applying to as many patients as possible. (3) Trust in AI can be increased by observing well-performing models that provide added diagnostic value, which can convince radiologists to use and fully adopt these models. Trust in AI was considered a means of adoption for this technology. (4) The industry should focus on user-friendliness and insightful cost-benefit analyses to improve adoption. (5) Besides clear added value in the context of diagnosis and treatment, AI should have a positive cost-benefit balance.</p></sec><sec id="s3-2-1-3"><title>Opportunities</title><p>Assistance to radiologists was expected to add value by improving efficiency and accuracy in ongoing patient care and monitoring. Furthermore, added value was expected by the premise that AI could seamlessly merge multimodal data, contributing to disease detection, characterization of lesions, and monitoring treatment. This should add value, lead to lower health care costs, improve patient outcomes, or a combination of the aforementioned. 
Hospitals and health insurance companies benefit due to process optimization, resulting in lower costs and value-adding health care provision.</p></sec><sec id="s3-2-1-4"><title>Frictions</title><p>With AI as a copilot assisting, and sometimes even taking over from the radiologist, the chance of discrepancies arises. On the one hand, participants indicated that AI is likely to make fewer mistakes than a radiologist and that AI should be trusted. On the other hand, in case a radiologist reaches a wrong conclusion with the suggestion of AI, the radiologist should not be allowed to hide behind the output of the AI, which creates a friction between efficient use of AI and increased responsibility in case of mistakes. As a solution, the participants suggested that the radiologist always retains the ultimate responsibility for the conclusion of the radiology report. This still requires checking the output, but it was considered faster than going through multiple series of images. For patients, it was envisioned that AI would lead to increased demand for data, which could lead to unwanted use of personal data. To prevent this, data users should receive explicit and flexible consent, meaning that data can be used only for the purpose and duration that a patient allows. Furthermore, introducing AI in radiology was expected to increase the detection of incidental findings, many of which may lack clear clinical significance. This raises complex ethical challenges, particularly when balancing a patient&#x2019;s right not to know with the moral obligation to share potentially medically important findings. Participants in our study emphasized the importance of discussing the right not to know with the patients prior to imaging, as incidental findings can impose a psychological burden. 
They also stressed the necessity of keeping a human doctor in the loop, as the moral and clinical judgment required in these situations exceeded the expected capabilities of current AI systems and the demand for expertise.</p></sec></sec></sec><sec id="s3-3"><title>Scenario 2. Health Without Walls: AI and the Rise of (Mobile) Multimodal Screening</title><sec id="s3-3-1"><title>Materialization</title><p>The participants developed 2 separate ideas of extramural health checks: in one, they envisioned an environmentally sustainable, electric bus touring across the country, in a similar setup as the current population screening for breast cancer prevention in the Netherlands. Such a bus enables screening for a wide variety of potential diseases, which potentially benefits from large-scale screening programs. When AI detects a high-risk scan, the scan is forwarded to a radiologist, who decides whether to follow up. Within this scenario, it is possible to focus on the scan, or to combine the screening with blood or any other test, thereby combining data from multiple streams into a single risk assessment.</p><p>The other idea was focused on an extramural imaging, diagnosis, and treatment facility, which can, for instance, be located in a shopping mall. Citizens can visit the facility for a self-initiated check or after being invited due to specific risk factors, such as age. AI is subsequently used for fast diagnosis and forwards data to a radiologist if follow-up is required. As a consequence, there is still value in human intervention and responsibility. This scenario requires data to be shared with hospitals, for example, through electronic health records. 
A step further is to fully automate the diagnostic workflow by using large models that use large retrospective datasets and individual risk factors to screen for patients at risk and subsequently immediately treat correctly identified patients.</p></sec><sec id="s3-3-2"><title>Moral and Economic Considerations</title><p>Participants emphasized how important they consider trust and responsibility when thinking about AI and radiology. Questions raised by a social scientist and patient representative included, &#x201C;Is it responsible to leave decisions to computers?&#x201D; (g1-s2) and &#x201C;Who should be responsible for this?&#x201D; (g1-s1). The ownership of the process and of the data was questioned. According to participants, AI can have added value in this scenario, including alleviating burdens from hospital staff, improving patients&#x2019; quality of life with an earlier start of the treatment, and equal access to health care.</p><p>Moreover, participants suggested that additional value can be calculated in added quality-adjusted life years, which can be translated into societal gains. The primary benefit of these scenarios may be that early diagnosis reduces long-term health care costs by shifting care from acute to preventive services. For funders, for example, the government, of large-scale AI-supported imaging-based screening programs, this shift may represent potentially favorable cost-benefit ratios where upfront investing may yield substantial savings and productivity over time. Additionally, commercial gain is possible, for example, for companies providing the bus with diagnostic hardware and software. Participants discussed various types of business models. Software companies responsible for evidence generation and validation can charge subscription- or use-based fees per license. 
Companies supplying scanners often operate on a purchase or lease model, and decentralized centers or scanner-on-tour suppliers could cover costs via direct-to-consumer supply or health insurance. For quality, competition between companies was regarded as crucial, and hospitals could consider purchasing an intramural diagnostic platform that supports multiple products from different vendors. This creates an overarching platform with competing products. Companies developing the software suites and the AI application that can be integrated into them should be prime movers here. Participants discussed the potential for the program to eventually become institutionalized, similar to breast cancer screening. Business models were seen as increasingly promising, especially once such programs began to generate large-scale data that can be leveraged to train data-hungry models.</p><disp-quote><p>if you have an AI algorithm that is CE-marked [Conformit&#x00E9; Europ&#x00E9;enne], it doesn&#x2019;t learn from the data it receives, right? That&#x2019;s locked down. That&#x2019;s what the CE marking is for. It only goes into development mode when you have data to further develop it on, and then, you do an update, and you put that into the system. So, it&#x2019;s not like we always think that the AI just learns by itself.</p><attrib>g1, s3</attrib></disp-quote></sec><sec id="s3-3-3"><title>Opportunities</title><p>Participants agreed that AI could offer an opportunity for the field that is in need of change, with the number of scans being requested rapidly increasing.</p><disp-quote><p>There is a report out right now that anticipates continuous growth in the years to come. Drivers are an aging population, more chronically ill patients, but also the increase in technological possibilities. At the same time, there is a growing demand coming from patients and the general population to have more insight into their health. 
Extramural health checks might be an answer to this demand.</p><attrib>g1, s3</attrib></disp-quote><p>Participants envisioned a future in which AI could more and better support the role of medical professionals. Decentralized health care facilities can play a large role in the health care system and help decrease the growth of private companies offering whole-body scans, which was deemed undesirable as disparities in income and wealth may translate into unequal access to health care services, potentially leading to disparities in health outcomes.</p></sec><sec id="s3-3-4"><title>Frictions</title><p>Participants&#x2019; discussions shed light on a couple of frictions. First, they questioned whether the model should be organized in a centralized or decentralized manner, what the implications of decentralized testing would be for referring people to a hospital, and how data collected may or could be shared. Decentralized centers would need to build support within the broader network of health care institutions, hospitals, companies, and so on, for data sharing; they needed correct licensing, and interoperability must be addressed. Access and management of big data can create suspicions among patients, as they may think: &#x201C;your data no longer belongs to you&#x201D; [g1-s2].</p><p>Another potential friction arose around new role divisions and associated responsibilities. Laboratory technicians may outsource more tasks and perform fewer tasks themselves, radiologists may be hired on a consultancy (freelance) basis, and new roles may emerge (such as radiology assistants). This could increase the distance between the radiologist and the referring physician, which could complicate responsibilities and task distribution. A third friction point regarded sustainability:</p><disp-quote><p>What are we going to do? Is that desirable? So, I think we have to assess upfront, do you need a scan? 
Yes or no?</p><attrib>g1-s3</attrib></disp-quote><p>This question was answered later on:</p><disp-quote><p>AI can help with that as well. They can look in advance: from, does this patient need a scan? And then you are working more sustainably, because yes, the most sustainable scan is no scan.</p><attrib>g1, s1</attrib></disp-quote><p>On the one hand, there was a need to manage this rising demand, with patients starting to demand scans for more certainty</p><disp-quote><p>Because there are, of course, patients that ask for a scan, they [patients] want certainty.</p><attrib>g1, s5</attrib></disp-quote><p>versus the question of whether you need to better manage this demand. This can be done either by enlarging the role of AI and maximizing the use of the capacity of available scanners or by downsizing the demand by asking whether a scan is actually necessary, which is a decision AI can support in this scenario.</p></sec></sec><sec id="s3-4"><title>Scenario 3. Early AD Insight: Proactive AI for a Longer Independent Life</title><sec id="s3-4-1"><title>Materialization</title><p>A group of participants chose to build their scenario around a concrete use case. The participants quickly decided to further develop their scenario around the idea that in the future, it becomes possible to predict, by analyzing either MRI data or only multimodal data, whether a citizen later in their life is likely to develop AD. This is achieved by screening citizens from a certain age group for the disease, either upon invitation or upon the citizens&#x2019; request (similar to the previous screening scenario). The benefit of using AI in very early stages to detect or predict AD, participants mentioned, can be that (future) patients are helped and trained to live independently or with minimal help for longer. 
This approach should improve the quality of life of patients, alleviate some of the societal burden of caregivers for these future patients, and may lead to reduced costs:</p><disp-quote><p>Allowing Alzheimer&#x2019;s patients to function independently for as long as possible. [...] to keep support costs as low as possible.</p><attrib>g3, s3</attrib></disp-quote><p>Explainability was discussed as an important model aspect for patients. A patient representative argued that the model should be able to explain how it arrived at a certain decision. However, a participant with a background in pathology contextualized that this can be very task dependent. The researcher gave an example that was more related to her own field of expertise:</p><disp-quote><p>because for tumor detection, it [the AI] does not need to explain why something was flagged. ...Because we check it [the decision of the AI], so it is [in this case] AI-assisted. So I think: for this task, it does not need to explain what something is flagged for [...], because we check it [the flagged location].</p><attrib>g3, s6</attrib></disp-quote><p>The participants also briefly touched upon the business landscape, indicating that having multiple vendors would be a desirable aspect:</p><disp-quote><p>It&#x2019;s better to have multiple suppliers. Otherwise, we will have a monopolist.</p><attrib>g3, s3</attrib></disp-quote></sec><sec id="s3-4-2"><title>Moral and Economic Considerations</title><p>Participants found it important that implementing AI would be a meaningful addition to radiology, benefiting the individual patient. Thorough validation of the AI product can lead to improved trust in its use. Trust can also be enhanced through (human) explanation of the output. This means that either the algorithm should be able to explain its own output or radiologists should provide an explanation when a textual one is unavailable. 
Furthermore, patients should always remain in charge of their data and consecutive algorithmic outputs. That is, the data acquired upon clinical indication and the output of the algorithms should be shared only upon their voluntary consent. The participants also believed that individual patients or a society that shares personal data should monetarily benefit from commercial products. In other words, according to the participants, commercial parties should not be allowed to earn profits from products forever, and profits should eventually flow back to society in some form, given that the AI product is essentially enabled by voluntarily provided personal data.</p></sec><sec id="s3-4-3"><title>Opportunities</title><p>The participants argued that AI should deliver a product that radiology currently cannot. That is, true added value is achieved when AI detects what a radiologist cannot see, or when AI predicts future events that are not yet visible as pathology at the time of imaging. Participants discussed that the role of AI in this scenario is to assist with screening. Furthermore, predicting disease before it can manifest can lead to improved quality of life, specifically for AD. The participants highlighted that this is especially the case due to the human and emotional side of a progressive disease such as AD.</p></sec><sec id="s3-4-4"><title>Frictions</title><p>The participants discussed the need for increased scanning capacity as a potentially negative consequence of AI. Granting all citizens the opportunity for screening for early detection of AD is costly, partly due to purchasing the scanners. However, using this additional scanning capacity can lead to improved quality of life for the patients, a lower societal burden for informal caregivers, and lower health care costs if cost-effectiveness of the screening is proven. The risk of incidental findings increases, though, which needs to be balanced with increased benefits of early detection. 
Whereas the industry employee argued that regulations can sometimes be experienced as burdensome, the patient representative found the regulations justifiable with respect to privacy, indicating that without strict regulations the patient representative may not share personal data at all.</p><p>Finally, the participants discussed potential consequences concerning insurance and autonomy: if AI predicts a future diagnosis such as AD, an insurance company may want access to those results, given that they may want to assess and price coverage accordingly. Also, the question was raised whether, &#x201C;will they [some authority] say automatically that you move to an Alzheimer&#x2019;s [care] house and you have to sell your home?&#x201D; [g3, s5]. According to the participants, these aspects were undesirable, and these open questions warranted some serious thought.</p></sec></sec><sec id="s3-5"><title>Emerging Themes</title><p>Seven common themes emerged from the 3 future scenarios, along with their moral and economic considerations, opportunities, and frictions. <xref ref-type="fig" rid="figure1">Figure 1</xref> summarizes the 7 emerging themes, recommendations based on the emerging themes, and 3 overarching themes.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Summary of the workshop, 7 emerging themes, corresponding recommendations, and 3 overarching themes. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e83407_fig01.png"/></fig><sec id="s3-5-1"><title>Emerging Theme 1: Trust Versus Effectiveness</title><p>Seemingly, trust in AI represents a critical trade-off. On the one hand, trust could enable AI to live up to its potential as a stand-alone technology to effectively address complex tasks, such as medical diagnosis or large-scale hospital workflow optimization. On the other hand, patient representatives feared risk for automation bias. 
Additionally, participants discussed that excessive reliance on AI systems can diminish human supervision, increasing the clinician&#x2019;s risk of automation bias and the erosion of critical decision-making capabilities. To break out of this trade-off and enhance trust, according to the participants, would often necessitate the adoption of interpretable and explainable AI systems, which radiologists can eventually check. Thus, achieving an optimal balance between trust and skepticism is essential to mitigate risks while harnessing the transformative potential of AI in radiology.</p></sec><sec id="s3-5-2"><title>Emerging Theme 2: Responsibilities in Clinical Decision-Making</title><p>Implementing AI was considered a shared responsibility among stakeholders, each contributing distinct perspectives and expertise. Policy makers play a central role in establishing regulatory frameworks that guide legal-ethical boundaries for AI development and deployment. Industry players produce and scale practical AI applications within the established boundaries set by the aforementioned policy makers. However, when it comes to clinical decision-making, the participants foresee that medical specialists and health care workers will ultimately be responsible for the health and safety of their patients, and clinicians also feel that way. To reach this goal, it could be beneficial for medical specialists to cooperate with more technically oriented hospital personnel to perform quality assurance tests, particularly to ascertain that AI performs equally well on local datasets compared with how industry markets the performance of their products. Furthermore, according to the participants, there might be a conflict of interest when industry performs its own quality assurance. 
Thus, local testing may be a critical requirement for safe deployment and responsible use of AI in radiology and, according to the workshop participants, should be performed by independent hospital staff.</p></sec><sec id="s3-5-3"><title>Emerging Theme 3: Diagnosis as a One-Off Versus an Iterative Interpretation</title><p>Given the rapid development of AI, radiological diagnosis is increasingly shaped by algorithmic output. These outputs are often treated as static and definitive, similar to finalized radiology reports. The static nature of AI&#x2019;s output presents 2 interrelated shortcomings. Radiological diagnosis is often not a fixed product but a dynamic process. It relies on expert interpretation and is influenced by evolving clinical information, such as neurological examinations and pathology results. Several participants noted that diagnosis often emerges through iterative reasoning and collaboration with other medical specialists. AI systems, which typically generate preanalyzed findings or diagnostic probabilities, risk oversimplifying this complexity. Participants suggested that integrating multimodal data could help AI better reflect current clinical practice. In addition, static outputs could lead to a greater demand for diagnostic certainty, potentially leading to unnecessary follow-up: by detecting more abnormalities and including findings that may never become clinically relevant, AI could lead to more scans, consultations, and interventions. A potential solution to both shortcomings that was suggested by participants is a &#x201C;human-in-the-loop approach,&#x201D; where the radiologist remains responsible for interpreting AI outputs within the broader clinical context. Collaborative frameworks in which AI complements rather than replaces human expertise were seen as essential. 
These should include clear protocols for follow-up, shared decision-making with patients, and the development of transparent and explainable AI systems to support trust and appropriate clinical action.</p></sec><sec id="s3-5-4"><title>Emerging Theme 4: Regulation as a Requirement or as a Restriction</title><p>Regulations play a dual role in technological innovation, acting both as a potential constraint and as a necessary enabler for progress, particularly in a fast-paced field such as AI in radiology. On the one hand, industry employees indicated that overly rigid or premature regulations can restrict innovation by imposing burdensome compliance requirements. On the other hand, patient representatives argued that well-designed regulatory frameworks are essential to ensure safety, fairness, and public trust&#x2014;key requirements for adopting novel technologies. For example, as indicated by patient representatives, robust protection of patient privacy not only follows an ethical imperative but also creates a secure environment for patients to share their data. In other words, effective regulations are required for patients to feel safe when consenting to sharing their personal data, which is the cornerstone for all AI developments. Adaptive and dynamic regulatory approaches, capable of evolving alongside technological advances, were thus considered crucial for maintaining this balance. Following our workshop results, participants regarded effective regulation as an enabler rather than an obstacle to innovation, because it provides the foundation for trust necessary for groundbreaking technologies to thrive responsibly.</p></sec><sec id="s3-5-5"><title>Emerging Theme 5: Economic Benefits or Drawbacks</title><p>The integration of AI in radiology presents significant economic opportunities and challenges. 
Following initial investments in hardware infrastructures and software packages, there is a wide variety of potential applications on a hospital level, societal level, and vendor level that could yield a net economic benefit. Participants emphasized that AI could significantly improve operational efficiency within hospitals. By automating routine tasks, such as appointment scheduling and administrative workflows, AI has the potential to reduce labor costs and streamline internal processes. These improvements may lead to better resource allocation and increased throughput, contributing to overall cost-effectiveness in clinical operations. At the societal level, the economic benefits of AI in radiology are diverse and potentially far-reaching. Participants pointed to several ways in which AI could contribute to more efficient health care delivery and improved population health. For example, timely diagnosis may enable earlier initiation of therapy, which can improve outcomes and reduce the need for prolonged or intensive treatment. Similarly, more personalized care, which could be supported by AI&#x2019;s ability to integrate and analyze complex data, may help avoid unnecessary interventions and enhance the overall effectiveness of health care services. These developments could contribute to long-term cost savings and a more sustainable use of health care resources. However, participants also cautioned that increased diagnostic sensitivity might lead to more incidental findings, which could raise demand for follow-up care and specialist input, potentially increasing overall health care expenditures. According to some participants, the responsibility for initial economic value assessments of AI systems lies with the vendors, who should estimate whether their solutions offer sufficient value before deployment. However, participants emphasized that independent researchers should conduct postdeployment evaluations. 
Similar to quality assurance processes, this separation is essential to avoid conflicts of interest and ensure unbiased assessments of economic performance.</p></sec><sec id="s3-5-6"><title>Emerging Theme 6: More Information at the Cost of Privacy</title><p>Unintended consequences from AI might arise when the decision becomes binding rather than advisory. This decision could drastically affect a patient&#x2019;s life. In the AD case, the participants discussed the extreme example of mandatory compliance with the AI decision for future patients when AD is predicted. Furthermore, if (by obligation) shared with insurers, these screening programs could have negative consequences for monthly health insurance premiums, thus penalizing individual citizens financially for factors beyond their control, which raises privacy, fairness, and proportionality concerns. Or, in the extreme case of extrapolating this issue, insurance companies may increase costs for patients at risk or deny (future) patients access to (additional) health and property insurance altogether. Regulation should therefore be in place to prevent such a scenario from materializing. Moreover, patients have the right to not know. The participants agreed that this moral dilemma might be quite difficult for an AI to weigh, where there needs to be a good balance between medical necessity and patients&#x2019; rights and preferences. To counter this unintended consequence, human intervention might be the only solution.</p></sec><sec id="s3-5-7"><title>Emerging Theme 7: Environmental Considerations</title><p>Finally, it was discussed that AI is an energy-demanding technology. Combining AI with radiology for screening means that scarce resources might be used for citizens who may not (yet) express any disease symptoms. Thus, the participants discussed that the potential benefits of AI in radiology for health care and the economy should also be carefully weighed against the potential environmental impact. 
The monetary gain and gain in quality of life after the introduction of AI to radiology should therefore be proportionate to the environmental impact of additional interventions, such as screening in combination with AI.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>In this manuscript, we report and reflect on a qualitative cocreation workshop we designed and conducted, inspired by the responsible innovation approach, aiming to explore future scenarios for AI in radiology, their opportunities and frictions, by involving a variety of stakeholders in the field of radiology. Interestingly, in imagining futures of AI in radiology, the majority of the groups, in one way or another, came up with a scenario to upscale radiology via screening or extramural imaging to detect pathology early and prevent it from becoming a disease. However, there were differences in the imagined role of these applications, and of actors in the field, including governments, insurers, technology developers, and citizens. By including a wide variety of topics, we aimed to broaden the participants&#x2019; scope of thinking. As a result, our participants discussed moral and economic considerations, envisioned opportunities, and foresaw frictions arising related to interconnected topics on trust, responsibilities, workflows, regulatory requirements, dealing with economic benefits, and unintended consequences.</p></sec><sec id="s4-2"><title>Comparison With Prior Work</title><sec id="s4-2-1"><title>Trust Versus Effectiveness</title><p>In our analysis, we observed trust in AI as a complex topic the participants linked to, for example, performance and explainability. 
The participants perceived a paradoxical relationship between trust and effectiveness: the more trust they were willing to place in AI, the more AI could potentially operate as a stand-alone technology in radiology and fulfill its promise of improving clinical outcomes. Yet, this very trust introduces ethical tensions. As described by Singh et al [<xref ref-type="bibr" rid="ref77">77</xref>], the ethical paradox in medical practices arises when efforts to enhance health care through AI risk diminishing the humanistic aspect of care, such as empathy and interpersonal interactions, thereby potentially provoking skepticism and decreasing trust. Reflecting on scenarios 1 and 2, AI could be deployed for triaging or identifying patients at risk. As such, AI applications may allow clinicians to focus their limited time on more complex and emotionally sensitive cases, potentially improving overall effectiveness of care and patient well-being, which were expectations that were reported earlier as well [<xref ref-type="bibr" rid="ref37">37</xref>]. However, fully trusting AI may lead to dependency on AI or potential loss of human oversight. Similar concerns were also shared during the cocreation workshop. Patient representatives, among other stakeholders, confirmed that a human-in-the-loop was, in their opinion, still required. This idea was also shared in earlier research [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>], although in other reports, patients have also indicated that autonomous AI could be acceptable on the condition that AI outperforms a radiologist [<xref ref-type="bibr" rid="ref18">18</xref>]. Yet, patient representatives and other stakeholders seemed to have a positive attitude toward AI-assisted triaging, especially because the ultimate decision remains with a radiologist. 
The explainability of the AI-based results of the triaging could further help gain clinicians' confidence [<xref ref-type="bibr" rid="ref53">53</xref>]. Discussions suggest that following these conditions, applications of AI in radiology may contribute to enhancing efficiency without compromising safety or trust. Based on the links between effectiveness, trust, explainability, and the humanistic aspect of care, we recommend designing AI systems that support clinicians, ensuring human-led decision-making and communication, while leveraging AI for effectiveness.</p></sec><sec id="s4-2-2"><title>Responsibilities in Clinical Decision-Making</title><p>Participants in the workshop highlighted that there should not be conflicts of interest in the case of responsibilities. In other words, in environments where people must act responsibly to, for example, safeguard patients&#x2019; health, one should not be incentivized to act differently. For this reason, some participants emphasized that generating evidence for AI in early stages of development and real-world testing of AI should be performed by 2 distinct parties. Whereas industry was seen as responsible for product (co)development and early evidence generation, real-world implementation, thorough (local) testing, and longitudinal evaluation should be performed by independent hospital personnel, potentially in collaboration with more technically oriented personnel such as clinical physicists or technical physicians [<xref ref-type="bibr" rid="ref81">81</xref>]. Furthermore, there was a seeming distrust of commercial parties to perform their own prospective evaluation. Earlier research focused on <italic>how</italic> to perform postinstallation evaluation [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref82">82</xref>-<xref ref-type="bibr" rid="ref84">84</xref>]. 
However, this research did not dive into <italic>who</italic> should do so and what the possible impact on interconnected themes such as trust could be. Specifically, industry involvement should be limited in prospective evaluation, such that thorough evaluation can be performed with as little conflict of interest as possible. Our research demonstrates preliminary evidence that independent evaluation of AI in a clinical setting is pivotal to enhancing trust, and that trust can be achieved only by proper distribution of responsibilities along the innovation, installation, and evaluation chain. Therefore, we recommend that evaluation of AI is performed by hospital staff or independent external parties.</p></sec><sec id="s4-2-3"><title>Diagnosis as a One-Off Versus an Iterative Interpretation</title><p>The proposed function of the AI within the radiology system varied within our workshop, ranging from screening to acting as a copilot for the radiologist. AI tools in medical imaging have traditionally functioned as narrow, outcome-focused instruments, for example, detecting polyps, segmenting tumors, or flagging anomalies in isolation [<xref ref-type="bibr" rid="ref85">85</xref>]. However, radiology is fundamentally a process of evolving clinical interpretation, where patients&#x2019; histories, laboratory results, physical examinations, and ongoing changes in condition contextualize each series of images [<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>]. Multimodal data integration could enable AI to move beyond static predictions and could facilitate evolving model outputs [<xref ref-type="bibr" rid="ref88">88</xref>]. Furthermore, the inclusion of large language models (LLMs) could provide narrative understanding, enabling AI to synthesize diverse inputs into coherent clinical insights [<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref91">91</xref>]. 
In turn, a radiologist can check the output of the LLM or interact with the LLM, which could lead to further explainability and could foster trust, which also links back to emerging theme 1. This convergence transforms AI from a tool that offers answers into one that participates in reasoning. Importantly, this shift could allow AI to align more closely with the clinician&#x2019;s cognitive workflow, adapting dynamically as new data come in [<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref91">91</xref>]. It could also open the door to more explainable and human-centered AI systems that support, not replace, clinical judgment, as was also proposed earlier [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>]. Thus, we recommend prioritizing iterative and interactive AI systems where radiologists can validate and refine AI-generated insights, ensuring that AI acts as a reasoning partner rather than a black box tool.</p><p>Moreover, the participants deemed the integrability of AI tools within existing hospital systems as a facilitator for successful adoption, as was also reported in earlier research [<xref ref-type="bibr" rid="ref59">59</xref>]. Integrating these capabilities into unified platforms, rather than isolated tools, could thus further aid in the commercialization of AI in radiology. This process of platformization supports scalability, continuous learning, and interoperability [<xref ref-type="bibr" rid="ref69">69</xref>], potentially enabling a shift from a one-off solution to comprehensive decision support ecosystems that are clinically and economically sustainable. Finally, participants regarded having multiple vendors as beneficial to the health care system, encouraging competition in either performance or costs, potentially benefiting patients. 
To support this, platforms should remain vendor-neutral, allowing easy integration from multiple providers. Whereas earlier research recommends four components: (1) (automatic) system triggering, (2) retrieval of relevant imaging series, (3) application of algorithm pipeline, and (4) availability of the results in Picture Archiving and Communication System or dedicated viewing software [<xref ref-type="bibr" rid="ref92">92</xref>], we recommend conducting more research in the direction of cooperative guidelines that allow vendors to provide innovative solutions while remaining flexible for competition, potentially including regulatory sandbox environments, adaptive regulations, and value-based procurement.</p></sec><sec id="s4-2-4"><title>Regulation as a Requirement or as a Restriction</title><p>In some discussions, the role of regulations came forward. While industry employees regarded regulations as potentially impeding innovation, patient representatives regarded regulations as a requirement to share data. This paradoxical friction was also described in a recently published paper. According to Singh et al [<xref ref-type="bibr" rid="ref77">77</xref>], the solution seems to be to strike the right balance between data accessibility and the obligation to safeguard patient confidentiality and to adhere to regulatory frameworks. This finding, previously reported among medical practitioners, was also identified in patient representatives, further confirming and reinforcing its broader relevance. The data paradox is quite complex: on the one hand, patient representatives in our workshop mostly preferred to retain ownership of their data. On the other hand, industry requires a diverse and up-to-date dataset, such that the AI products they develop generalize sufficiently well on various populations and do not suffer from data drift [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. 
In light of this data paradox, future work should explore how hospital-private sector collaborations can be structured to enable responsible data sharing, with strong privacy safeguards, transparency, and mutual benefit for both public and private stakeholders. Recent global frameworks, such as the World Health Organization&#x2019;s regulatory pillars for AI in health [<xref ref-type="bibr" rid="ref93">93</xref>], the EU AI Act&#x2019;s classification of medical imaging AI as &#x201C;high-risk&#x201D; [<xref ref-type="bibr" rid="ref94">94</xref>], and international consensus initiatives such as FUTURE-AI [<xref ref-type="bibr" rid="ref26">26</xref>], emphasize that striking this balance is not only desirable but also essential for compliance and trustworthiness. These frameworks advocate transparency, human oversight, and robust governance, which can guide collaborations toward ethical and legally sound data sharing.</p></sec><sec id="s4-2-5"><title>Economic Benefits or Drawbacks</title><p>During the workshop, participants raised the question of whether it was desirable for companies to earn money using voluntarily provided individual patient data without patient populations benefiting. A competitive landscape was described by the participants that could solve these issues. First of all, multiple vendors compete for the best product. The participants argued that this aspect should lead to better health outcomes for individual patients, linking to earlier argued ethical justification of the use of patient data if companies contribute to public health benefits [<xref ref-type="bibr" rid="ref95">95</xref>]. In other words, the profits earned using patient data are reinvested in the companies to improve existing products or to create new products, which benefit both public and private stakeholders. Second, vendors can compete for the lowest price, which benefits the health care system as a whole, as price competition can lead to lower health care costs. 
At the same time, a diverse vendor landscape may help mitigate risks associated with vendor lock-in [<xref ref-type="bibr" rid="ref96">96</xref>], ensuring that health care providers retain flexibility in choosing and switching between solutions as technology evolves and needs change. However, without more studies rigorously evaluating AI systems in real-world clinical settings, it is difficult to substantiate claims of added value [<xref ref-type="bibr" rid="ref74">74</xref>]. We therefore advocate for more studies that evaluate AI in a real-world setting and that include multiple angles of value creation, such as decreased reporting times, added number of relevant incidental findings, and the prevention of unnecessary follow-up scans or interventions. This should lead to elaborated business models for both vendors (suppliers) and hospitals (buyers). Upon positive results, these studies could further facilitate AI adoption in clinics and provide vendors with useful insights about the possibility to generate revenue with their products. One example comes from Brix et al [<xref ref-type="bibr" rid="ref11">11</xref>], who found that AI-based image reconstruction could lead to improved patient throughput, thereby obviating the need for an additional scanner. Such insights can help vendors to demonstrate the added value of their AI solutions, which may justify premium pricing and unlock new business models, especially when validated in real-world clinical environments.</p></sec></sec><sec id="s4-3"><title>More Information at the Cost of Privacy</title><p>Participants worried about data privacy, especially over the sharing of diagnostic and prognostic information inferred from AI with insurance companies, hinting that insurance companies might misuse this information. A recent review by Botha et al [<xref ref-type="bibr" rid="ref97">97</xref>] also endeavored to examine the implication of AI tools on insurance. 
Whereas many aspects concerning insurance coverage were discussed, including the potential rise in insurance costs with the introduction of AI in clinical settings, second-order effects such as sharing information of future disease with insurance companies were underrepresented [<xref ref-type="bibr" rid="ref97">97</xref>]. A perspective paper from 2018 by Char et al [<xref ref-type="bibr" rid="ref60">60</xref>] warned of this looming issue, and our results demonstrate that these worries are still unresolved. Thus, a deeper dive into this specific topic seems warranted. Emerging literature confirms these concerns, highlighting risks of insurance-related discrimination from predictive analytics [<xref ref-type="bibr" rid="ref98">98</xref>]. Regulatory frameworks such as World Health Organization&#x2019;s guidance [<xref ref-type="bibr" rid="ref93">93</xref>] and the EU AI Act [<xref ref-type="bibr" rid="ref94">94</xref>] stress the need for strong governance and transparency to prevent misuse of sensitive health data. Incorporating these principles into practice could help mitigate ethical risks while enabling responsible innovation.</p></sec><sec id="s4-4"><title>Environmental Considerations</title><p>Another worry that came forward was the energy consumption of AI, which according to the participants potentially makes it an environmentally unsustainable solution. Whereas it is likely a correct assumption that adding AI to the radiology workflow increases energy consumption compared with a workflow without AI, efforts to make both algorithms and hardware more energy-efficient might help reduce the energy consumption for each inference [<xref ref-type="bibr" rid="ref99">99</xref>]. 
Furthermore, AI could aid in reducing environmentally harmful gadolinium-based contrast agent use [<xref ref-type="bibr" rid="ref100">100</xref>], or can reconstruct images from accelerated (and thus less time-consuming and less energy-consuming) MRI acquisitions [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref101">101</xref>], which both potentially have a net positive effect on the environment, as it may result in fewer MRI systems in use and less energy consumption per scan. Thus, whereas the participants&#x2019; worries are justifiable, these second-order effects also need to be accounted for when assessing environmental impact for AI in radiology.</p></sec><sec id="s4-5"><title>Limitations and Future Directions</title><p>The main limitation of this study is that we draw on a singular empirical case study set in a Dutch tertiary care center. Future studies should revisit our research questions in different countries and look at other care settings, including trans- and extramural settings. These other care centers are of special importance, since multiple participants independently hinted at the potential for extramural imaging facilities. Also, no insurance company employees could attend the workshop, despite efforts to include at least one. Insurance companies are a key stakeholder in the field, so their perspectives are potentially valuable in discussions on the demand side (reimbursement and assessment of added therapeutic value). Next, in future research, we recommend including patient representatives again, as their insights were invaluable to this work. Finally, by checking the audio recordings in full by 2 of the authors (MBS and SH) and refining transcripts and translations, we aimed to minimize potential nuance loss. 
Although some loss of data could still have occurred, these steps helped preserve the richness of the participants&#x2019; contributions as much as possible.</p></sec><sec id="s4-6"><title>Conclusions</title><p>With this paper, we aimed to explore wider stakeholder perspectives on futures of AI in radiology and to discuss the moral and economic considerations, opportunities, and frictions. The interaction between participants resulted in scenarios about an AI copilot, AI-enhanced extramural scanning, and AI-based AD screening. It prompted debates on perspectives that may not have emerged from isolated stakeholder analyses. Seven themes emerged from the analysis: (1) trust and efficiency of AI technologies, (2) responsibilities in clinical decision-making when AI is involved, (3) diagnosis as a one-off versus an iterative process, (4) regulations as a requirement or a restriction, (5) economic benefits or drawbacks, (6) trade-off between amount of information required and patient privacy, and (7) environmental considerations. In reflecting on the 7 emerging themes from the multistakeholder cocreation workshop, 3 overarching topics can be identified that help reflect on the broader implications of AI in radiology.</p><p>First, trust emerged as a foundational concern in human-AI collaboration. Participants emphasized that in their opinion, AI should support, not replace, clinical decision-making. Trust was seen as conditional for AI to be applied in radiology efficiently, meaning that explainability and the continued presence of a human-in-the-loop are deemed important. This human-machine model aligns with radiological diagnoses of iterative nature and was considered essential for safe and ethical implementation. Second, regulatory and ethical safeguards were central to discussions on data use, responsibility, and privacy. 
Stakeholders expressed diverging views: while industry employees saw regulation as a potential restriction, patient representatives viewed regulations as a requirement for data sharing. The friction between innovation and regulation can be dealt with through adaptive regulatory frameworks that evolve alongside technological capabilities while safeguarding patient autonomy and algorithmic fairness. A third topic relates to value creation and sustainability. Participants discussed the potential for AI to improve efficiency, reduce costs, and enhance diagnostic accuracy. However, concerns were raised about incidental findings, data monetization, viability of business models, and the energy demand of AI systems. Platformization and vendor-neutral integration were seen as a way to foster innovation while maintaining affordability and scalability. Moreover, participants advocated for competitive ecosystems where multiple vendors could contribute to better outcomes and lower health care costs.</p><p>Together, these overarching topics offer a lens through which future research and policy can address the complex interplay of technological potential, ethical responsibility, and systemic sustainability in AI-driven radiology. Future research is needed to investigate the generalizability of our findings in different countries and settings, or to address open questions on real-world economic benefit and sustainability.</p></sec></sec></body><back><ack><p>The authors would like to thank their participants for their time and invaluable insights. 
Without them, this study would not have been possible.</p></ack><notes><sec><title>Funding</title><p>This publication is part of the project "Responsible implementation of quantitative MRI" (with project number 18749 of the research program HTSM MVI top-up, which is financed by the Dutch Research Council [NWO]).</p></sec><sec><title>Data Availability</title><p>The datasets analyzed during this study are not publicly available due to data policies. They may be available from the corresponding author (MBS) upon reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>MBS, SH, AK, and WB contributed to methodology design, formal data analysis, and draft of original manuscript. All authors participated in study conceptualization, review, and revision of manuscript.</p></fn><fn fn-type="conflict"><p>None declared</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AD</term><def><p>Alzheimer disease</p></def></def-item><def-item><term id="abb2">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb3">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb4">MRI</term><def><p>magnetic resonance imaging</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hassankhani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Amoukhteh</surname><given-names>M</given-names> </name><name name-style="western"><surname>Valizadeh</surname><given-names>P</given-names> </name><name name-style="western"><surname>Jannatdoust</surname><given-names>P</given-names> </name><name name-style="western"><surname>Sabeghi</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gholamrezanezhad</surname><given-names>A</given-names> 
</name></person-group><article-title>Radiology as a specialty in the era of artificial intelligence: a systematic review and meta-analysis on medical students, radiology trainees, and radiologists</article-title><source>Acad Radiol</source><year>2024</year><month>01</month><volume>31</volume><issue>1</issue><fpage>306</fpage><lpage>321</lpage><pub-id pub-id-type="doi">10.1016/j.acra.2023.05.024</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goisauf</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cano Abad&#x00ED;a</surname><given-names>M</given-names> </name></person-group><article-title>Ethics of AI in radiology: a review of ethical and societal implications</article-title><source>Front Big Data</source><year>2022</year><volume>5</volume><fpage>850383</fpage><pub-id pub-id-type="doi">10.3389/fdata.2022.850383</pub-id><pub-id pub-id-type="medline">35910490</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hosny</surname><given-names>A</given-names> </name><name name-style="western"><surname>Parmar</surname><given-names>C</given-names> </name><name name-style="western"><surname>Quackenbush</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schwartz</surname><given-names>LH</given-names> </name><name name-style="western"><surname>Aerts</surname><given-names>HJWL</given-names> </name></person-group><article-title>Artificial intelligence in radiology</article-title><source>Nat Rev Cancer</source><year>2018</year><month>08</month><volume>18</volume><issue>8</issue><fpage>500</fpage><lpage>510</lpage><pub-id pub-id-type="doi">10.1038/s41568-018-0016-5</pub-id><pub-id pub-id-type="medline">29777175</pub-id></nlm-citation></ref><ref 
id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mazurowski</surname><given-names>MA</given-names> </name></person-group><article-title>Artificial intelligence may cause a significant disruption to the radiology workforce</article-title><source>J Am Coll Radiol</source><year>2019</year><month>08</month><volume>16</volume><issue>8</issue><fpage>1077</fpage><lpage>1082</lpage><pub-id pub-id-type="doi">10.1016/j.jacr.2019.01.026</pub-id><pub-id pub-id-type="medline">30975611</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>T</given-names> </name><name name-style="western"><surname>Gu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>A foundation model for joint segmentation, detection and recognition of biomedical objects across nine modalities</article-title><source>Nat Methods</source><year>2025</year><month>01</month><volume>22</volume><issue>1</issue><fpage>166</fpage><lpage>176</lpage><pub-id pub-id-type="doi">10.1038/s41592-024-02499-w</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rezazade Mehrizi</surname><given-names>MH</given-names> </name><name name-style="western"><surname>van Ooijen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Homan</surname><given-names>M</given-names> </name></person-group><article-title>Applications of artificial intelligence (AI) in diagnostic radiology: a technography study</article-title><source>Eur 
Radiol</source><year>2021</year><month>04</month><volume>31</volume><issue>4</issue><fpage>1805</fpage><lpage>1811</lpage><pub-id pub-id-type="doi">10.1007/s00330-020-07230-9</pub-id><pub-id pub-id-type="medline">32945967</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Drogt</surname><given-names>J</given-names> </name><name name-style="western"><surname>Milota</surname><given-names>M</given-names> </name><name name-style="western"><surname>Veldhuis</surname><given-names>W</given-names> </name><name name-style="western"><surname>Vos</surname><given-names>S</given-names> </name><name name-style="western"><surname>Jongsma</surname><given-names>K</given-names> </name></person-group><article-title>The promise of AI for image-driven medicine: qualitative interview study of radiologists&#x2019; and pathologists&#x2019; perspectives</article-title><source>JMIR Hum Factors</source><year>2024</year><month>11</month><day>21</day><volume>11</volume><fpage>e52514</fpage><pub-id pub-id-type="doi">10.2196/52514</pub-id><pub-id pub-id-type="medline">39570627</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Najjar</surname><given-names>R</given-names> </name></person-group><article-title>Redefining radiology: a review of artificial intelligence integration in medical imaging</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>08</month><day>25</day><volume>13</volume><issue>17</issue><fpage>2760</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13172760</pub-id><pub-id pub-id-type="medline">37685300</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Fasterholdt</surname><given-names>I</given-names> </name><name name-style="western"><surname>Naghavi-Behzad</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rasmussen</surname><given-names>BSB</given-names> </name><etal/></person-group><article-title>Value assessment of artificial intelligence in medical imaging: a scoping review</article-title><source>BMC Med Imaging</source><year>2022</year><month>10</month><day>31</day><volume>22</volume><issue>1</issue><fpage>187</fpage><pub-id pub-id-type="doi">10.1186/s12880-022-00918-y</pub-id><pub-id pub-id-type="medline">36316665</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hofmann</surname><given-names>P</given-names> </name><name name-style="western"><surname>Oesterle</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rust</surname><given-names>P</given-names> </name><name name-style="western"><surname>Urbach</surname><given-names>N</given-names> </name></person-group><article-title>Machine learning approaches along the radiology value chain&#x2014;rethinking value propositions</article-title><access-date>2026-02-09</access-date><conf-name>Proceedings of the 27th European Conference on Information Systems (ECIS) Stockholm &#x0026; Uppsala</conf-name><conf-date>Jun 8-14, 2019</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://aisel.aisnet.org/ecis2019_rp/158/">https://aisel.aisnet.org/ecis2019_rp/158/</ext-link></comment></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brix</surname><given-names>MAK</given-names> </name><name name-style="western"><surname>J&#x00E4;rvinen</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Bode</surname><given-names>MK</given-names> </name><etal/></person-group><article-title>Financial impact of incorporating deep learning reconstruction into magnetic resonance imaging routine</article-title><source>Eur J Radiol</source><year>2024</year><month>06</month><volume>175</volume><fpage>111434</fpage><pub-id pub-id-type="doi">10.1016/j.ejrad.2024.111434</pub-id><pub-id pub-id-type="medline">38520806</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bharadwaj</surname><given-names>P</given-names> </name><name name-style="western"><surname>Nicola</surname><given-names>L</given-names> </name><name name-style="western"><surname>Breau-Brunel</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Unlocking the value: quantifying the return on investment of hospital artificial intelligence</article-title><source>J Am Coll Radiol</source><year>2024</year><month>10</month><volume>21</volume><issue>10</issue><fpage>1677</fpage><lpage>1685</lpage><pub-id pub-id-type="doi">10.1016/j.jacr.2024.02.034</pub-id><pub-id pub-id-type="medline">38499053</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharon</surname><given-names>T</given-names> </name></person-group><article-title>When digital health meets digital capitalism, how many common goods are at stake?</article-title><source>Big Data Soc</source><year>2018</year><month>07</month><volume>5</volume><issue>2</issue><pub-id pub-id-type="doi">10.1177/2053951718819032</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Raz</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Minari</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schicktanz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sharon</surname><given-names>T</given-names> </name><name name-style="western"><surname>Werner-Felmayer</surname><given-names>G</given-names> </name></person-group><article-title>Editorial: data-intensive medicine and healthcare: ethical and social implications in the era of artificial intelligence and automated decision-making</article-title><source>Front Genet</source><year>2023</year><month>09</month><day>7</day><volume>14</volume><pub-id pub-id-type="doi">10.3389/fgene.2023.1280344</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name><name name-style="western"><surname>Bayrak</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence and human trust in healthcare: focus on clinicians</article-title><source>J Med Internet Res</source><year>2020</year><month>06</month><day>19</day><volume>22</volume><issue>6</issue><fpage>e15154</fpage><pub-id pub-id-type="doi">10.2196/15154</pub-id><pub-id pub-id-type="medline">32558657</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shevtsova</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>A</given-names> </name><name name-style="western"><surname>Boot</surname><given-names>IWA</given-names> </name><etal/></person-group><article-title>Trust in and acceptance of artificial intelligence applications in medicine: mixed methods 
study</article-title><source>JMIR Hum Factors</source><year>2024</year><month>01</month><day>17</day><volume>11</volume><fpage>e47031</fpage><pub-id pub-id-type="doi">10.2196/47031</pub-id><pub-id pub-id-type="medline">38231544</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rivard</surname><given-names>L</given-names> </name><name name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name></person-group><article-title>When desirability and feasibility go hand in hand: innovators&#x2019; perspectives on what is and is not responsible innovation in health</article-title><source>Journal of Responsible Innovation</source><year>2020</year><month>01</month><day>2</day><volume>7</volume><issue>1</issue><fpage>76</fpage><lpage>95</lpage><pub-id pub-id-type="doi">10.1080/23299460.2019.1622952</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fransen</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Kwee</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Rouw</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Patient perspectives on the use of artificial intelligence in prostate cancer diagnosis on MRI</article-title><source>Eur Radiol</source><year>2024</year><month>08</month><day>14</day><volume>35</volume><issue>2</issue><fpage>769</fpage><lpage>775</lpage><pub-id pub-id-type="doi">10.1007/s00330-024-11012-y</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hajiheydari</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Delgosha</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Saheb</surname><given-names>T</given-names> </name></person-group><article-title>AI in medical diagnosis: a contextualised study of patient motivations and concerns</article-title><source>Soc Sci Med</source><year>2025</year><month>04</month><volume>371</volume><fpage>117850</fpage><pub-id pub-id-type="doi">10.1016/j.socscimed.2025.117850</pub-id><pub-id pub-id-type="medline">40081168</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ongena</surname><given-names>YP</given-names> </name><name name-style="western"><surname>Haan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yakar</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kwee</surname><given-names>TC</given-names> </name></person-group><article-title>Patients&#x2019; views on the implementation of artificial intelligence in radiology: development and validation of a standardized questionnaire</article-title><source>Eur Radiol</source><year>2020</year><month>02</month><volume>30</volume><issue>2</issue><fpage>1033</fpage><lpage>1040</lpage><pub-id pub-id-type="doi">10.1007/s00330-019-06486-0</pub-id><pub-id pub-id-type="medline">31705254</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Becker</surname><given-names>CD</given-names> </name><name name-style="western"><surname>Kotter</surname><given-names>E</given-names> </name><name name-style="western"><surname>Fournier</surname><given-names>L</given-names> </name><name name-style="western"><surname>Mart&#x00ED;-Bonmat&#x00ED;</surname><given-names>L</given-names> </name><collab>European Society of Radiology 
(ESR)</collab></person-group><article-title>Current practical experience with artificial intelligence in clinical radiology: a survey of the European Society of Radiology</article-title><source>Insights Imaging</source><year>2022</year><month>12</month><volume>13</volume><issue>1</issue><fpage>107</fpage><pub-id pub-id-type="doi">10.1186/s13244-022-01247-y</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brady</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Allen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Chong</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Developing, purchasing, implementing and monitoring AI tools in radiology: practical considerations. A multi-society statement from the ACR, CAR, ESR, RANZCR &#x0026; RSNA</article-title><source>Insights Imaging</source><year>2024</year><volume>15</volume><issue>1</issue><fpage>16</fpage><pub-id pub-id-type="doi">10.1186/s13244-023-01541-3</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Owen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Stilgoe</surname><given-names>J</given-names> </name><name name-style="western"><surname>Macnaghten</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gorman</surname><given-names>M</given-names> </name><name name-style="western"><surname>Fisher</surname><given-names>E</given-names> </name><name name-style="western"><surname>Guston</surname><given-names>D</given-names> </name></person-group><article-title>A framework for responsible innovation</article-title><source>Responsible Innovation: Managing the Responsible Emergence of Science and Innovation in 
Society</source><year>2013</year><publisher-name>Wiley</publisher-name><fpage>27</fpage><lpage>50</lpage><pub-id pub-id-type="doi">10.1002/9781118551424</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stilgoe</surname><given-names>J</given-names> </name><name name-style="western"><surname>Owen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Macnaghten</surname><given-names>P</given-names> </name></person-group><article-title>Developing a framework for responsible innovation</article-title><source>Res Policy</source><year>2013</year><month>11</month><volume>42</volume><issue>9</issue><fpage>1568</fpage><lpage>1580</lpage><pub-id pub-id-type="doi">10.1016/j.respol.2013.05.008</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vo</surname><given-names>V</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>G</given-names> </name><name name-style="western"><surname>Aquino</surname><given-names>YSJ</given-names> </name><name name-style="western"><surname>Carter</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Do</surname><given-names>QN</given-names> </name><name name-style="western"><surname>Woode</surname><given-names>ME</given-names> </name></person-group><article-title>Multi-stakeholder preferences for the use of artificial intelligence in healthcare: A systematic review and thematic analysis</article-title><source>Soc Sci Med</source><year>2023</year><month>12</month><volume>338</volume><fpage>116357</fpage><pub-id pub-id-type="doi">10.1016/j.socscimed.2023.116357</pub-id><pub-id pub-id-type="medline">37949020</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lekadir</surname><given-names>K</given-names> </name><name name-style="western"><surname>Frangi</surname><given-names>AF</given-names> </name><name name-style="western"><surname>Porras</surname><given-names>AR</given-names> </name><etal/></person-group><article-title>FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare</article-title><source>BMJ</source><year>2025</year><month>02</month><day>5</day><volume>388</volume><fpage>e081554</fpage><pub-id pub-id-type="doi">10.1136/bmj-2024-081554</pub-id><pub-id pub-id-type="medline">39909534</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Ryan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kasun</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hogg</surname><given-names>J</given-names> </name><name name-style="western"><surname>Dunn</surname><given-names>LB</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>LW</given-names> </name></person-group><article-title>Physicians&#x2019; and machine learning researchers&#x2019; perspectives on ethical issues in the early development of clinical machine learning tools: qualitative interview study</article-title><source>JMIR AI</source><year>2023</year><month>10</month><day>30</day><volume>2</volume><fpage>e47449</fpage><pub-id pub-id-type="doi">10.2196/47449</pub-id><pub-id pub-id-type="medline">38875536</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van 
Hees</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schilder</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Keyser</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sbrizzi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kleinloog</surname><given-names>JPD</given-names> </name><name name-style="western"><surname>Boon</surname><given-names>WPC</given-names> </name></person-group><article-title>Exploring scenarios for implementing fast quantitative MRI</article-title><source>Eur J Radiol Open</source><year>2025</year><month>06</month><volume>14</volume><fpage>100658</fpage><pub-id pub-id-type="doi">10.1016/j.ejro.2025.100658</pub-id><pub-id pub-id-type="medline">40469716</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Strohm</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hehakaya</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ranschaert</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Boon</surname><given-names>WPC</given-names> </name><name name-style="western"><surname>Moors</surname><given-names>EHM</given-names> </name></person-group><article-title>Implementation of artificial intelligence (AI) applications in radiology: hindering and facilitating factors</article-title><source>Eur Radiol</source><year>2020</year><month>10</month><volume>30</volume><issue>10</issue><fpage>5525</fpage><lpage>5532</lpage><pub-id pub-id-type="doi">10.1007/s00330-020-06946-y</pub-id><pub-id pub-id-type="medline">32458173</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Haque</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Islam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mikalef</surname><given-names>P</given-names> </name></person-group><article-title>Explainable Artificial Intelligence (XAI) from a user perspective: a synthesis of prior literature and problematizing avenues for future research</article-title><source>Technol Forecast Soc Change</source><year>2023</year><month>01</month><volume>186</volume><fpage>122120</fpage><pub-id pub-id-type="doi">10.1016/j.techfore.2022.122120</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ene</surname><given-names>IC</given-names> </name><name name-style="western"><surname>Arabi Belaghi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Koff</surname><given-names>D</given-names> </name><name name-style="western"><surname>Stein</surname><given-names>N</given-names> </name><name name-style="western"><surname>Santaguida</surname><given-names>P</given-names> </name></person-group><article-title>Stakeholders&#x2019; perspectives on the future of artificial intelligence in radiology: a scoping review</article-title><source>Eur Radiol</source><year>2022</year><month>03</month><volume>32</volume><issue>3</issue><fpage>1477</fpage><lpage>1495</lpage><pub-id pub-id-type="doi">10.1007/s00330-021-08214-z</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lukkien</surname><given-names>DRM</given-names> </name><name name-style="western"><surname>Ipakchian Askari</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Stolwijk</surname><given-names>NE</given-names> </name><etal/></person-group><article-title>Making co-design more responsible: case study on the development of an AI-based decision support system in dementia care</article-title><source>JMIR Hum Factors</source><year>2024</year><month>07</month><day>31</day><volume>11</volume><fpage>e55961</fpage><pub-id pub-id-type="doi">10.2196/55961</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zondag</surname><given-names>AGM</given-names> </name><name name-style="western"><surname>Rozestraten</surname><given-names>R</given-names> </name><name name-style="western"><surname>Grimmelikhuijsen</surname><given-names>SG</given-names> </name><etal/></person-group><article-title>The effect of artificial intelligence on patient-physician trust: cross-sectional vignette study</article-title><source>J Med Internet Res</source><year>2024</year><month>05</month><day>28</day><volume>26</volume><fpage>e50853</fpage><pub-id pub-id-type="doi">10.2196/50853</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pacifico Silva</surname><given-names>H</given-names> </name><name name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>FA</given-names> </name><name name-style="western"><surname>Denis</surname><given-names>JL</given-names> </name></person-group><article-title>Introducing responsible innovation in health: a policy-oriented framework</article-title><source>Health Res Policy Syst</source><year>2018</year><month>09</month><day>10</day><volume>16</volume><issue>1</issue><fpage>90</fpage><pub-id pub-id-type="doi">10.1186/s12961-018-0362-5</pub-id><pub-id 
pub-id-type="medline">30200985</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Greenhalgh</surname><given-names>T</given-names> </name><name name-style="western"><surname>Thorne</surname><given-names>S</given-names> </name><name name-style="western"><surname>Malterud</surname><given-names>K</given-names> </name></person-group><article-title>Time to challenge the spurious hierarchy of systematic over narrative reviews?</article-title><source>Eur J Clin Invest</source><year>2018</year><month>06</month><volume>48</volume><issue>6</issue><pub-id pub-id-type="doi">10.1111/eci.12931</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sukhera</surname><given-names>J</given-names> </name></person-group><article-title>Narrative reviews: flexible, rigorous, and practical</article-title><source>J Grad Med Educ</source><year>2022</year><month>08</month><day>1</day><volume>14</volume><issue>4</issue><fpage>414</fpage><lpage>417</lpage><pub-id pub-id-type="doi">10.4300/JGME-D-22-00480.1</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gundlack</surname><given-names>J</given-names> </name><name name-style="western"><surname>Negash</surname><given-names>S</given-names> </name><name name-style="western"><surname>Thiel</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Artificial intelligence in medical care&#x2014;patients&#x2019; perceptions on caregiving relationships and ethics: a qualitative study</article-title><source>Health Expect</source><year>2025</year><month>04</month><volume>28</volume><issue>2</issue><fpage>e70216</fpage><pub-id 
pub-id-type="doi">10.1111/hex.70216</pub-id><pub-id pub-id-type="medline">40094179</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baghdadi</surname><given-names>LR</given-names> </name><name name-style="western"><surname>Mobeirek</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Alhudaithi</surname><given-names>DR</given-names> </name><etal/></person-group><article-title>Patients&#x2019; attitudes toward the use of artificial intelligence as a diagnostic tool in radiology in Saudi Arabia: cross-sectional study</article-title><source>JMIR Hum Factors</source><year>2024</year><volume>11</volume><fpage>e53108</fpage><pub-id pub-id-type="doi">10.2196/53108</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ongena</surname><given-names>YP</given-names> </name><name name-style="western"><surname>Hommes</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kwee</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Yakar</surname><given-names>D</given-names> </name></person-group><article-title>A qualitative study to understand patient perspective on the use of artificial intelligence in radiology</article-title><source>J Am Coll Radiol</source><year>2019</year><month>10</month><volume>16</volume><issue>10</issue><fpage>1416</fpage><lpage>1419</lpage><pub-id pub-id-type="doi">10.1016/j.jacr.2018.12.043</pub-id><pub-id pub-id-type="medline">30878311</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Rajpurkar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>E</given-names> </name><name name-style="western"><surname>Banerjee</surname><given-names>O</given-names> </name><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>AI in health and medicine</article-title><source>Nat Med</source><year>2022</year><month>01</month><volume>28</volume><issue>1</issue><fpage>31</fpage><lpage>38</lpage><pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yu</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Beam</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Kohane</surname><given-names>IS</given-names> </name></person-group><article-title>Artificial intelligence in healthcare</article-title><source>Nat Biomed Eng</source><year>2018</year><month>10</month><volume>2</volume><issue>10</issue><fpage>719</fpage><lpage>731</lpage><pub-id pub-id-type="doi">10.1038/s41551-018-0305-z</pub-id><pub-id pub-id-type="medline">31015651</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Trivedi</surname><given-names>H</given-names> </name></person-group><article-title>The business of artificial intelligence in radiology has little to do with radiologists</article-title><source>J Am Coll Radiol</source><year>2022</year><month>04</month><volume>19</volume><issue>4</issue><fpage>564</fpage><lpage>566</lpage><pub-id pub-id-type="doi">10.1016/j.jacr.2022.01.006</pub-id><pub-id pub-id-type="medline">35176248</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mayo</surname><given-names>RC</given-names> </name><name name-style="western"><surname>Chang Sen</surname><given-names>LQ</given-names> </name><name name-style="western"><surname>Leung</surname><given-names>JWT</given-names> </name></person-group><article-title>Financing artificial intelligence in medical imaging: show me the money</article-title><source>J Am Coll Radiol</source><year>2020</year><month>01</month><volume>17</volume><issue>1 Pt B</issue><fpage>175</fpage><lpage>177</lpage><pub-id pub-id-type="doi">10.1016/j.jacr.2019.07.004</pub-id><pub-id pub-id-type="medline">31918877</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gille</surname><given-names>F</given-names> </name><name name-style="western"><surname>Jobin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ienca</surname><given-names>M</given-names> </name></person-group><article-title>What we talk about when we talk about trust: theory of trust for AI in healthcare</article-title><source>Intelligence-Based Medicine</source><year>2020</year><month>11</month><volume>1-2</volume><fpage>100001</fpage><pub-id pub-id-type="doi">10.1016/j.ibmed.2020.100001</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Steerling</surname><given-names>E</given-names> </name><name name-style="western"><surname>Siira</surname><given-names>E</given-names> </name><name name-style="western"><surname>Nilsen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>J</given-names> 
</name></person-group><article-title>Implementing AI in healthcare-the relevance of trust: a scoping review</article-title><source>Front Health Serv</source><year>2023</year><volume>3</volume><fpage>1211150</fpage><pub-id pub-id-type="doi">10.3389/frhs.2023.1211150</pub-id><pub-id pub-id-type="medline">37693234</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fernandez-Quilez</surname><given-names>A</given-names> </name></person-group><article-title>Deep learning in radiology: ethics of data and on the value of algorithm transparency, interpretability and explainability</article-title><source>AI Ethics</source><year>2023</year><month>02</month><volume>3</volume><issue>1</issue><fpage>257</fpage><lpage>265</lpage><pub-id pub-id-type="doi">10.1007/s43681-022-00161-9</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Daly</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Wiewiora</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hearn</surname><given-names>G</given-names> </name></person-group><article-title>Shifting attitudes and trust in AI: influences on organizational AI adoption</article-title><source>Technol Forecast Soc Change</source><year>2025</year><month>06</month><volume>215</volume><fpage>124108</fpage><pub-id pub-id-type="doi">10.1016/j.techfore.2025.124108</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tahtali</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Snijders</surname><given-names>CCP</given-names> </name><name 
name-style="western"><surname>Dirne</surname><given-names>C</given-names> </name><name name-style="western"><surname>Le Blanc</surname><given-names>PM</given-names> </name></person-group><article-title>Prioritizing trust in podiatrists&#x2019; preference for AI in supportive roles over diagnostic roles in health care: qualitative interview and focus group study</article-title><source>JMIR Hum Factors</source><year>2025</year><month>02</month><day>21</day><volume>12</volume><fpage>e59010</fpage><pub-id pub-id-type="doi">10.2196/59010</pub-id><pub-id pub-id-type="medline">39983118</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aldhafeeri</surname><given-names>FM</given-names> </name></person-group><article-title>Navigating the ethical landscape of artificial intelligence in radiography: a cross-sectional study of radiographers&#x2019; perspectives</article-title><source>BMC Med Ethics</source><year>2024</year><month>05</month><day>11</day><volume>25</volume><issue>1</issue><fpage>52</fpage><pub-id pub-id-type="doi">10.1186/s12910-024-01052-w</pub-id><pub-id pub-id-type="medline">38734602</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Linguraru</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Bakas</surname><given-names>S</given-names> </name><name name-style="western"><surname>Aboian</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Clinical, cultural, computational, and regulatory considerations to deploy AI in radiology: perspectives of RSNA and MICCAI experts</article-title><source>Radiol Artif Intell</source><year>2024</year><month>07</month><volume>6</volume><issue>4</issue><fpage>e240225</fpage><pub-id 
pub-id-type="doi">10.1148/ryai.240225</pub-id><pub-id pub-id-type="medline">38984986</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>RJ</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Williamson</surname><given-names>DFK</given-names> </name><etal/></person-group><article-title>Algorithmic fairness in artificial intelligence for medicine and healthcare</article-title><source>Nat Biomed Eng</source><year>2023</year><month>06</month><volume>7</volume><issue>6</issue><fpage>719</fpage><lpage>742</lpage><pub-id pub-id-type="doi">10.1038/s41551-023-01056-8</pub-id><pub-id pub-id-type="medline">37380750</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tejani</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>YS</given-names> </name><name name-style="western"><surname>Xi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Rayan</surname><given-names>JC</given-names> </name></person-group><article-title>Understanding and mitigating bias in imaging artificial intelligence</article-title><source>Radiographics</source><year>2024</year><month>05</month><volume>44</volume><issue>5</issue><fpage>e230067</fpage><pub-id pub-id-type="doi">10.1148/rg.230067</pub-id><pub-id pub-id-type="medline">38635456</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Din</surname><given-names>M</given-names> </name><name name-style="western"><surname>Daga</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Saoud</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Clinicians&#x2019; perspectives on the use of artificial intelligence to triage MRI brain scans</article-title><source>Eur J Radiol</source><year>2025</year><month>02</month><volume>183</volume><fpage>111921</fpage><pub-id pub-id-type="doi">10.1016/j.ejrad.2025.111921</pub-id><pub-id pub-id-type="medline">39805194</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Blasimme</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name><name name-style="western"><surname>Frey</surname><given-names>D</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><collab>Precise4Q consortium</collab></person-group><article-title>Explainability for artificial intelligence in healthcare: a multidisciplinary perspective</article-title><source>BMC Med Inform Decis Mak</source><year>2020</year><month>11</month><day>30</day><volume>20</volume><issue>1</issue><fpage>310</fpage><pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id><pub-id pub-id-type="medline">33256715</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Siala</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name></person-group><article-title>SHIFTing artificial intelligence to be responsible in healthcare: a systematic review</article-title><source>Soc Sci Med</source><year>2022</year><month>03</month><volume>296</volume><fpage>114782</fpage><pub-id 
pub-id-type="doi">10.1016/j.socscimed.2022.114782</pub-id><pub-id pub-id-type="medline">35152047</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Martiniussen</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Larsen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Larsen</surname><given-names>ASF</given-names> </name><etal/></person-group><article-title>Norwegian radiologists&#x2019; expectations of artificial intelligence in mammographic screening&#x2014;a cross-sectional survey</article-title><source>Eur J Radiol</source><year>2023</year><month>10</month><volume>167</volume><fpage>111061</fpage><pub-id pub-id-type="doi">10.1016/j.ejrad.2023.111061</pub-id><pub-id pub-id-type="medline">37657381</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fehr</surname><given-names>J</given-names> </name><name name-style="western"><surname>Citro</surname><given-names>B</given-names> </name><name name-style="western"><surname>Malpani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lippert</surname><given-names>C</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name></person-group><article-title>A trustworthy AI reality-check: the lack of transparency of artificial intelligence products in healthcare</article-title><source>Front Digit Health</source><year>2024</year><month>02</month><day>20</day><volume>6</volume><pub-id pub-id-type="doi">10.3389/fdgth.2024.1267290</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Martin</surname><given-names>C</given-names> </name><name name-style="western"><surname>DeStefano</surname><given-names>K</given-names> </name><name name-style="western"><surname>Haran</surname><given-names>H</given-names> </name><etal/></person-group><article-title>The ethical considerations including inclusion and biases, data protection, and proper implementation among AI in radiology and potential implications</article-title><source>Intelligence-Based Medicine</source><year>2022</year><volume>6</volume><fpage>100073</fpage><pub-id pub-id-type="doi">10.1016/j.ibmed.2022.100073</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abell</surname><given-names>B</given-names> </name><name name-style="western"><surname>Naicker</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rodwell</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Identifying barriers and facilitators to successful implementation of computerized clinical decision support systems in hospitals: a NASSS framework-informed scoping review</article-title><source>Implementation Sci</source><year>2023</year><month>07</month><day>26</day><volume>18</volume><issue>1</issue><fpage>32</fpage><pub-id pub-id-type="doi">10.1186/s13012-023-01287-y</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Char</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>NH</given-names> </name><name name-style="western"><surname>Magnus</surname><given-names>D</given-names> </name></person-group><article-title>Implementing machine learning in health care&#x2014;addressing ethical challenges</article-title><source>N Engl J 
Med</source><year>2018</year><month>03</month><day>15</day><volume>378</volume><issue>11</issue><fpage>981</fpage><lpage>983</lpage><pub-id pub-id-type="doi">10.1056/NEJMp1714229</pub-id><pub-id pub-id-type="medline">29539284</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Hees</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kleinloog</surname><given-names>JPD</given-names> </name><name name-style="western"><surname>Sbrizzi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Boon</surname><given-names>WPC</given-names> </name></person-group><article-title>Static Future Technologies, Dynamic Professionalism &#x2014; Co-creating Future Scenarios in Medical Imaging Practices</article-title><source>Postdigit Sci Educ</source><year>2024</year><month>03</month><volume>6</volume><issue>1</issue><fpage>135</fpage><lpage>153</lpage><pub-id pub-id-type="doi">10.1007/s42438-023-00444-2</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lombi</surname><given-names>L</given-names> </name><name name-style="western"><surname>Rossero</surname><given-names>E</given-names> </name></person-group><article-title>How artificial intelligence is reshaping the autonomy and boundary work of radiologists. 
A qualitative study</article-title><source>Sociol Health Illn</source><year>2024</year><month>02</month><volume>46</volume><issue>2</issue><fpage>200</fpage><lpage>218</lpage><pub-id pub-id-type="doi">10.1111/1467-9566.13702</pub-id><pub-id pub-id-type="medline">37573551</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheek</surname><given-names>D</given-names> </name><name name-style="western"><surname>Rezazade Mehrizi</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Ranschaert</surname><given-names>E</given-names> </name></person-group><article-title>Radiologists in the loop: the roles of radiologists in the development of AI applications</article-title><source>Eur Radiol</source><year>2021</year><month>10</month><volume>31</volume><issue>10</issue><fpage>7960</fpage><lpage>7968</lpage><pub-id pub-id-type="doi">10.1007/s00330-021-07879-w</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stogiannos</surname><given-names>N</given-names> </name><name name-style="western"><surname>Walsh</surname><given-names>G</given-names> </name><name name-style="western"><surname>Ohene-Botwe</surname><given-names>B</given-names> </name><etal/></person-group><article-title>R-AI-diographers: a European survey on perceived impact of AI on professional identity, careers, and radiographers&#x2019; roles</article-title><source>Insights Imaging</source><year>2025</year><month>02</month><day>17</day><volume>16</volume><issue>1</issue><fpage>43</fpage><pub-id pub-id-type="doi">10.1186/s13244-025-01918-6</pub-id><pub-id pub-id-type="medline">39962024</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>van Leeuwen</surname><given-names>KG</given-names> </name><name name-style="western"><surname>Schalekamp</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rutten</surname><given-names>M</given-names> </name><name name-style="western"><surname>van Ginneken</surname><given-names>B</given-names> </name><name name-style="western"><surname>de Rooij</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence in radiology: 100 commercially available products and their scientific evidence</article-title><source>Eur Radiol</source><year>2021</year><month>06</month><volume>31</volume><issue>6</issue><fpage>3797</fpage><lpage>3804</lpage><pub-id pub-id-type="doi">10.1007/s00330-021-07892-z</pub-id><pub-id pub-id-type="medline">33856519</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parikh</surname><given-names>RB</given-names> </name><name name-style="western"><surname>Helmchen</surname><given-names>LA</given-names> </name></person-group><article-title>Paying for artificial intelligence in medicine</article-title><source>NPJ Digit Med</source><year>2022</year><month>05</month><day>20</day><volume>5</volume><issue>1</issue><fpage>63</fpage><pub-id pub-id-type="doi">10.1038/s41746-022-00609-6</pub-id><pub-id pub-id-type="medline">35595986</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sengoku</surname><given-names>S</given-names> </name></person-group><article-title>Innovation process and industrial system of US Food and Drug Administration-approved 
software as a medical device: review and content analysis</article-title><source>J Med Internet Res</source><year>2023</year><month>11</month><day>24</day><volume>25</volume><fpage>e47505</fpage><pub-id pub-id-type="doi">10.2196/47505</pub-id><pub-id pub-id-type="medline">37999948</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Joshi</surname><given-names>H</given-names> </name></person-group><article-title>Enabling Next-Gen Healthcare: advanced interoperability and integration with AI, IoMT, and precision medicine</article-title><source>Int Adv Res J Sci Eng Technol</source><year>2021</year><month>01</month><volume>8</volume><issue>1</issue><fpage>89</fpage><lpage>95</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://iarjset.com/issues/volume-8-issue-1-january-2021">https://iarjset.com/issues/volume-8-issue-1-january-2021</ext-link></comment><pub-id pub-id-type="doi">10.17148/IARJSET.2021.8116</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Theriault-Lauzier</surname><given-names>P</given-names> </name><name name-style="western"><surname>Cobin</surname><given-names>D</given-names> </name><name name-style="western"><surname>Tastet</surname><given-names>O</given-names> </name><etal/></person-group><article-title>A responsible framework for applying artificial intelligence on medical images and signals at the point of care: the PACS-AI platform</article-title><source>Can J Cardiol</source><year>2024</year><month>10</month><volume>40</volume><issue>10</issue><fpage>1828</fpage><lpage>1840</lpage><pub-id pub-id-type="doi">10.1016/j.cjca.2024.05.025</pub-id><pub-id pub-id-type="medline">38885787</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name><name name-style="western"><surname>Daudelin</surname><given-names>G</given-names> </name><name name-style="western"><surname>Williams-Jones</surname><given-names>B</given-names> </name><name name-style="western"><surname>Denis</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Longo</surname><given-names>C</given-names> </name></person-group><article-title>How do business model and health technology design influence each other? Insights from a longitudinal case study of three academic spin-offs</article-title><source>Res Policy</source><year>2014</year><month>07</month><volume>43</volume><issue>6</issue><fpage>1025</fpage><lpage>1038</lpage><pub-id pub-id-type="doi">10.1016/j.respol.2014.02.001</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Javanmardi</surname><given-names>E</given-names> </name><name name-style="western"><surname>Maresova</surname><given-names>P</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>N</given-names> </name><name name-style="western"><surname>Mierzwiak</surname><given-names>R</given-names> </name></person-group><article-title>Exploring business models for managing uncertainty in healthcare, medical devices, and biotechnology industries</article-title><source>Heliyon</source><year>2024</year><month>02</month><day>29</day><volume>10</volume><issue>4</issue><fpage>e25962</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e25962</pub-id><pub-id pub-id-type="medline">38370194</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van 
Leeuwen</surname><given-names>KG</given-names> </name><name name-style="western"><surname>de Rooij</surname><given-names>M</given-names> </name><name name-style="western"><surname>Schalekamp</surname><given-names>S</given-names> </name><name name-style="western"><surname>van Ginneken</surname><given-names>B</given-names> </name><name name-style="western"><surname>Rutten</surname><given-names>MJCM</given-names> </name></person-group><article-title>How does artificial intelligence in radiology improve efficiency and health outcomes?</article-title><source>Pediatr Radiol</source><year>2022</year><month>10</month><volume>52</volume><issue>11</issue><fpage>2087</fpage><lpage>2093</lpage><pub-id pub-id-type="doi">10.1007/s00247-021-05114-8</pub-id><pub-id pub-id-type="medline">34117522</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lobig</surname><given-names>F</given-names> </name><name name-style="western"><surname>Subramanian</surname><given-names>D</given-names> </name><name name-style="western"><surname>Blankenburg</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sharma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Variyar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Butler</surname><given-names>O</given-names> </name></person-group><article-title>To pay or not to pay for artificial intelligence applications in radiology</article-title><source>NPJ Digit Med</source><year>2023</year><month>06</month><day>23</day><volume>6</volume><issue>1</issue><fpage>117</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00861-4</pub-id><pub-id pub-id-type="medline">37353531</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Alami</surname><given-names>H</given-names> </name><name name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name><name name-style="western"><surname>Denis</surname><given-names>JL</given-names> </name><etal/></person-group><article-title>Organizational readiness for artificial intelligence in health care: insights for decision-making and practice</article-title><source>J Health Organ Manag</source><year>2020</year><month>12</month><day>3</day><volume>35</volume><issue>1</issue><fpage>106</fpage><lpage>114</lpage><pub-id pub-id-type="doi">10.1108/JHOM-03-2020-0074</pub-id><pub-id pub-id-type="medline">33258359</pub-id></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="web"><article-title>Medical research involving human subjects act (WMO)</article-title><source>CCMO</source><access-date>2024-11-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://english.ccmo.nl/investigators/legal-framework-for-medical-scientific-research/laws/medical-research-involving-human-subjects-act-wmo">https://english.ccmo.nl/investigators/legal-framework-for-medical-scientific-research/laws/medical-research-involving-human-subjects-act-wmo</ext-link></comment></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation citation-type="web"><article-title>Guide to the review procedure</article-title><source>CCMO</source><access-date>2024-11-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://english.ccmo.nl/investigators/guide-to-the-review-procedure">https://english.ccmo.nl/investigators/guide-to-the-review-procedure</ext-link></comment></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>N</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Kamal</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Bodhi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>B</given-names> </name></person-group><article-title>Technological paradoxes and artificial intelligence implementation in healthcare. An application of paradox theory</article-title><source>Technol Forecast Soc Change</source><year>2024</year><month>01</month><volume>198</volume><fpage>122967</fpage><pub-id pub-id-type="doi">10.1016/j.techfore.2023.122967</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liew</surname><given-names>C</given-names> </name></person-group><article-title>The future of radiology augmented with artificial intelligence: a strategy for success</article-title><source>Eur J Radiol</source><year>2018</year><month>05</month><volume>102</volume><fpage>152</fpage><lpage>156</lpage><pub-id pub-id-type="doi">10.1016/j.ejrad.2018.03.019</pub-id><pub-id pub-id-type="medline">29685530</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ongena</surname><given-names>YP</given-names> </name><name name-style="western"><surname>Yakar</surname><given-names>D</given-names> </name><name name-style="western"><surname>Haan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kwee</surname><given-names>TC</given-names> </name></person-group><article-title>Artificial intelligence in screening mammography: a population survey of women&#x2019;s preferences</article-title><source>J Am Coll Radiol</source><year>2021</year><month>01</month><volume>18</volume><issue>1 Pt A</issue><fpage>79</fpage><lpage>86</lpage><pub-id 
pub-id-type="doi">10.1016/j.jacr.2020.09.042</pub-id><pub-id pub-id-type="medline">33058789</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lennartz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dratsch</surname><given-names>T</given-names> </name><name name-style="western"><surname>Zopfs</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Use and control of artificial intelligence in patients across the medical workflow: single-center questionnaire study of patient perspectives</article-title><source>J Med Internet Res</source><year>2021</year><month>02</month><day>17</day><volume>23</volume><issue>2</issue><fpage>e24221</fpage><pub-id pub-id-type="doi">10.2196/24221</pub-id><pub-id pub-id-type="medline">33595451</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Groenier</surname><given-names>M</given-names> </name><name name-style="western"><surname>Spijkerboer</surname><given-names>K</given-names> </name><name name-style="western"><surname>Venix</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Evaluation of the impact of technical physicians on improving individual patient care with technology</article-title><source>BMC Med Educ</source><year>2023</year><month>03</month><day>23</day><volume>23</volume><issue>1</issue><fpage>181</fpage><pub-id pub-id-type="doi">10.1186/s12909-023-04137-z</pub-id><pub-id pub-id-type="medline">36959581</pub-id></nlm-citation></ref><ref id="ref82"><label>82</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hadjiiski</surname><given-names>L</given-names> </name><name 
name-style="western"><surname>Cha</surname><given-names>K</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>HP</given-names> </name><etal/></person-group><article-title>AAPM task group report 273: recommendations on best practices for AI and machine learning for computer&#x2010;aided diagnosis in medical imaging</article-title><source>Med Phys</source><year>2023</year><month>02</month><volume>50</volume><issue>2</issue><fpage>e1</fpage><lpage>e24</lpage><pub-id pub-id-type="doi">10.1002/mp.16188</pub-id><pub-id pub-id-type="medline">36565447</pub-id></nlm-citation></ref><ref id="ref83"><label>83</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Geis</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Brady</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>CC</given-names> </name><etal/></person-group><article-title>Ethics of artificial intelligence in radiology: summary of the joint European and North American multisociety statement</article-title><source>Radiology</source><year>2019</year><month>11</month><volume>293</volume><issue>2</issue><fpage>436</fpage><lpage>440</lpage><pub-id pub-id-type="doi">10.1148/radiol.2019191586</pub-id><pub-id pub-id-type="medline">31573399</pub-id></nlm-citation></ref><ref id="ref84"><label>84</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mahmood</surname><given-names>U</given-names> </name><name name-style="western"><surname>Shukla-Dave</surname><given-names>A</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>HP</given-names> </name><etal/></person-group><article-title>Artificial intelligence in medicine: mitigating risks and maximizing benefits via quality assurance, quality control, and acceptance 
testing</article-title><source>BJR Artif Intell</source><year>2024</year><month>01</month><volume>1</volume><issue>1</issue><fpage>ubae003</fpage><pub-id pub-id-type="doi">10.1093/bjrai/ubae003</pub-id><pub-id pub-id-type="medline">38476957</pub-id></nlm-citation></ref><ref id="ref85"><label>85</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Erickson</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Korfiatis</surname><given-names>P</given-names> </name><name name-style="western"><surname>Akkus</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Kline</surname><given-names>TL</given-names> </name></person-group><article-title>Machine learning for medical imaging</article-title><source>Radiographics</source><year>2017</year><volume>37</volume><issue>2</issue><fpage>505</fpage><lpage>515</lpage><pub-id pub-id-type="doi">10.1148/rg.2017160130</pub-id><pub-id pub-id-type="medline">28212054</pub-id></nlm-citation></ref><ref id="ref86"><label>86</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bosmans</surname><given-names>JML</given-names> </name><name name-style="western"><surname>Weyler</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>De Schepper</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Parizel</surname><given-names>PM</given-names> </name></person-group><article-title>The radiology report as seen by radiologists and referring clinicians: results of the COVER and ROVER surveys</article-title><source>Radiology</source><year>2011</year><month>04</month><volume>259</volume><issue>1</issue><fpage>184</fpage><lpage>195</lpage><pub-id pub-id-type="doi">10.1148/radiol.10101045</pub-id><pub-id pub-id-type="medline">21224423</pub-id></nlm-citation></ref><ref id="ref87"><label>87</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><collab>European Society of Radiology (ESR)</collab></person-group><article-title>Good practice for radiological reporting. Guidelines from the European Society of Radiology (ESR)</article-title><source>Insights Imaging</source><year>2011</year><month>04</month><volume>2</volume><issue>2</issue><fpage>93</fpage><lpage>96</lpage><pub-id pub-id-type="doi">10.1007/s13244-011-0066-7</pub-id></nlm-citation></ref><ref id="ref88"><label>88</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>A comprehensive review on synergy of multi-modal data and AI technologies in medical diagnosis</article-title><source>Bioengineering (Basel)</source><year>2024</year><month>02</month><day>25</day><volume>11</volume><issue>3</issue><fpage>219</fpage><pub-id pub-id-type="doi">10.3390/bioengineering11030219</pub-id><pub-id pub-id-type="medline">38534493</pub-id></nlm-citation></ref><ref id="ref89"><label>89</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li&#x00E9;vin</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hother</surname><given-names>CE</given-names> </name><name name-style="western"><surname>Motzfeldt</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Winther</surname><given-names>O</given-names> </name></person-group><article-title>Can large language models reason about medical questions?</article-title><source>Patterns (N Y)</source><year>2024</year><month>03</month><day>8</day><volume>5</volume><issue>3</issue><fpage>100943</fpage><pub-id 
pub-id-type="doi">10.1016/j.patter.2024.100943</pub-id><pub-id pub-id-type="medline">38487804</pub-id></nlm-citation></ref><ref id="ref90"><label>90</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>CY</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>KJ</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>CF</given-names> </name><etal/></person-group><article-title>Towards a holistic framework for multimodal LLM in 3D brain CT radiology report generation</article-title><source>Nat Commun</source><year>2025</year><month>03</month><day>6</day><volume>16</volume><issue>1</issue><fpage>2258</fpage><pub-id pub-id-type="doi">10.1038/s41467-025-57426-0</pub-id><pub-id pub-id-type="medline">40050277</pub-id></nlm-citation></ref><ref id="ref91"><label>91</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Van Veen</surname><given-names>D</given-names> </name><name name-style="western"><surname>Van Uden</surname><given-names>C</given-names> </name><name name-style="western"><surname>Blankemeier</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Adapted large language models can outperform medical experts in clinical text summarization</article-title><source>Nat Med</source><year>2024</year><month>04</month><volume>30</volume><issue>4</issue><fpage>1134</fpage><lpage>1142</lpage><pub-id pub-id-type="doi">10.1038/s41591-024-02855-5</pub-id><pub-id pub-id-type="medline">38413730</pub-id></nlm-citation></ref><ref id="ref92"><label>92</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Leiner</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bennink</surname><given-names>E</given-names> </name><name 
name-style="western"><surname>Mol</surname><given-names>CP</given-names> </name><name name-style="western"><surname>Kuijf</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Veldhuis</surname><given-names>WB</given-names> </name></person-group><article-title>Bringing AI to the clinic: blueprint for a vendor-neutral AI deployment infrastructure</article-title><source>Insights Imaging</source><year>2021</year><month>02</month><day>2</day><volume>12</volume><issue>1</issue><fpage>11</fpage><pub-id pub-id-type="doi">10.1186/s13244-020-00931-1</pub-id><pub-id pub-id-type="medline">33528677</pub-id></nlm-citation></ref><ref id="ref93"><label>93</label><nlm-citation citation-type="web"><article-title>WHO outlines considerations for regulation of artificial intelligence for health</article-title><source>World Health Organization</source><year>2023</year><access-date>2025-11-29</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/news/item/19-10-2023-who-outlines-considerations-for-regulation-of-artificial-intelligence-for-health">https://www.who.int/news/item/19-10-2023-who-outlines-considerations-for-regulation-of-artificial-intelligence-for-health</ext-link></comment></nlm-citation></ref><ref id="ref94"><label>94</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kotter</surname><given-names>E</given-names> </name><name name-style="western"><surname>D&#x2019;Antonoli</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Cuocolo</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Guiding AI in radiology: ESR&#x2019;s recommendations for effective implementation of the European AI Act</article-title><source>Insights Imaging</source><year>2025</year><volume>16</volume><issue>1</issue><pub-id pub-id-type="doi">10.1186/s13244-025-01905-x</pub-id></nlm-citation></ref><ref 
id="ref95"><label>95</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Winkler</surname><given-names>EC</given-names> </name><name name-style="western"><surname>Jungkunz</surname><given-names>M</given-names> </name><name name-style="western"><surname>Thorogood</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lotz</surname><given-names>V</given-names> </name><name name-style="western"><surname>Schickhardt</surname><given-names>C</given-names> </name></person-group><article-title>Patient data for commercial companies? An ethical framework for sharing patients&#x2019; data with for-profit companies for research</article-title><source>J Med Ethics</source><year>2025</year><month>05</month><volume>51</volume><issue>5</issue><pub-id pub-id-type="doi">10.1136/jme-2022-108781</pub-id></nlm-citation></ref><ref id="ref96"><label>96</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Opara-Martins</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sahandi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tian</surname><given-names>F</given-names> </name></person-group><article-title>Critical analysis of vendor lock-in and its impact on cloud computing migration: a business perspective</article-title><source>J Cloud Comp</source><year>2016</year><month>12</month><volume>5</volume><issue>1</issue><fpage>4</fpage><pub-id pub-id-type="doi">10.1186/s13677-016-0054-z</pub-id></nlm-citation></ref><ref id="ref97"><label>97</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Botha</surname><given-names>NN</given-names> </name><name name-style="western"><surname>Segbedzi</surname><given-names>CE</given-names> </name><name 
name-style="western"><surname>Dumahasi</surname><given-names>VK</given-names> </name><etal/></person-group><article-title>Artificial intelligence in healthcare: a scoping review of perceived threats to patient rights and safety</article-title><source>Arch Public Health</source><year>2024</year><month>10</month><day>23</day><volume>82</volume><issue>1</issue><fpage>188</fpage><pub-id pub-id-type="doi">10.1186/s13690-024-01414-1</pub-id><pub-id pub-id-type="medline">39444019</pub-id></nlm-citation></ref><ref id="ref98"><label>98</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Bekkum</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zuiderveen Borgesius</surname><given-names>F</given-names> </name><name name-style="western"><surname>Heskes</surname><given-names>T</given-names> </name></person-group><article-title>AI, insurance, discrimination and unfair differentiation: an overview and research agenda</article-title><source>Law Innov Technol</source><year>2025</year><month>01</month><day>2</day><volume>17</volume><issue>1</issue><fpage>177</fpage><lpage>204</lpage><pub-id pub-id-type="doi">10.1080/17579961.2025.2469348</pub-id></nlm-citation></ref><ref id="ref99"><label>99</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Desislavov</surname><given-names>R</given-names> </name><name name-style="western"><surname>Mart&#x00ED;nez-Plumed</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hern&#x00E1;ndez-Orallo</surname><given-names>J</given-names> </name></person-group><article-title>Trends in AI inference energy consumption: Beyond the performance-vs-parameter laws of deep learning</article-title><source>Sustainable Computing: Informatics and Systems</source><year>2023</year><month>04</month><volume>38</volume><fpage>100857</fpage><pub-id 
pub-id-type="doi">10.1016/j.suscom.2023.100857</pub-id></nlm-citation></ref><ref id="ref100"><label>100</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gong</surname><given-names>E</given-names> </name><name name-style="western"><surname>Pauly</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Wintermark</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zaharchuk</surname><given-names>G</given-names> </name></person-group><article-title>Deep learning enables reduced gadolinium dose for contrast&#x2010;enhanced brain MRI</article-title><source>J Magn Reson Imaging</source><year>2018</year><month>08</month><volume>48</volume><issue>2</issue><fpage>330</fpage><lpage>340</lpage><pub-id pub-id-type="doi">10.1002/jmri.25970</pub-id></nlm-citation></ref><ref id="ref101"><label>101</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Johnson</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Knoll</surname><given-names>F</given-names> </name><name name-style="western"><surname>Lui</surname><given-names>YW</given-names> </name></person-group><article-title>Artificial intelligence for MR image reconstruction: an overview for clinicians</article-title><source>J Magn Reson Imaging</source><year>2021</year><month>04</month><volume>53</volume><issue>4</issue><fpage>1015</fpage><lpage>1028</lpage><pub-id pub-id-type="doi">10.1002/jmri.27078</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Worksheet canvas for exercise 2. 
The original worksheet during the workshop was in Dutch.</p><media xlink:href="jmir_v28i1e83407_app1.png" xlink:title="PNG File, 199 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Worksheet canvas for exercise 3. Note that, again, the original worksheet during the workshop was in Dutch.</p><media xlink:href="jmir_v28i1e83407_app2.png" xlink:title="PNG File, 105 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Quotebook.</p><media xlink:href="jmir_v28i1e83407_app3.docx" xlink:title="DOCX File, 19 KB"/></supplementary-material></app-group></back></article>