<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e79863</article-id><article-id pub-id-type="doi">10.2196/79863</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>The Ethics of Leveraging Routinely Collected Patient Data for AI Development: Mixed Methods Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Maris</surname><given-names>Menno T</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Klopotowska</surname><given-names>Joanna E</given-names></name><degrees>PhD, PharmD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Cornet</surname><given-names>Ronald</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib 
contrib-type="author"><name name-style="western"><surname>Van den Hoven</surname><given-names>Mari&#x00EB;tte A</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Lieverse</surname><given-names>Joris E</given-names></name><degrees>PharmD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Fern&#x00E1;ndez-Llaneza</surname><given-names>Daniel</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bak</surname><given-names>Marieke A R</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><collab>LEAPfROG Consortium</collab><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="aff" rid="aff7">7</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Ethics, Law and Humanities, Amsterdam UMC Location University of Amsterdam</institution><addr-line>Meibergdreef 9</addr-line><addr-line>Amsterdam</addr-line><country>The Netherlands</country></aff><aff id="aff2"><institution>Amsterdam Public Health Institute</institution><addr-line>Amsterdam</addr-line><country>The Netherlands</country></aff><aff id="aff3"><institution>Department of Medical Informatics, Amsterdam UMC location University of Amsterdam</institution><addr-line>Amsterdam</addr-line><country>The Netherlands</country></aff><aff id="aff4"><institution>Department of Ethics, Law and Humanities, Amsterdam UMC location Vrije Universiteit Amsterdam</institution><addr-line>Amsterdam</addr-line><country>The Netherlands</country></aff><aff 
id="aff5"><institution>Institute of History and Ethics in Medicine, TUM School of Medicine, Technical University of Munich</institution><addr-line>Munich</addr-line><country>Germany</country></aff><aff id="aff6"><institution>See Acknowledgments</institution></aff><aff id="aff7">Abu-Hanna Ameen, Damoiseaux Birgit A, Boersma Cornelis, Dongelmans Dave A, Koning David H de, Harmelen Frank van, Holla Gerty, Marck Heiralde, Vagliano Iacopo, Pander Jan, Schans Jurjen van der, Cin&#x00E0; Giovanni, Jager Kitty J, Drop Leonora van, Dusseljee-Peute Linda, Hilbrands Luuk B, Kwa Marcel SG, Bak Marieke A R, Hoven Mariette van den, Kersloot Martijn G, Keizer Nicolette F de, Maarsingh Otto R, Blank Paul, Heingraaf Piet, Wildt Ren&#x00E9;e de, Herings Ron, Keizer Ron J, Boyd Ruben, Knijnenburg Sebastiaan L, Visser Sipke, Gremmen Stijn, Gelder Teun van, Visser Tjerk S Heijmens, Stel Vianda S</aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Stone</surname><given-names>Alicia</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Wu</surname><given-names>Chaochen</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Jaisankar</surname><given-names>Dharan Sankar</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Gupta</surname><given-names>Neelam</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Menno T Maris, MSc, Department of Ethics, Law and Humanities, Amsterdam UMC Location University of Amsterdam, Meibergdreef 9, Amsterdam, The Netherlands, 31 (020) 566 9111; <email>m.t.maris@amsterdamumc.nl</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>2</day><month>3</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e79863</elocation-id><history><date 
date-type="received"><day>30</day><month>06</month><year>2025</year></date><date date-type="rev-recd"><day>25</day><month>11</month><year>2025</year></date><date date-type="accepted"><day>25</day><month>11</month><year>2025</year></date></history><copyright-statement>&#x00A9; Menno T Maris, Joanna E Klopotowska, Ronald Cornet, Mari&#x00EB;tte A Van den Hoven, Joris E Lieverse, Daniel Fern&#x00E1;ndez-Llaneza, Marieke A R Bak, LEAPfROG Consortium. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 2.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e79863"/><abstract><sec><title>Background</title><p>Electronic health record (EHR) data, a key form of routinely collected patient data, offer great potential for medical research and the development of artificial intelligence (AI) tools. However, because these data are primarily gathered for health care rather than research, they often lack the quality needed for AI training, raising both methodological and ethical concerns. 
While previous studies have reviewed the ethical implications of both routinely collected patient data and AI separately, their intersection, where AI is applied to such data, remains largely unexplored.</p></sec><sec><title>Objective</title><p>This study aimed to examine the ethical challenges that arise at the intersection of EHR data and AI development and to derive practice-oriented recommendations using the Dutch LEAPfROG (Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods) project as a guiding case.</p></sec><sec sec-type="methods"><title>Methods</title><p>We used a mixed methods design combining a scoping literature review with a systematic search and 2 stakeholder workshops structured by the guidance ethics approach, reflecting a staged and iterative process aligned with the LEAPfROG project&#x2019;s development phases. The review identified 25 relevant publications from 2014 to 2024. The workshops, conducted with 17 and 13 participants, respectively, included patients, clinicians, ethicists, data officers, and AI developers. Both workshops used dialogue to identify ethical values, impacts, and action points, focusing on a case study of drug-induced acute kidney injury.</p></sec><sec sec-type="results"><title>Results</title><p>The analysis highlighted 4 major themes: (1) data privacy, transparency, and consent, including challenges of meaningful consent and risks of reidentification; (2) public trust and regulatory challenges, such as fragmented oversight and inconsistent governance; (3) fair representation and model generalizability, where incomplete or biased data may perpetuate health inequities; and (4) responsible AI integration in clinical practice, including concerns about clinical tropism, administrative burden, and the risk of overreliance on AI outputs. 
Both literature and stakeholder perspectives underscore the risk of decontextualization when EHR data are reused and emphasize the importance of clearly defining the purpose of data reuse to ensure real-world applicability and foster trust.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Responsible AI development requires explicit attention to how EHR data are produced, interpreted, and governed in practice, recognizing that data quality and meaning are shaped by the clinical, institutional, and social contexts in which they originate. Technical solutions or top-down regulation alone are insufficient. Instead, stakeholder-led and context-sensitive approaches are needed to define the purposes, risks, and benefits of medical AI. Grounded in the realities of health care practice and in the perspectives of patients, clinicians, and data custodians, these approaches can strengthen transparency, fairness, and clinical relevance, ensuring that EHR data are used ethically and effectively to support equitable and trustworthy AI innovation.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>routinely collected health data</kwd><kwd>ethics</kwd><kwd>stakeholder participation</kwd><kwd>medical informatics</kwd><kwd>pharmacotherapy</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Both routinely collected patient data and artificial intelligence (AI) are widely regarded as promising resources for advancing medical research. AI methods are valued for their ability to manage and analyze large, diverse datasets, making them potentially well-suited to maximize the potential of routinely collected patient data. Studies leveraging such data offer several advantages over other study designs, including lower administrative costs, adaptability to evolving practice patterns, and access to larger sample sizes [<xref ref-type="bibr" rid="ref1">1</xref>]. 
One of the fields where leveraging routinely collected data with AI seems particularly promising is individualized pharmacotherapy, focusing on the &#x201C;rational use of drugs&#x201D; to tailor pharmacotherapy to patients&#x2019; clinical needs, thereby maximizing benefits and minimizing harm [<xref ref-type="bibr" rid="ref2">2</xref>]. Effective innovation in pharmacotherapy is currently especially challenging for patients with multimorbidity, characterized by the presence of 2 or more chronic diseases and a frequent need for multiple medications, a phenomenon known as polypharmacy [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. Despite the increasing prevalence of multimorbidity, partly due to population aging and improvements in health care services [<xref ref-type="bibr" rid="ref5">5</xref>], clinical programs, guidelines, and research continue to primarily focus on managing individual diseases [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>By leveraging routinely collected patient data from electronic health records (EHRs) with AI, the Dutch LEAPfROG Project (Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods) seeks to enhance medication safety for patients with multimorbidity. A central focus of the project is the clinically urgent use case of drug-induced acute kidney injury (DAKI) in patients with chronic kidney disease (CKD). 
Ultimately, LEAPfROG aims to develop high-quality and fit-for-purpose EHR data, adhering to the principles of findability, accessibility, interoperability, and reusability [<xref ref-type="bibr" rid="ref7">7</xref>] and to develop robust analytic methods, providing real-world evidence on pharmacotherapy outcomes and contributing to learning health systems that offer personalized treatments, improve quality of life, and reduce health care costs (<xref ref-type="other" rid="box1">Textbox 1</xref>).</p><boxed-text id="box1"><title> LEAPfROG (Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods) project: improving drug safety in chronic kidney disease (CKD) through electronic health record (EHR) data and artificial intelligence (AI).</title><p><bold>Background</bold></p><p>The LEAPfROG project leverages routinely collected EHR data and structured domain knowledge to improve medication safety for patients with multimorbidity, specifically aiming to reduce the risk of drug-induced acute kidney injury (DAKI) in patients with CKD.</p><p>The LEAPfROG approach combines data from 3 EHR sources: detailed EHRs from Amsterdam UMC hospitals, EHRs from the Academic Network of General Practices Amsterdam (ANHA), and EHRs from general practices and outpatient pharmacies within the PHARMQ Database Network, covering general practices, outpatient pharmacies, and hospitals. Together, these datasets provide a rich source of insights into patient pharmacotherapy outcomes and drug exposures. To enhance these insights, LEAPfROG integrates domain knowledge from resources such as drug monographs, medication safety alerts, and scientific literature.</p><p>A key goal of the project is to develop transparent, explainable tools that can assist clinicians in making well-informed prescribing decisions. 
LEAPfROG's AI models combine machine learning with knowledge representation methods to detect DAKI, assess DAKI risk in patients with CKD, and clarify the underlying factors contributing to kidney safety issues. By doing so, the project aims to improve the detection of adverse drug events such as DAKI and enhance patient outcomes, particularly for patients with multimorbidity, such as those with CKD.</p><p><bold>Causal machine learning model for retrospective diagnosis of DAKI</bold></p><p>One of LEAPfROG's aims is the development of a causal machine learning model to assist physicians in determining whether acute kidney injury was caused by a drug (combination), a process known as &#x201C;retrospective diagnosis.&#x201D; This model aims not only to identify CKD patients who developed DAKI but also to explain cause-and-effect relationships underlying the DAKI diagnosis. Through this effort, LEAPfROG aims to create a large dataset of patients who did and did not develop DAKI. This dataset is critical for training future machine learning models capable of identifying patients at risk of DAKI ("prognostic models"), ultimately helping to prevent such adverse drug events in future CKD patients.</p></boxed-text><p>Because EHR data are collected during routine care for clinical purposes, they often have quality limitations when reused for research, lacking the consistency required by clinical research standards [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. 
Integrating AI with EHR data may therefore exacerbate or further obscure existing data shortcomings, ultimately impacting their meaningful use in clinical practice [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>Previous studies have systematically reviewed ethical issues related to leveraging health data for medical research, including concerns related to the sharing and linkage of health data [<xref ref-type="bibr" rid="ref12">12</xref>], public attitudes towards the reuse of health data for research [<xref ref-type="bibr" rid="ref13">13</xref>], and broader ethical considerations of using EHRs for biomedical research [<xref ref-type="bibr" rid="ref14">14</xref>]. Regarding the ethical implications of AI in health care, previous studies have reviewed both epistemic ethical issues (ie, the type of knowledge AI generates) and normative ethical concerns [<xref ref-type="bibr" rid="ref15">15</xref>], along with empirical investigations into ethical considerations specific to medical AI [<xref ref-type="bibr" rid="ref16">16</xref>]. However, literature reviews on the ethical considerations inherent in the integration of EHR data and AI technologies are currently nonexistent.</p><p>In this study, we aim to explore the ethical implications of reusing EHR data for AI-driven research and innovation in health care and to provide actionable recommendations based on our use case, the LEAPfROG project. This article presents the results of a mixed methods approach that combines a scoping literature review with a systematic search and 2 innovative stakeholder engagement workshops grounded in the guidance ethics approach (GEA) [<xref ref-type="bibr" rid="ref17">17</xref>]. 
The scoping review informed the design of the stakeholder workshops, which in turn generated empirical insights that complemented and contextualized the review&#x2019;s findings.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>The study followed an iterative, participatory design that combined a scoping review and stakeholder engagement workshops to explore the ethical implications of reusing EHR data for AI-driven health research. The scoping review was conducted in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) extension for scoping reviews [<xref ref-type="bibr" rid="ref18">18</xref>]. No protocol for the scoping review component of this study was registered. The stakeholder workshops were structured using the GEA for evaluating emerging technologies [<xref ref-type="bibr" rid="ref17">17</xref>]. The overall design was staged and sequential rather than triangulated, with each step reflecting a different stage of the LEAPfROG project. This iterative process reflected the real-world complexity of AI ethics, allowing themes to unfold progressively, from broad reflection on EHR reuse to more specific considerations of AI development and implementation.</p><p>The workshops were integrated into the staged structure of the project, with their timing and number determined in advance and aligned with key project phases. Rather than aiming for thematic saturation, the workshops served as structured points of ethical reflection tied to the specific technology and context [<xref ref-type="bibr" rid="ref17">17</xref>]. 
The scoping review identified ethical themes in the literature, providing a broad understanding of existing ethical considerations, while the workshops enabled further exploration and contextualization within the real-world context of the LEAPfROG project.</p><p>In the first workshop, participants discussed the ethical implications of reusing EHR data in the Dutch context, focusing on DAKI in patients with CKD. As these discussions aligned with initial literature findings, the subsequent scoping review shifted focus from the ethics of EHR reuse to ethical considerations at the intersection of EHR data and AI. In the second workshop, preliminary findings from the literature were presented to validate, contextualize, and further refine the identified themes. Furthermore, this workshop centered on a concrete AI-based tool within the LEAPfROG project: a causal machine learning model for the retrospective diagnosis of DAKI in patients with CKD.</p></sec><sec id="s2-2"><title>Scoping Review</title><sec id="s2-2-1"><title>Search Strategy</title><p>MTM, JEK, RC, and MB jointly developed the search strategy to identify articles that thoroughly examine or discuss the ethical considerations associated with leveraging EHR data for advancing medicine using AI. A comprehensive search was conducted by MTM in PubMed, CINAHL (via EBSCO), and Web of Science, restricted to results between March 2014 and March 2024. The search was structured around 4 key concepts: &#x201C;Artificial Intelligence,&#x201D; &#x201C;Ethics,&#x201D; &#x201C;Research,&#x201D; and &#x201C;Routinely collected patient data&#x201D; (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Additionally, the most relevant MeSH (Medical Subject Headings) terms, synonyms, and related terms were identified, leading to the development of final search strings for each database. 
Results from the 3 databases were merged in EndNote (version 21; Clarivate Analytics), and duplicates were removed.</p></sec><sec id="s2-2-2"><title>Selection Process and Analysis</title><p>One researcher (MTM) screened titles and abstracts using Rayyan [<xref ref-type="bibr" rid="ref19">19</xref>]. Eligible studies were written in English and addressed ethical considerations at the intersection of AI, including machine learning and natural language processing, and routinely collected patient data. Following Benchimol et al [<xref ref-type="bibr" rid="ref20">20</xref>], such data were defined as health data collected without specific a priori research questions, including observational data from EHRs and disease registries. Particular emphasis during selection was placed on studies using EHRs or data comparable in origin, as these form the primary data source within the LEAPfROG project and are central to the ethical questions explored in this study. Because terminology varies across publications, the search strategy was intentionally broad to capture studies referring to similar data under related terms (eg, clinical or electronic medical records). Studies that also discussed other data sources (eg, clinical trial or wearable data) were included, provided the focus was sufficiently on the reuse of EHRs. Any uncertainties during full-text assessment were resolved by consensus between 2 researchers (MTM and MB). Reference lists were screened for additional relevant studies.</p><p>We uploaded full-text articles to MaxQDA and performed open coding to explore the ethical implications of using routinely collected EHR data for AI-driven research and innovation. Data extraction and coding were treated as the data charting process for the scoping review. 
Using a subset of 10 articles, one researcher (MTM) developed a coding scheme based on overlapping ethical considerations and emerging themes, such as &#x201C;privacy&#x201D; and &#x201C;doctor-patient relationship.&#x201D; This framework was discussed with the second researcher (MB) throughout the process and guided the coding of the remaining articles by MTM.</p><p>Insights from the scoping review informed both the design and thematic focus of the second stakeholder workshop. The resulting data were analyzed inductively to explore how participants interpreted and prioritized ethical themes. This analysis refined and contextualized the review&#x2019;s findings, emphasizing those most pertinent to real-world practice within the LEAPfROG project and completing the staged, iterative process.</p></sec></sec><sec id="s2-3"><title>Guidance Ethics Workshops</title><sec id="s2-3-1"><title>Workshop Design</title><p>We organized 2 stakeholder engagement workshops using the GEA (translated from the Dutch &#x201C;Aanpak Begeleidingsethiek&#x201D;), a relatively new method for addressing the ethical aspects of technological innovation [<xref ref-type="bibr" rid="ref17">17</xref>]. The workshops were conducted in Dutch. The GEA uses dialogue to map the most pressing moral considerations in technology implementation, as identified by stakeholders [<xref ref-type="bibr" rid="ref21">21</xref>]. 
The GEA primarily serves to guide the development of a technology by prioritizing ethical considerations relevant to the specific case, while striving to align with stakeholder values.</p><p>Both workshops, which lasted 3 hours each, followed the GEA approach and comprised three separate stages: (1) providing a clear description of the technology and its context, (2) identifying stakeholders, potential impacts, and underlying values, and (3) generating a list of options for responsible innovation and implementation from 3 distinct perspectives: technology, environment, and user (<xref ref-type="other" rid="box2">Textbox 2</xref>). The first workshop was prepared by JEK, RC, MTM, and MB and moderated by external facilitators from Stichting ECP. The second was prepared by JEK, MTM, MB, MvdH, and DF-L and moderated by JEK, MTM, MvdH, JEL, and MB.</p><boxed-text id="box2"><title> Stakeholder workshops design &#x201C;guidance ethics approach.&#x201D;</title><p>Stage 1: Technology in context</p><list list-type="roman-lower"><list-item><p>Introduction by project leaders: Description and use context.</p></list-item><list-item><p>Stakeholder inquiry: Opportunity for participants to gain a deeper understanding of and explore the details of the intended technology.</p></list-item></list><p>Stage 2: Dialogue</p><list list-type="roman-lower"><list-item><p>Actors: Individuals or groups involved in or affected by the proposed technology or solution.</p></list-item><list-item><p>Effects: Potential benefits and harms arising from the development and implementation of the technology or solution.</p></list-item><list-item><p>Values: Examination of the most pertinent ethical considerations related to the effects from step II.</p></list-item></list><p>Stage 3: Action points</p><p>Action points to promote responsible innovation at 3 levels of abstraction:</p><list list-type="roman-lower"><list-item><p>Technology<italic>:</italic> How should it be 
designed?</p></list-item><list-item><p>Environment: Includes both tangible elements (eg, physical infrastructure) and systematic aspects (eg, policy).</p></list-item><list-item><p>User: What can users do?</p></list-item></list></boxed-text></sec><sec id="s2-3-2"><title>Participant Recruitment</title><p>Recruitment took place within the Dutch health care and research context through 2 structured routes. First, invitations were distributed via the LEAPfROG consortium network, including affiliated partners and organizations, following the project&#x2019;s progress. Second, patient participants were recruited through the Dutch Kidney Patients Association (Nederlandse Vereniging voor Nierpati&#x00EB;nten [NVN]). When invitees were unavailable, comparable participants were identified outside these routes to maintain diversity across stakeholder groups. All invited participants received a written background document explaining the LEAPfROG project, the purpose of the workshops, and their expected contribution. No prior preparation was required for participation. Patients were invited as experiential experts rather than professionals and were offered an introductory meeting to ask questions and familiarize themselves with the process. All participants provided informed consent before the workshops took place.</p><p>Before the workshops, participants were notified of the intention to publish a report on the session results and were given the opportunity to raise objections. Prior to publication, they were able to request changes and provide consent for the final reports, which are available in Dutch as <xref ref-type="supplementary-material" rid="app2">Multimedia Appendices 2</xref> and <xref ref-type="supplementary-material" rid="app3">3</xref>. The first workshop, led by external moderators from Stichting ECP, was not audio-recorded. 
To further strengthen the accuracy and completeness of the documentation, the second workshop was audio recorded, with participants informed in advance and given the opportunity to object.</p></sec><sec id="s2-3-3"><title>Documentation and Validation</title><p>In both workshops, discussions were documented on flip charts or a shared screen, allowing participants to review and refine notes in real time. Notes from moderators and researchers were later compared and synthesized into validated reports. The GEA emphasizes collective ethical reflection, in which differing perspectives are explicitly explored rather than reconciled into consensus. In the first workshop, notes were taken independently by 2 external moderators from Stichting ECP, alongside 2 researchers (JEK and MTM). The moderators&#x2019; notes from the first session formed the basis for the initial draft of the first workshop report. Notes from JEK and MTM were compared, reconciled, and extensively discussed within the research team to ensure completeness and accuracy. This iterative process led to the final validated report. The first draft of the second workshop report was prepared by MTM and subsequently finalized through joint review by MTM, JEK, and JEL.</p></sec><sec id="s2-3-4"><title>Workshop Data Analysis</title><p>The workshop discussions, facilitator notes, and summary reports were thematically analyzed using the GEA framework, which emphasizes dialogue and collective reflection among diverse stakeholders. The analysis focused on how participants articulated ethical and practical considerations across the 3 reflective phases of the GEA: mapping the technology in context, ethical reflection, and translation to action. MTM led the review and synthesis of the workshop materials, identifying recurring themes and insights that emerged through these dialogues. The resulting reports were designed as standalone outputs for dissemination within the LEAPfROG consortium. 
For the purpose of this paper, the analysis focused on themes most relevant to the research gap at the intersection of EHR data and AI development. The coding framework developed from the scoping review provided the analytical foundation, guiding the thematic structure while allowing refinement and contextualization based on stakeholder perspectives. Themes were subsequently reviewed and refined with the coauthors during manuscript preparation to ensure consistency and alignment with the broader study.</p></sec></sec><sec id="s2-4"><title>Ethical Considerations</title><p>The overall LEAPfROG project protocol was reviewed by the Medical Ethics Committee of Amsterdam UMC (the Netherlands). The committee granted a waiver of formal approval (W22_340 #22.412) because the LEAPfROG project, including all substudies, does not fall within the scope of the Dutch Medical Research Involving Human Subjects Act (WMO). All workshop participants were informed about the study objectives and procedures and provided informed consent to participate. Participation was voluntary and unpaid. Travel expenses were reimbursed where applicable. Participants were informed that workshop reports would be made publicly available, and any attribution of names or roles in those reports occurred with participants&#x2019; knowledge and consent.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Included Literature</title><p>The results of the literature screening are presented in a PRISMA flow diagram (<xref ref-type="fig" rid="figure1">Figure 1</xref>). In total, 25 articles met the inclusion criteria. Most were commentary-style publications (eg, perspectives and debates; n=14), alongside review-type articles (n=8) and a smaller number of qualitative studies (n=3). In terms of medical focus, the largest group of articles explored themes within general health care and clinical informatics (n=9). 
Other represented areas included medical ethics and health equity (n=5), primary care (n=3), public health and health disparities (n=2), geriatrics (n=1), psychiatry (n=1), nephrology (n=1), rheumatology (n=1), pediatrics (n=1), and orthopedics (n=1).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram of study selection. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e79863_fig01.png"/></fig><p>In terms of geographical distribution, the majority of articles were published by authors based in North America (n=14), followed by Europe (n=8), Asia (n=2), and Australia (n=1). A detailed overview of article characteristics is available in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref> [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref46">46</xref>].</p></sec><sec id="s3-2"><title>Workshop Participants</title><p>Two stakeholder workshops were held, with 17 participants in the first and 13 in the second, with some attending both. 
Participants included medical ethicists (n=3), medical informatics experts and AI engineers (n=9), patients associated with the Kidney Patients Association Netherlands (NVN) (n=2), patient representatives from the NVN (n=2), a nephrologist, a chief medical information officer (n=1), a chief scientific information officer and endocrinologist (n=1), a general practitioner (n=1), pharmacists (n=2), a digital transition manager, data protection officers (n=2), and a digital ethics manager.</p></sec><sec id="s3-3"><title>Key Themes From the Literature and Integration of Stakeholder Workshops</title><p>This section is structured around 4 key themes: data privacy, transparency, and consent; public trust and regulatory challenges; fair representation and model generalizability; and responsible AI integration in clinical practice. This structure reflects the staged design of the LEAPfROG project and the cyclical nature of AI development in health care, which typically extends across data governance and quality, model generalizability, and clinical implementation, with each dimension influencing and being influenced by the others across policy, data, AI, and practice. The themes raised during the second stakeholder session aligned with the coding scheme developed from the scoping review, reinforcing the relevance of the literature to stakeholder perspectives. 
The workshops played a crucial role in grounding the analysis in real-world experience, ensuring its practical relevance within the LEAPfROG project.</p><p>To illustrate how thematic emphasis shifted across the different study components, <xref ref-type="table" rid="table1">Table 1</xref> presents the staged development of these themes across the scoping review and stakeholder workshops.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Shifts in thematic emphasis across study components of the staged LEAPfROG (Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods) design.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Key themes / stage</td><td align="left" valign="bottom">Workshop 1 (EHR<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> data reuse)</td><td align="left" valign="bottom">Scoping review (EHR&#x2013;AI<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> Integration)</td><td align="left" valign="bottom">Workshop 2 (AI development and implementation)</td></tr></thead><tbody><tr><td align="left" valign="top">1. 
Data privacy, transparency, and consent</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Patient autonomy and pseudonymization challenges</p></list-item><list-item><p>Clear consent for secondary use of EHR data</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Privacy risks amplified by data linkage and free-text inputs; persistent reidentification vulnerabilities</p></list-item><list-item><p>Tensions between ethically robust consent and notions of a civic duty to share EHR data</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Fostering trust through transparency across AI model development, explainability, and outputs</p></list-item></list></td></tr><tr><td align="left" valign="top">2. Public trust and regulatory challenges</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Institutional fragmentation and suboptimal national EHR infrastructure</p></list-item><list-item><p>Inconsistent policy and legal interpretations among stakeholders (including data custodians and researchers)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Siloed governance and fragmented oversight</p></list-item><list-item><p>Regulatory uncertainty limiting data sharing and accountability</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Stakeholder-led, value-driven governance</p></list-item><list-item><p>Shared vision on responsible EHR data reuse and multistakeholder collaboration</p></list-item></list></td></tr><tr><td align="left" valign="top">3. 
Fair representation and model generalizability</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Data heterogeneity and variability in EHR documentation practices</p></list-item><list-item><p>Digital and social inequities shaping clinicians&#x2019; use of EHRs and patients&#x2019; access to their health data</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Missing or biased data reinforcing inequities</p></list-item><list-item><p>Lack of demographic diversity and transparency in reporting</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Variation in clinical practices (eg, workflow diversity) shaping EHR data patterns and subsequent model outputs and interpretation</p></list-item><list-item><p>Need to recontextualize EHR-derived data across hospital departments and care settings</p></list-item></list></td></tr><tr><td align="left" valign="top">4. AI integration in clinical practice</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Documentation burden and workflow diversity in CKD<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>/DAKI<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup> care</p></list-item><list-item><p>Impact on EHR data reliability and clinician workload</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Model opacity, &#x201C;clinical tropism&#x201D;</p></list-item><list-item><p>Risk of overreliance and clinician deskilling</p></list-item><list-item><p>Ethical need for interpretability and accountability</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Timing and interpretability of AI model outputs</p></list-item><list-item><p>Risk of overly general or narrow outputs with limited clinical relevance</p></list-item><list-item><p>Model could enhance clinicians&#x2019; understanding of drug side 
effects and support shared decision-making</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>EHR: electronic health record.</p></fn><fn id="table1fn2"><p><sup>b</sup>AI: artificial intelligence.</p></fn><fn id="table1fn3"><p><sup>c</sup>CKD: chronic kidney disease.</p></fn><fn id="table1fn4"><p><sup>d</sup>DAKI: drug-induced acute kidney injury.</p></fn></table-wrap-foot></table-wrap><p>We connected insights from the literature and stakeholder discussions to situate the key themes within the Dutch health care landscape and the specific context of the LEAPfROG project. Each of the following sections elaborates on one of the 4 key themes and concludes with a reflection on how that theme was grounded and contextualized through stakeholder discussions.</p></sec><sec id="s3-4"><title>Data Privacy, Transparency, and Consent</title><sec id="s3-4-1"><title>Privacy Risks and Security Challenges</title><p>The reviewed literature highlights growing privacy concerns around the reuse of EHR data for AI research [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref34">34</xref>]. EHR data originate from everyday clinical care and often contain sensitive personal and medical information recorded without explicit patient consent, raising concerns about misuse and breaches of confidentiality [<xref ref-type="bibr" rid="ref30">30</xref>]. In contrast, data from controlled research settings such as randomized trials follow predefined consent procedures and strict regulatory oversight.</p><p>Safeguarding privacy in this context requires clear data access agreements, secure systems for storage and transmission, and transparent mechanisms for patient control and consent, in line with legal and ethical standards. 
The risks described in the literature include security breaches, such as hacking or malware attacks, that could lead to data exploitation, intentional discrimination, or identity theft [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Additionally, combining multiple data sources, such as EHRs and disease registries, amplifies privacy risks through greater exposure to breaches, inconsistencies, and loss of data integrity [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Advances in AI have further intensified the risk of misuse and harm by enabling sophisticated reidentification, tracking, and profiling techniques that threaten patient confidentiality, including methods capable of inferring sensitive information from nonsensitive data. These developments heighten the risk of misuse and harm [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Even privacy-preserving approaches remain vulnerable to inference attacks that can reveal attributes of training data, posing ongoing risks to confidentiality [<xref ref-type="bibr" rid="ref35">35</xref>].</p><p>Moreover, free text fields in EHRs often include both medical and personal information. Persisting difficulties in deidentifying this information increase the potential for reidentification and unintended disclosure when used for research purposes, possibly even beyond the individual patient [<xref ref-type="bibr" rid="ref27">27</xref>]. 
Ford et al [<xref ref-type="bibr" rid="ref27">27</xref>] discuss the challenge of balancing automated and human-led deidentification of free text, noting that participants in citizen jury discussions expressed concerns that automated methods might leave identifiers, while human-led processes could introduce bias or error due to inconsistency or workload. Others emphasize that the use of AI methods to extract information from unstructured EHR text often lacks standardized guidelines and the level of scrutiny applied to other medical interventions, further exacerbating privacy risks [<xref ref-type="bibr" rid="ref33">33</xref>].</p></sec><sec id="s3-4-2"><title>Informed Consent: Is There a Civic Duty to Share Data for AI Health Research?</title><p>Most included studies identify challenges related to the need for explicit patient consent in reusing EHR data for AI-driven research [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Patients are often unaware of how their data may be used over time, raising questions about the validity of prior or implicit consent given for clinical care or research. Evolving AI models may introduce new applications that extend beyond the original scope of consent, repurposing data in ways that patients could not have anticipated [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. 
These concerns intensify when multiple data sources are linked, making it difficult to ensure meaningful consent at each stage of data integration [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Without clear processes for obtaining and maintaining consent, AI-driven research risks undermining patient autonomy and trust in health data governance, compromising its moral legitimacy.</p><p>Among the included studies, M&#x00FC;ller critically examines the view that citizens have a moral duty to share their health data for medical AI development. He argues against this by outlining key contrasts between common assumptions underlying such a duty and the ethical realities of data sharing [<xref ref-type="bibr" rid="ref28">28</xref>]. First, the &#x201C;rule to rescue,&#x201D; which asserts a duty to help others in immediate danger, does not apply here, as data sharing lacks the direct, person-to-person moral obligation implied in such situations. Second, the &#x201C;low risks, high benefits&#x201D; argument claims that societal gains justify data sharing. He argues that this view overlooks how risks such as privacy breaches and erosion of trust in health care are unequally distributed and more immediate than potential benefits. Third, the &#x201C;property rights argument&#x201D; holds that the involvement of public institutions in generating health data justifies limiting individual ownership. M&#x00FC;ller disputes this, emphasizing that health data are inherently personal and sensitive, and that treating them as a public commodity undermines autonomy and confidentiality.</p><p>Instead of a universal moral duty, M&#x00FC;ller proposes a civic responsibility approach grounded in transparency, value alignment, and voluntary participation, emphasizing consent mechanisms that allow individuals to manage how their data are shared. 
In line with this perspective, other authors argue that even when data reuse does not legally require consent, researchers remain ethically obligated to communicate clearly how data are used, how AI models operate, and how decisions are reached, as legal compliance does not substitute for transparency or patient and public engagement [<xref ref-type="bibr" rid="ref33">33</xref>].</p></sec><sec id="s3-4-3"><title>Stakeholder Perspectives: Data Privacy, Transparency, and Consent</title><p>In line with the literature, the stakeholder group discussed potential risks to patient privacy in the reuse of EHR data for research. They identified data exchange and EHR systems linkages as particularly vulnerable points, along with the risk of unauthorized data use by third parties such as health care insurers and private companies. To address these concerns, stakeholders proposed several measures, including the use of virtual research data environments that strictly comply with data protection regulations, ensuring that research data are pseudonymized or anonymized to reduce the risk of misuse or breaches, and enabling data providers to audit researchers&#x2019; actions on the data.</p><p>Other suggestions involved giving patients greater control over how their EHR data is reused, including the ability to decide whether, and which parts of, their records should be used for research. 
While this approach could enhance patient autonomy, concerns were raised about the risk of overburdening patients with repeated consent requests, especially when the research offers no clear or direct benefit at the individual level and does not immediately reflect patient priorities.</p></sec></sec><sec id="s3-5"><title>Public Trust and Regulatory Challenges</title><sec id="s3-5-1"><title>Trust and Transparency</title><p>The majority of the studies highlight trust as crucial in the relationship between various publics and those who collect, control, access, and use EHR data for AI research [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]. Many studies identify low public awareness as a key factor undermining trust in these practices [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Patients are often unaware that their data are shared across organizations within integrated systems or for what specific purposes [<xref ref-type="bibr" rid="ref31">31</xref>]. Low public trust in institutions handling health data may reduce participation in AI research, ultimately introducing self-selection bias and weakening model quality and representativeness [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. 
Given these concerns, many studies underscore the ethical importance of transparency, not only as a means to foster trustworthiness but also for ensuring legitimate and accountable data governance [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>].</p></sec><sec id="s3-5-2"><title>Regulatory Fragmentation and Legal Uncertainty</title><p>Several studies identify regulatory and legal uncertainties as major obstacles to the adoption of health AI tools [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. A fragmented legal landscape, marked by inconsistencies across institutions and jurisdictions, creates compliance and accountability challenges [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. In Germany, for example, data-sharing initiatives are complicated by regulatory fragmentation and varying requirements across sectors [<xref ref-type="bibr" rid="ref29">29</xref>]. Siloed regulation, in which separate and uncoordinated frameworks govern different types of health data and medical technologies, fragments oversight, leading to regulatory gaps (areas not clearly covered by any authority), overlaps (conflicting or duplicated requirements), and broader compliance hurdles that hinder implementation [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Regulatory uncertainty often deters data custodians&#x2014;responsible for data storage, integrity, and authorized access&#x2014;from sharing EHR data beyond clinical settings. 
This reflects a broader tension between oversight predominantly aimed at safeguarding privacy and fostering the flexibility required for AI innovation. For instance, strict interpretations of data protection laws, such as the General Data Protection Regulation, can limit researchers&#x2019; access to patient data, influencing the trajectory of developments in the field [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref38">38</xref>].</p></sec><sec id="s3-5-3"><title>Stakeholder Workshops: Public Trust and Regulatory Challenges</title><p>In the Netherlands, EHR data exchange remains limited due to the fragmented health care system, where institutions rely on diverse and often incompatible standards. For example, hospitals may use different clinical coding systems or inconsistent data formats for diagnoses, making integration across systems technically and semantically challenging. This challenge is further compounded by a decentralized governance framework that grants considerable institutional autonomy, resulting in varied requirements and interpretations across institutional policies. In line with the literature, stakeholders noted that unclear and inconsistent interpretations of laws and regulations create barriers to collaboration within a complex network of actors, including hospitals, health insurers, data vendors, EHR system providers, and research funders, each imposing specific conditions for data availability and reuse.</p><p>A key suggestion that emerged during the workshops was to develop a shared vision within collaborative networks on the &#x201C;why&#x201D; of reusing EHR data for research. This could be achieved, for example, through consensus workshops involving patients, health care providers, researchers, data managers, and data protection officers, with attention to values such as trustworthiness, solidarity, quality of care, and privacy. 
Participants also emphasized the importance of establishing shared agreements within these networks on which data should be recorded in the EHR, using predefined terminologies, and defining how and where these should be implemented.</p></sec></sec><sec id="s3-6"><title>Fair Representation and Model Generalizability</title><sec id="s3-6-1"><title>Health Inequities Reflected in the Data</title><p>Most included studies warn that missing or incomplete patient data can compromise model performance and perpetuate existing health care inequities, disproportionately affecting marginalized groups [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. These inequities often stem from social determinants of health, such as financial barriers, limited digital and health literacy, and structural marginalization. Failing to adequately consider these factors can result in flawed models that underestimate disease burden for certain groups and undermine the equitable distribution of benefits [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>-<xref ref-type="bibr" rid="ref44">44</xref>]. 
Several studies further highlight the need to account for the historical context of research on race and ethnicity, ancestry, and sex and gender minorities, particularly in the United States, where past exploitation contributes to ongoing distrust in health care systems [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p></sec><sec id="s3-6-2"><title>Institutional and Structural Variability in Data and Its Impact on AI Data Quality</title><p>Several studies highlight how institution-specific practices, norms, and patient populations shape the composition of EHR data [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. Factors such as patient proximity to clinics, visit frequency, insurance coverage, and differences in clinical protocols and diagnostic resources influence data representation [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. For example, some institutions may request diagnostic tests earlier [<xref ref-type="bibr" rid="ref30">30</xref>] or use activity-based financing, leading to an overrepresentation of codes tied to higher reimbursement, reflecting billing priorities rather than actual clinical complexity [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Sampling bias can occur when chronically ill patients, who interact more frequently with the health care system, are overrepresented, skewing patterns that do not reflect the broader population. 
Within institutions, departmental priorities and documentation practices may differ, contributing to inconsistencies in data capture [<xref ref-type="bibr" rid="ref25">25</xref>]. Increasing reliance on data-intensive health care can reduce direct communication between medical staff, heightening the risk of incomplete or selective documentation [<xref ref-type="bibr" rid="ref24">24</xref>].</p><p>While standardizing EHR data through structured formats and predefined categories may reduce inconsistencies, it also risks removing essential clinical context [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. Excluding physicians&#x2019; notes and clinical judgments, for example, may lead to the loss of valuable information such as symptom descriptions, diagnostic reasoning, and the context preceding symptoms. This increases the likelihood of misinterpretation by researchers and AI models, potentially resulting in biased or unsafe decisions [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. Yet, as Knevel and Liao [<xref ref-type="bibr" rid="ref30">30</xref>] note, some gaps in EHR data can hold clinical meaning. For instance, the absence of specific test results may suggest that a particular diagnosis was deemed unlikely. Interpreting such nuances requires strong domain knowledge and clinician involvement to ensure meaningful analysis.</p><p>Variability in EHR systems across institutions and countries continues to challenge the development of generalizable AI models [<xref ref-type="bibr" rid="ref30">30</xref>]. 
The absence of centralized or standardized data-sharing mechanisms, such as those based on findability, accessibility, interoperability, and reusability principles, exacerbates this problem by hindering interoperability and seamless data integration across settings [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. To address structural and institutional fragmentation, federated learning has been proposed as a promising approach aimed at preserving patient privacy while enabling AI models to be trained across institutions without sharing raw data. Such methods seek to facilitate collaboration across decentralized datasets while maintaining institutional autonomy. Related privacy-preserving techniques include differential privacy and homomorphic encryption, which are designed to enhance data protection but remain largely experimental and face challenges of scalability, performance, and validation in clinical settings [<xref ref-type="bibr" rid="ref35">35</xref>].</p></sec><sec id="s3-6-3"><title>Challenges in AI Model Reliability and Potential Harm</title><p>Several studies raise concerns about harm from unreliable or biased AI systems, particularly when embedded in clinical decision-making. 
Deliberate and unintentional choices in AI development can introduce algorithmic bias, reinforcing EHR data limitations and distorting model outputs [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. Examples include data integrity issues from reusing and merging databases, such as data duplication [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref35">35</xref>], model design choices reflecting provider bias [<xref ref-type="bibr" rid="ref32">32</xref>], and preselection of training data based on physician or developer preferences [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p><p>Technical flaws throughout model development can also result in harmful clinical outcomes, even when using high-quality data [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. For example, models may overfit datasets that do not reflect relevant clinical questions, undermining generalizability and reliability [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Optimistic predictions can lead to futile or even harmful interventions, while overly pessimistic ones may cause unnecessary withholding of care [<xref ref-type="bibr" rid="ref32">32</xref>]. In such cases, the ethical imperative to minimize harm may justify simpler models that generalize more safely, even if less accurate. 
Some studies argue that the appropriate level of model explainability should be calibrated according to the clinical risks and the extent to which domain experts can interpret the model&#x2019;s output [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. Several studies also highlight the lack of standardized frameworks or tools to assess and evaluate AI&#x2019;s real-world clinical impact, particularly regarding patient safety, value alignment, and health equity, thereby limiting consistent assessment of ethical implications and potential harm [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref45">45</xref>].</p></sec><sec id="s3-6-4"><title>Barriers to Inclusive AI Development</title><p>Several studies identify deeper systemic and infrastructural barriers that undermine inclusive and equitable AI. Adapting AI to local environments requires significant investment in data integration, testing, infrastructure, and clinical expertise [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. However, many health organizations lack the secure, scalable infrastructure needed for effective AI deployment, a gap often overlooked in policies that emphasize AI&#x2019;s benefits while downplaying resource demands [<xref ref-type="bibr" rid="ref22">22</xref>]. 
These barriers also raise questions of equity in data governance, including whether and how data contributors should be compensated or share in AI&#x2019;s benefits [<xref ref-type="bibr" rid="ref31">31</xref>].</p><p>Another persistent challenge is the unclear definition of target populations and the inconsistent representation of demographic groups in AI development [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. One study found, for example, inconsistent demographic reporting across 164 EHR-based AI studies, undermining model representativeness and reproducibility [<xref ref-type="bibr" rid="ref45">45</xref>]. Limited data sharing, closed-source models, and poor interoperability hinder validation across diverse clinical settings, while low digital adoption and data vendor monopolies further restrict datasets to less representative populations [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>Paulus and Kent [<xref ref-type="bibr" rid="ref42">42</xref>] argue that fairness in algorithmic decision-making is context-dependent, requiring value judgments and stakeholder consensus. No universal definition exists, and ethical concerns over protected attributes such as race persist. Others similarly highlight concerns about the values and priorities embedded in AI models, which may implicitly reflect biases or unexamined assumptions [<xref ref-type="bibr" rid="ref44">44</xref>]. 
This risk intensifies when health data is used for generating profit, for example, potentially placing financial incentives above equitable patient care and responsible data stewardship [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. In response, many studies advocate for sustained stakeholder engagement throughout AI development efforts, including participatory co-design and mixed methods approaches that integrate qualitative and quantitative insights rather than relying on externally imposed solutions [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p></sec><sec id="s3-6-5"><title>Stakeholder Workshops: Fair Representation and Model Generalizability</title><p>Stakeholder discussions highlighted the complex realities of clinical practice in the context of CKD and DAKI, which are inherently mirrored in the data. For instance, when treating sepsis with antibiotics such as vancomycin, known to potentially cause acute kidney injury (AKI), clinicians may consciously accept the risk of AKI as a necessary trade-off to, in some cases, save the patient&#x2019;s life. Additionally, when a side effect is common and widely acknowledged, physicians may not document or provide less detailed documentation, focusing instead on more pressing clinical problems. 
Moreover, these considerations can also vary across medical specialties, with clinical practices such as prescribing practices differing, for example, between internal medicine and cardiology.</p><p>In short, understanding a potential causal relationship between a drug and AKI provides only part of the story. During the workshops, it was emphasized that gaining a clear understanding of how variability from different clinical settings is represented in the EHR data is crucial, as is considering how such complexity could be effectively incorporated into an AI-driven decision-support model for retrospective DAKI diagnosis. At the same time, consistent with Knevel and Liao [<xref ref-type="bibr" rid="ref30">30</xref>], it was noted that research using EHR data can offer valuable insights into clinical routines and decision-making patterns, such as differences in prescribing practices across specialties. However, uncovering these insights is highly resource-intensive and time-consuming, as it requires not only technical expertise but also deep contextual knowledge and sustained engagement with clinical practice.</p></sec></sec><sec id="s3-7"><title>Responsible AI Integration in Clinical Practice</title><sec id="s3-7-1"><title>Administrative Workload</title><p>While efforts to structure and standardize EHR data are central to enabling AI-driven research, several studies emphasize the unintended increase in administrative burden for health care providers [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>-<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. 
Processes such as careful data assessment and logging, essential for ensuring data quality, reducing fragmentation, and addressing issues like concept drift in longitudinal datasets, are time-consuming and resource-intensive [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. These demands raise ethical concerns related to clinician well-being, sustainable work practices, and their impact on patient care [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Furthermore, several studies note that professionals&#x2019; earlier experiences with EHR implementations often failed to deliver the anticipated quality improvements despite significant increases in workload associated with data entry and system management, contributing to skepticism toward data-driven AI solutions [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Finally, many health care settings still lack fully digitized, structured records, complicating the integration of AI into clinical workflows [<xref ref-type="bibr" rid="ref41">41</xref>]. 
Together, these challenges highlight a tension between the technical demands of fit-for-purpose data in AI-based research and the practical realities of clinical work, underscoring the need for implementation strategies that balance data quality with provider sustainability.</p></sec><sec id="s3-7-2"><title>Clinical Tropism</title><p>A major barrier to AI adoption in health care is the limited transparency and interpretability of complex, &#x201C;black-box&#x201D; models [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. When AI-driven decisions lack clear explanations, clinicians and patients struggle to trust them, undermining ethical requirements for transparency, informed decision-making, and accountability in clinical care. Without insight into how these models function, users cannot detect or correct biases, increasing the risk of erroneous or harmful outcomes [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Distinguishing causal from correlative relationships is essential in medical decision-making to avoid ineffective or harmful interventions [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. These risks are further amplified by the fact that harmful AI-related errors can occur at scale, exceeding the impact of individual provider errors [<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>As discussed earlier, many studies highlight challenges of representativeness and model generalizability, emphasizing how incomplete or biased datasets can perpetuate existing inequalities. 
Beyond these population-level concerns, several studies draw attention to how AI systems mirror the clinical contexts in which they are developed. Alami et al [<xref ref-type="bibr" rid="ref22">22</xref>] describe this as clinical tropism, referring to the tendency of AI systems to reproduce the specific practices, routines, and priorities of the settings that generated their training data. They argue that models trained on localized data may reflect institutional protocols, workflows, or device infrastructures that are not easily transferable to other contexts. Over time, such systems risk becoming repositories of established clinical practices, developing what Alami et al [<xref ref-type="bibr" rid="ref22">22</xref>] term a &#x201C;clinical mind,&#x201D; rather than generating new insights. This dynamic may lead professionals to place disproportionate trust in AI outputs that merely echo existing patterns of care. Furthermore, because clinical practice continually evolves through new treatments and protocol updates, EHRs are inherently temporal, making input data prone to diverge from the contexts represented in their underlying sources [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s3-7-3"><title>Overreliance and the Risk of Deskilling</title><p>Several studies highlight the risk of deskilling among medical professionals as AI adoption increases, potentially weakening clinical judgment, autonomy, and the ability to make independent decisions [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. 
Overreliance on AI may also reduce adaptability across clinical contexts and limit clinicians&#x2019; capacity to account for patient preferences and values, raising concerns about the long-term impact on clinical competency and patient care [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>Empirical evidence suggests that caregivers (including nurses, midwives, clinicians, and pharmacists) are generally less likely to question algorithm-driven diagnostic results, increasing the error risk and further eroding clinical judgment over time [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Conversely, when AI-generated results deviate considerably from a clinician&#x2019;s assessment, there is a risk that the model&#x2019;s recommendations will be dismissed outright, potentially limiting AI&#x2019;s practical value in clinical settings [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. These dynamics underscore the need for careful integration of AI into clinical workflows, with sustained attention to its effects on professional roles, decision-making, and oversight.</p></sec><sec id="s3-7-4"><title>Stakeholder Workshops: Responsible AI Integration in Clinical Practice</title><p>In the complex care setting of CKD and DAKI, precise documentation of medication use and adverse drug events is essential for generating reliable data to support AI-driven research. However, DAKI is difficult to diagnose because of its multifactorial causes, subtle onset, and patient-specific variability, often involving combinations of dehydration, sepsis, and drug toxicity. 
During stakeholder workshops, participants stressed that documenting this complexity is time-consuming. One general practitioner noted that recording side effects alone can take up to 30 minutes, a significant burden given current time pressures in Dutch health care. Although general practitioners are legally required to provide digital access to medical records upon request, they are not obliged to use an EHR system. One kidney patient shared that her general practitioner still prefers pen and paper, relying on digital systems only when necessary, illustrating how variability in documentation practices continues to limit the availability of structured data for AI.</p><p>Participants saw potential for the model to have a learning effect, improving clinicians&#x2019; understanding of drug side effects over time. This could enhance care quality and, if communicated clearly, support shared decision-making. However, they stressed that model outputs must always be evaluated alongside clinical expertise to ensure safe and appropriate care. To improve clinical relevance, they suggested linking outputs to relevant guidelines and scientific literature to help physicians interpret findings in context and assess their significance for individual patients. At the same time, they cautioned that the time required to interpret outputs might increase or shift clinical workload rather than reduce it.</p><p>Stakeholders raised concerns consistent with the concept of &#x201C;clinical tropism&#x201D; of Alami et al [<xref ref-type="bibr" rid="ref22">22</xref>], which describes how AI models may reproduce the narrow or context-specific knowledge embedded in the data on which they are trained. They noted that this tendency could be reinforced by the steering effect of clinical guidelines and institutional protocols that shape both data collection and model performance. Participants further noted that publication bias in the studies informing model development can compound these effects. 
When only positive or statistically significant results are published, the evidence base fails to reflect the full range of clinical outcomes, leading models to learn a distorted view of clinical reality, thereby reinforcing rather than mitigating clinical tropism. Together, publication bias and the standardizing influence of clinical guidelines narrow the evidentiary foundation on which AI systems are built, privileging typical or well-studied cases while obscuring clinical variability and local practice. Participants also cautioned that integrating diverse data sources may produce overly general outputs with limited clinical relevance, lacking actionable value for clinicians and therefore likely to be disregarded.</p><p>Finally, participants stressed that assessing the causes of AKI requires attention to factors beyond medications and careful interpretation of predicted probabilities within individual patient contexts. For instance, a 61% probability that an antibiotic caused AKI might be clinically insignificant in one case but highly relevant in another, depending on the patient&#x2019;s circumstances. Participants emphasized that the model should be developed and implemented primarily as a decision-support tool to help clinicians assess whether a drug likely contributed to AKI.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study used a mixed methods approach to explore the ethical considerations of using EHR data for AI-driven health research and innovation, with the LEAPfROG project serving as a guiding case. We combined a scoping literature review, a systematic search, and 2 stakeholder engagement workshops, all informed by the GEA [<xref ref-type="bibr" rid="ref17">17</xref>].</p><p>Previous studies have examined the reuse of EHRs and the ethical implications of AI development separately. 
Research on data reuse has addressed governance, consent, and public trust, showing how challenges of transparency, fairness, and oversight are amplified by the data&#x2019;s variable quality, uncertain provenance, and fragmented governance [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. Studies on medical AI have focused on fairness, transparency, and accountability, mapping the ethical and epistemic risks that arise within algorithmic systems [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Our study brings these strands together by examining how recurring ethical challenges emerge and take shape at the intersection of EHR data and AI development, while also situating these issues within a real-world clinical and institutional setting (the LEAPfROG project).</p><p>By integrating stakeholder perspectives with existing ethical analyses, this study contributes an empirically grounded account of how recurring ethical challenges emerge in practice when AI systems are developed using EHR data. In this context, issues of consent, privacy, and governance become more complex as EHR data is repurposed and leveraged using AI, while the opacity and scale of AI systems introduce new risks of decontextualization and fragmented accountability across policy and oversight domains.</p><p>Building on this analysis, we advocate for an approach to data governance and AI development that is centered on and led by stakeholders. We stress the importance of involving all relevant parties not only in evaluating benefits and risks, but also in shaping the goals and direction of AI innovation. Accordingly, we structure the following discussion as follows: first, we present a set of practical recommendations derived from the literature and stakeholder workshops, intended to guide future reflection, engagement, and implementation. 
We then explore the concept of data work and examine the ethical risks associated with removing data from its original context and reusing it for AI development. Finally, we review current practices in stakeholder engagement and highlight the need for more inclusive and participatory forms of governance in future AI initiatives.</p></sec><sec id="s4-2"><title>Practical Recommendations</title><p>In addition to the central themes, both the scoping review and stakeholder workshops yielded practical recommendations for addressing ethical concerns related to AI and the use of EHRs (<xref ref-type="table" rid="table2">Table 2</xref>). Although these action points were not the primary focus of this study, they offer opportunities and potential entry points for addressing the broader challenges we aimed to explore. These insights reflect the perspectives and priorities identified in the sources consulted and do not necessarily represent the position of the author group. As the findings draw on international literature, some recommendations, such as patient representation in ethics committees, may already be established in contexts like the Netherlands. 
Nevertheless, they offer a useful supplement for future reflection, engagement, and implementation.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Key recommendations from literature and stakeholder workshops.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Key themes and recommendations</td><td align="left" valign="bottom">Main actors</td><td align="left" valign="bottom">Source (key articles)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Data privacy, transparency, and consent</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Strengthen AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> and data literacy among patients, clinicians, and institutional review board (IRB) members, emphasizing how EHR<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> quality and AI training may affect privacy, consent, and data integrity.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Board of Directors of health care organizations</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]+ workshops</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Implement dynamic consent mechanisms that reflect the iterative reuse of EHR data in AI development and ensure institutional governance structures can accommodate these updates.</td><td align="left" valign="top">Board of Directors of health care organizations</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Integrate consent management tools into EHR interfaces so that patients can review and modify how their data is used in research and model training, with clinical staff facilitating informed communication.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Board of Directors of health care organizations and EHR system vendors</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]+ workshops</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Require transparent documentation of AI provenance, including funding sources, proprietary model components, and training data, to prevent hidden conflicts of interest and foster public trust.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup> project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Public trust and regulatory challenges</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Adopt standardized, FAIR-aligned<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup> data practices and establish cross-institutional agreements that clarify ownership, accountability, and data flow between hospitals and general practices.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">National research infrastructure organizations like Health-RI in the 
Netherlands</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]+ workshops</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Establish joint oversight frameworks that include clinicians, data stewards, developers, and regulators to coordinate decision-making on data use, ownership, and third-party involvement, reducing fragmented accountability.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">National research infrastructure organizations like Health-RI in the Netherlands</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]+ workshops</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Embed patient representatives (ideally trained in AI ethics and data governance) in ethics committees and regulatory boards to strengthen inclusiveness and transparency in oversight processes.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">National research infrastructure organizations like Health-RI in the Netherlands and Board of Directors of health care organizations</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Fair representation and model generalizability</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Validate AI models 
in real clinical environments (for example, CKD<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup> and AKI<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup> contexts) and across diverse patient subgroups to ensure representativeness and clinical relevance.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Conduct fairness audits to explicitly assess whether model outputs are biased by demographic differences or by specific clinical routines and documentation practices that vary across institutions.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Adopt mixed methods evaluation combining quantitative metrics with qualitative insights to understand how AI affects workflows, professional judgment, and patient experience.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" 
rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]+ workshop 2</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Implement continuous postdeployment monitoring: reassess clinical impact on patient outcomes over time to ensure alignment with evolving clinical practice and technologies.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Board of Directors of health care organizations</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]+ workshop 2</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Appraise both clinical and economic outcomes through frameworks that evaluate how AI contributes to care quality, efficiency, and sustainability of health care delivery.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Responsible AI integration in clinical practice</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Align AI development with clinical workflows, prioritizing usability and practical relevance over purely technical performance metrics.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]+ workshops</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Ensure interpretability supports clinical judgment by designing models that reflect clinical complexity, communicate uncertainty, and complement human expertise.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]+ workshop 2</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Develop learning-oriented AI systems that enhance, rather than erode, professional expertise, and support reflective clinical practice.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project</td><td align="left" valign="top">Workshop 2</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Design EHR/AI tools collaboratively with stakeholders throughout the data and model lifecycle to clarify purposes, risks, and benefits, aligning these with shared values and patient expectations.<named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content></td><td align="left" valign="top">Researchers involved in AI projects like the LEAPfROG project and Board of Directors of health care organizations</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref 
ref-type="bibr" rid="ref41">41</xref>]+ workshop 1</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>EHR: electronic health record.</p></fn><fn id="table2fn3"><p><sup>c</sup>LEAPfROG: Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods.</p></fn><fn id="table2fn4"><p><sup>d</sup>FAIR: findability, accessibility, interoperability, and reusability.</p></fn><fn id="table2fn5"><p><sup>e</sup>CKD: chronic kidney disease.</p></fn><fn id="table2fn6"><p><sup>f</sup>AKI: acute kidney injury.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s4-3"><title>Making Data Work</title><p>While much of the public and academic debate on AI in health care focuses on model explainability or algorithmic opacity, these discussions often overlook a more foundational issue: the nature of the data on which AI depends. Earlier studies on health data reuse have shown [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>] that questions of trust, consent, and governance already hinge on how data is produced, managed, and interpreted. Our findings suggest that without a deeper understanding of how health data is actually generated, structured, and used in practice, efforts to build fair, accurate, and trustworthy AI systems will remain incomplete. Data is rarely a neutral input, as it is deeply shaped by clinical routines, institutional diversity, and social context. Documentation practices, system design, and socio-ethical expectations determine what gets recorded and how it is organized. 
Recognizing this &#x201C;data work&#x201D; is not merely a technical concern but an ethical one, since it determines whose experiences are recorded and how decisions are justified.</p><p>Our findings reveal the complexity and effort involved in making EHR data fit for AI-driven research. Both the reviewed literature and stakeholder insights point to inconsistencies in EHR documentation, variability in clinical workflows, and the labor-intensive nature of data preparation and standardization. Such challenges underscore that data is produced through ongoing, situated work rather than passively collected. This aligns with the literature review on data-related activities by Bertelsen et al [<xref ref-type="bibr" rid="ref47">47</xref>], or &#x201C;data work,&#x201D; which emphasizes its multifaceted and situated nature. They group data work into 3 interrelated categories: data collection (eg, capturing, discovering, requesting, and self-tracking), data production (eg, coding, entry, and digitization), and data use and sharing (eg, providing, exchanging, and disseminating).</p><p>As data move through different stages and are reused for AI training, they often lose the clinical and institutional contexts that once gave them meaning. This process, referred to here as decontextualization, involves detaching data from the social and professional environments in which they were produced, allowing them to be reinterpreted, or even misinterpreted. Hoeyer [<xref ref-type="bibr" rid="ref48">48</xref>] describes this as a separation between the &#x201C;data work of production&#x201D; and the &#x201C;data work of analysis,&#x201D; where separation from the original context renders data fragile and open to misinterpretation. In AI development, this dynamic has ethical as well as epistemic consequences: models may treat context-dependent judgments or incomplete entries as objective facts, obscuring the human and institutional processes that shaped them. 
This dynamic is also reflected in the concept of &#x201C;broken data,&#x201D; which highlights the ongoing, often hidden repair and improvisation required to keep data fit for purpose [<xref ref-type="bibr" rid="ref49">49</xref>], as well as in studies showing how context loss invites interpretive uncertainty, with trust, visual design, and perceived credibility shaping how data is understood [<xref ref-type="bibr" rid="ref50">50</xref>].</p><p>When AI systems trained on such data are applied in clinical settings, the same loss of context can reappear, shaping how models interact with the environments that produced their data and often reinforcing the very patterns they were meant to improve. Stakeholders noted that AI systems may mirror existing prescribing and documentation practices, reproducing the same routines they aim to change. This recursive dynamic reflects what emerged from the literature as clinical tropism, where models embed themselves in the patterns most strongly represented in their training data and in the settings where those patterns persist. When that influence begins to shape the outcomes it predicts, it can result in a self-fulfilling prophecy [<xref ref-type="bibr" rid="ref51">51</xref>]. In such cases, models appear accurate precisely because their use helps make their predictions true. Clinical tropism creates the conditions, while the self-fulfilling prophecy describes the result. Together, they show how decontextualization can evolve from an issue within data to one that reshapes clinical practice, as models begin to reproduce the very patterns they were designed to improve. These concerns are mirrored in debates on explainable AI, where current approaches have been criticized for creating a false sense of understanding and trust, offering surface descriptions of model behavior without clarifying whether decisions are reasonable or justified [<xref ref-type="bibr" rid="ref52">52</xref>]. 
Rather than mitigating bias, such transparency can obscure how context loss shapes both model development and evaluation.</p><p>Extending the discussion of decontextualization, Hoeyer [<xref ref-type="bibr" rid="ref48">48</xref>] draws attention to a deeper issue often overlooked in discussions of data work: the ways in which data are interpreted and mobilized in pursuit of narratives that are often implicit and conflicting. He argues that datafication fragments patient information into pieces that can be recombined to serve diverse aims, such as clinical care, research, governance, or performance monitoring. In this process, the patient may be obscured, and data detached from the context of its production. Clinicians may document with specific knowledge and intent, while analysts or policymakers may act on institutional logics shaped by different priorities. These tensions are not resolved through technical solutions alone but often reappear, in new forms, within the very tools developed to address them. For instance, even privacy-preserving approaches such as federated learning, which are designed to mitigate data sharing risks, can inadvertently deepen decontextualization by abstracting data use from the institutional and clinical settings in which they originate [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. By enabling model training across institutions without sharing raw data, these methods aim to enhance privacy but may nevertheless reproduce institutional differences and context loss, introducing additional ethical concerns that extend beyond the scope of this paper but are highlighted elsewhere [<xref ref-type="bibr" rid="ref53">53</xref>].</p><p>As is evident in the LEAPfROG project, AI initiatives involving EHR data span a diverse network of stakeholders, including health care providers, data custodians, EHR vendors, patients, and institutions, whose interests may not align. 
These misalignments are not incidental but intrinsic to how data is leveraged, representing structural and arguably unavoidable realities that must be acknowledged. Developing responsible AI, therefore, requires collaborations that explicitly recognize these dynamics and openly address how stakeholders themselves navigate and negotiate data use in practice, while articulating the competing aims, values, and uses of data within their own processes [<xref ref-type="bibr" rid="ref47">47</xref>]. Doing so requires more than technical expertise: it depends on collaboration across domains to understand how clinical, organizational, and social contexts shape data and influence AI training and deployment. Together, these insights underscore that making data work is as much a social and ethical practice as a technical one.</p></sec><sec id="s4-4"><title>Beyond Compliance: Meaningful Stakeholder Involvement</title><p>In current debates on health data governance, the public good is often invoked as a guiding principle, with values such as solidarity promoted as ethical foundations [<xref ref-type="bibr" rid="ref54">54</xref>]. Prainsack et al [<xref ref-type="bibr" rid="ref55">55</xref>], for instance, frame data solidarity as a means to promote justice, prevent harm, and ensure collective benefit. However, critics argue that such values, while well-intentioned, are often applied in top-down, unreflective ways that limit their ethical force [<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>] and risk overlooking deeper power asymmetries, such as the lack of patient representation in governance and decision-making structures. 
Rather than rejecting these principles, they have called for alternative models grounded in mutual aid and collective action, fostering more equitable relationships between data providers, users, and affected communities.</p><p>Taken together, these critiques reveal a broader structural issue: legal and policy alignment, while necessary, is not sufficient for the ethical use of health data or responsible AI development. As our results show, many studies point to the need for participatory, transparent governance that goes beyond compliance or rhetorical appeals to ethics. Concerns about fragmented oversight, unclear consent procedures, and opaque decision-making illustrate how legitimacy is undermined when stakeholders are not meaningfully involved. Without sustained, inclusive engagement, especially with those most directly affected, governance risks remaining superficial and biased toward institutional priorities.</p><p>At a practical level, these structural shortcomings are reinforced by persistent power asymmetries in engagement processes, which risk prioritizing influential groups such as government officials, large nongovernmental organizations, or scientists [<xref ref-type="bibr" rid="ref58">58</xref>]. These &#x201C;elite&#x201D; actors, with greater access to time, resources, and expertise, tend to set the terms of participation, sidelining less-resourced groups and limiting patient involvement. This is especially problematic in the context of CKD, where patients face complex treatments, uncertainty, and symptoms that can limit participation in research [<xref ref-type="bibr" rid="ref59">59</xref>]. As a result, they are often underrepresented in studies that fail to reflect patient or caregiver priorities. 
Critical participation frameworks emphasize that such unequal conditions make engagement prone to tokenism, particularly when structural inequities like corporate influence or health care disparities remain unaddressed [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>].</p><p>These dynamics become even more pronounced in the context of AI development. Here, technological optimism often frames innovation as inherently beneficial to patient care [<xref ref-type="bibr" rid="ref61">61</xref>], which can obscure deeper ethical and social tensions. By emphasizing efficiency, accuracy, and progress, this framing tends to narrow debate to technical or policy issues, while sidelining questions of power and lived experience. Consequently, participation in AI development tends to be tightly controlled, serving institutional efficiency or public acceptance rather than fostering meaningful, value-driven dialogue. In this way, engagement risks becoming instrumental, legitimizing AI initiatives rather than engaging with lived experience or long-term ethical and practical implications.</p><p>In light of these challenges, we were particularly mindful of how stakeholder engagement was approached in the LEAPfROG workshops. Recognizing the potential influence of technological optimism, we sought to create space for more critical, grounded conversations about the role of AI in health care. By grounding ethical reflections in concrete use cases and fostering open-ended, collaborative discussions, we aimed to avoid tokenism and reduce the influence of more resourced stakeholders. 
Our mixed methods approach, which included a literature review alongside stakeholder workshops, helped broaden the scope to address systemic issues such as health care inequities and the resource-intensive nature of AI development.</p><p>As an alternative to top-down regulation and rhetorical appeals to ethics, more participatory and inclusive governance models may offer a promising path forward. One such approach involves shifting toward stakeholder-led governance within consortia, supported by coproduced guidelines or codes of conduct. A key suggestion from our workshops was to develop a shared vision around the reuse of EHR data, including its purpose, risks, and benefits, through collaborative processes involving patients, clinicians, researchers, data managers, and data protection officers. Such efforts could also include agreements on what data should be recorded in EHRs and how. Rather than treating participation as a means to secure legitimacy for predetermined goals, these practices aim to embed values like trustworthiness, solidarity, and privacy into the infrastructure of data governance itself. By continuing our collaboration with the Dutch Kidney Association (NVN), we hope to support a model of engagement that centers patient perspectives, addresses power imbalances, and ultimately aligns data-driven innovation with the needs of those most affected.</p></sec><sec id="s4-5"><title>Limitations</title><p>While time constraints, limited participant availability, and last-minute cancellations are common challenges in stakeholder workshops, a more critical limitation was ensuring balanced representation among stakeholders. Although the workshops were conducted in Dutch to promote inclusivity across diverse education levels and professional backgrounds, this choice inadvertently excluded non-Dutch-speaking participants, including some within the LEAPfROG consortium. 
Similarly, the scoping review included only English-language publications, potentially introducing language bias and omitting relevant studies in other languages. However, only 4 non-English publications were identified during screening, suggesting that such literature remains limited in this field.</p><p>In addition, the predominance of commentaries and reviews (14 out of 25) over empirical studies may limit the depth of evidence available to substantiate claims about real-world implementation, stakeholder experiences, or the practical implications. However, this imbalance also reflects the early and largely conceptual stage of ethical reflection at the intersection of AI and EHR data. Furthermore, many of the reviewed publications were not authored by researchers with specialized legal or regulatory expertise, such as legal scholars deeply familiar with the GDPR (General Data Protection Regulation). This may have shaped how certain challenges were framed and further limited discussion of broader ethical issues, such as whether EHR data should be used for AI-driven research at all.</p><p>As mentioned above, our efforts to include diverse stakeholders may also have unintentionally reinforced power or knowledge imbalances, particularly due to the underrepresentation of patients. However, as described in the methods, patient participants were briefed in advance to support meaningful engagement. To enhance transparency and relevance, all workshop participants were also invited to review and comment on the workshop reports prior to publication. In addition, while policy perspectives were well represented in the workshops, participants with formal legal or regulatory expertise were underrepresented. 
This limitation may have influenced how governance and compliance challenges were discussed.</p><p>Furthermore, thematic convergence between the literature and the stakeholder workshop discussions may reflect the dominance of Global North perspectives, particularly those shaped by Dutch socioeconomic and institutional norms. While this alignment across methods strengthens the internal coherence of our findings, it also highlights a potential blind spot. Both the literature reviewed and the workshops conducted were situated within contexts that may not capture the unique challenges and priorities of the Global South. As others have noted, failing to account for these limitations risks overlooking perspectives that remain largely absent from this paper [<xref ref-type="bibr" rid="ref62">62</xref>].</p><p>Beyond the institutional contexts examined here, large-scale health data initiatives (for example, the All of Us Research Program [<xref ref-type="bibr" rid="ref63">63</xref>] and MIMIC-IV [<xref ref-type="bibr" rid="ref64">64</xref>]) have likewise revealed persistent ethical challenges related to decontextualization, governance, and accountability. Similar questions are now emerging within Europe through the development of the European Health Data Space [<xref ref-type="bibr" rid="ref54">54</xref>], which aims to harmonize secondary data use across member states. These examples demonstrate that the issues identified in this study re-emerge even in large-scale data initiatives with formal governance frameworks, suggesting that future research should further examine how context-specific governance practices can inform responsible data sharing at scale.</p><p>Empirical evaluation and the development of metrics to assess how the proposed ethical measures influence factors such as AI model fairness, data quality, or stakeholder trust fall outside the scope of this study. 
The goal of this work was to derive ethical measures through a novel approach that combines a scoping review with workshops based on the GEA. Future studies in other settings are needed to further develop and test evaluation frameworks that can operationalize these ethical measures in practice.</p><p>Our study deliberately focused on the Dutch and European legal and policy context to support LEAPfROG&#x2019;s goals. The transferability of our stakeholder workshop methods to other countries or institutional settings was beyond its scope and could be explored in future empirical research. Future work could also broaden these discussions by addressing themes such as data sovereignty, the ethical dimensions of data labor in the Global South, and the potential for data colonialism [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref65">65</xref>].</p></sec><sec id="s4-6"><title>Conclusions</title><p>The use of EHR data in AI development is situated at the intersection of health care, data science, ethics, and policy, where competing priorities, values, and practices converge. Given this complex intersection, building responsible AI-based tools requires more than technical solutions and compliance. Our findings point to the need to begin with a critical understanding of the data, for which domain knowledge is essential. EHR data are not neutral but shaped by clinical routines, institutional constraints, and documentation practices. When removed from context, they are vulnerable to misinterpretation, which can cause AI systems to reproduce inequities and reinforce clinical patterns instead of supporting informed decision-making. Addressing these risks is essential to ensure accuracy and accountability. 
Our mixed methods approach, which included multistakeholder guidance ethics workshops, underscored the need for inclusive, value-sensitive development grounded in the realities of health care practice.</p></sec></sec></body><back><ack><p>Special thanks to Dani&#x00EB;l Tijink and Pieter van Kuilenburg for preparing and moderating the first guidance ethics workshop, and to Giovanni Cin&#x00E0; for his support in preparing the second workshop. We would also like to thank the participants of the workshops for their valuable contributions regarding the next steps for the LEAPfROG consortium, as well as the development of this paper.</p><p>Finally, the LEAPfROG consortium is a unique cross-sectional collaboration between patients, health care professionals, epidemiologists, medical informatics, and computer science researchers, as well as partners from the medical technology industry, regulatory bodies, and health care insurance. Principal investigator: Dr Joanna E Klopotowska. Work package (WP) leaders: Dr Ronald Cornet (WP1), Prof Dr Annette ten Teije (WP2), Dr Joanna E Klopotowska (WP3), and Dr Stephanie Medlock (WP4). Beneficiaries of the LEAPfROG consortium: Amsterdam University Medical Center, leadership and coordination; Vrije Universiteit Amsterdam, and Open University Heerlen. 
Ongoing LEAPfROG PhDs: Daniel Fern&#x00E1;ndez-Llaneza, Romy M P Vos, Joris E Lieverse, and Menno Maris (Amsterdam University Medical Center). The LEAPfROG Consortium members include: Ameen Abu-Hanna (Amsterdam University Medical Center), Birgit A Damoiseaux (Amsterdam University Medical Center), Cornelis Boersma (Open Universiteit), Dave A Dongelmans (Nationale Intensive Care Evaluatie foundation), David H de Koning (Amsterdam University Medical Center), Frank van Harmelen (Vrije Universiteit Amsterdam), Gerty Holla (Amsterdam Economic Board), Heiralde Marck (Koninklijke Nederlandse Maatschappij ter bevordering der Pharmacie [KNMP] Geneesmiddelen Informatie Centrum), Iacopo Vagliano (Amsterdam University Medical Center), Jan Pander (AstraZeneca), Jurjen van der Schans (Open Universiteit), Giovanni Cin&#x00E0; (Amsterdam University Medical Center), Kitty J Jager (Amsterdam University Medical Center, European Renal Association-European Dialysis and Transplantation Association [ERA-EDTA] Registry), Leonora van Drop - Grandia (Z-Index), Linda Dusseljee-Peute (Amsterdam University Medical Center), Luuk B Hilbrands (Radboud University Medical Centre), Marcel SG Kwa (College ter Beoordeling van Geneesmiddelen), Marieke A R Bak (Amsterdam University Medical Center), Mariette van den Hoven (Amsterdam University Medical Centre), Martijn G Kersloot (Castor), Nicolette F de Keizer (Amsterdam University Medical Center), Otto R Maarsingh (Amsterdam University Medical Center), Paul Blank (NWO), Piet Heingraaf (Pitts.AI), Ren&#x00E9;e de Wildt (Nier Vereniging Nederland), Ron Herings (PHARMO Institute for Drug Outcomes Research &#x0026; Amsterdam University Medical Center), Ron J Keizer (InsightRX), Ruben Boyd (IXA), Sebastiaan L Knijnenburg (Castor), Sipke Visser (Digital Health Link), Stijn Gremmen (Dutch Kidney Foundation/Nierstichting), Teun van Gelder (Leiden University Medical Center), Tjerk S Heijmens Visser (CZ Health Insurance, Zorgverzekeraars Nederland), and Vianda S 
Stel (Amsterdam University Medical Center, ERA-EDTA Registry). Generative artificial intelligence (ChatGPT, OpenAI) was used solely to provide limited assistance with grammar and phrasing. The authors confirm that all writing, analysis, and interpretation were carried out by the authors without the use of AI tools.</p></ack><notes><sec><title>Funding</title><p>The study was funded by the Nederlandse Organisatie voor Wetenschappelijk Onderzoek (NWO; Dutch Research Council) (KICH1.ST01.20.011). It was co-funded in cash by the Dutch Kidney Foundation and the National Intensive Care Evaluation (NICE) foundation, and in kind by the PHARMO Institute for Drug Outcomes Research, Castor, InsightRX, Z-Index, and Digital Health Link.</p></sec></notes><fn-group><fn fn-type="con"><p>MTM prepared the original draft of the manuscript and contributed to conceptualization, formal analysis, investigation, methodology, verification, resources, and visualization. MB, JEK, and RC contributed to conceptualization, investigation, methodology, supervision, verification (MB and JEK), resources (MB and JEK), and review and editing of the manuscript. JEK, RC, and MvdH acquired funding, and MvdH also contributed to the investigation, resources, and review and editing. DF-L contributed to methodology and review and editing. JEL contributed to verification and review and editing. 
All authors and LEAPfROG consortium members gave final approval of the submitted version.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AKI</term><def><p>acute kidney injury</p></def></def-item><def-item><term id="abb3">CKD</term><def><p>chronic kidney disease</p></def></def-item><def-item><term id="abb4">DAKI</term><def><p>drug-induced acute kidney injury</p></def></def-item><def-item><term id="abb5">EHR</term><def><p>electronic health record</p></def></def-item><def-item><term id="abb6">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb7">GEA</term><def><p>Guidance Ethics Approach</p></def></def-item><def-item><term id="abb8">LEAPfROG</term><def><p>Leveraging Real-World Data to Optimize Pharmacotherapy Outcomes in Multimorbid Patients Using Machine Learning and Knowledge Representation Methods</p></def></def-item><def-item><term id="abb9">MeSH</term><def><p>Medical Subject Headings</p></def></def-item><def-item><term id="abb10">NVN</term><def><p>Nierpati&#x00EB;nten Vereniging Nederland (Kidney Patients Association Netherlands)</p></def></def-item><def-item><term id="abb11">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hammack-Aviran</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Brelsford</surname><given-names>KM</given-names> </name><name name-style="western"><surname>McKenna</surname><given-names>KC</given-names> </name><name name-style="western"><surname>Graham</surname><given-names>RD</given-names> </name><name 
name-style="western"><surname>Lampron</surname><given-names>ZM</given-names> </name><name name-style="western"><surname>Beskow</surname><given-names>LM</given-names> </name></person-group><article-title>Research use of electronic health records: patients&#x2019; views on alternative approaches to permission</article-title><source>AJOB Empir Bioeth</source><year>2020</year><volume>11</volume><issue>3</issue><fpage>172</fpage><lpage>186</lpage><pub-id pub-id-type="doi">10.1080/23294515.2020.1755383</pub-id><pub-id pub-id-type="medline">32338567</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>J&#x00F8;rgensen</surname><given-names>JT</given-names> </name></person-group><article-title>Twenty years with personalized medicine: past, present, and future of individualized pharmacotherapy</article-title><source>Oncologist</source><year>2019</year><month>07</month><volume>24</volume><issue>7</issue><fpage>e432</fpage><lpage>e440</lpage><pub-id pub-id-type="doi">10.1634/theoncologist.2019-0054</pub-id><pub-id pub-id-type="medline">30940745</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fraccaro</surname><given-names>P</given-names> </name><name name-style="western"><surname>Arguello Casteleiro</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ainsworth</surname><given-names>J</given-names> </name><name name-style="western"><surname>Buchan</surname><given-names>I</given-names> </name></person-group><article-title>Adoption of clinical decision support in multimorbidity: a systematic review</article-title><source>JMIR Med Inform</source><year>2015</year><month>01</month><day>7</day><volume>3</volume><issue>1</issue><fpage>e4</fpage><pub-id pub-id-type="doi">10.2196/medinform.3503</pub-id><pub-id 
pub-id-type="medline">25785897</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Masnoon</surname><given-names>N</given-names> </name><name name-style="western"><surname>Shakib</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kalisch-Ellett</surname><given-names>L</given-names> </name><name name-style="western"><surname>Caughey</surname><given-names>GE</given-names> </name></person-group><article-title>What is polypharmacy? a systematic review of definitions</article-title><source>BMC Geriatr</source><year>2017</year><month>10</month><day>10</day><volume>17</volume><issue>1</issue><fpage>230</fpage><pub-id pub-id-type="doi">10.1186/s12877-017-0621-2</pub-id><pub-id pub-id-type="medline">29017448</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chowdhury</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Chandra Das</surname><given-names>D</given-names> </name><name name-style="western"><surname>Sunna</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Beyene</surname><given-names>J</given-names> </name><name name-style="western"><surname>Hossain</surname><given-names>A</given-names> </name></person-group><article-title>Global and regional prevalence of multimorbidity in the adult population in community settings: a systematic review and meta-analysis</article-title><source>EClinicalMedicine</source><year>2023</year><month>03</month><volume>57</volume><fpage>101860</fpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2023.101860</pub-id><pub-id pub-id-type="medline">36864977</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Barnett</surname><given-names>K</given-names> </name><name name-style="western"><surname>Mercer</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Norbury</surname><given-names>M</given-names> </name><name name-style="western"><surname>Watt</surname><given-names>G</given-names> </name><name name-style="western"><surname>Wyke</surname><given-names>S</given-names> </name><name name-style="western"><surname>Guthrie</surname><given-names>B</given-names> </name></person-group><article-title>Epidemiology of multimorbidity and implications for health care, research, and medical education: a cross-sectional study</article-title><source>The Lancet</source><year>2012</year><month>07</month><volume>380</volume><issue>9836</issue><fpage>37</fpage><lpage>43</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(12)60240-2</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilkinson</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Dumontier</surname><given-names>M</given-names> </name><name name-style="western"><surname>Aalbersberg</surname><given-names>IJJ</given-names> </name><etal/></person-group><article-title>The FAIR guiding principles for scientific data management and stewardship</article-title><source>Sci Data</source><year>2016</year><month>03</month><day>15</day><volume>3</volume><issue>1</issue><fpage>160018</fpage><pub-id pub-id-type="doi">10.1038/sdata.2016.18</pub-id><pub-id pub-id-type="medline">26978244</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hersh</surname><given-names>WR</given-names> </name><name name-style="western"><surname>Weiner</surname><given-names>MG</given-names> </name><name 
name-style="western"><surname>Embi</surname><given-names>PJ</given-names> </name><etal/></person-group><article-title>Caveats for the use of operational electronic health record data in comparative effectiveness research</article-title><source>Med Care</source><year>2013</year><month>08</month><volume>51</volume><issue>8 Suppl 3</issue><fpage>S30</fpage><lpage>7</lpage><pub-id pub-id-type="doi">10.1097/MLR.0b013e31829b1dbd</pub-id><pub-id pub-id-type="medline">23774517</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goldstein</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Navar</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Pencina</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Ioannidis</surname><given-names>JPA</given-names> </name></person-group><article-title>Opportunities and challenges in developing risk prediction models with electronic health records data: a systematic review</article-title><source>J Am Med Inform Assoc</source><year>2017</year><month>01</month><day>1</day><volume>24</volume><issue>1</issue><fpage>198</fpage><lpage>208</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocw042</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Magrabi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ammenwerth</surname><given-names>E</given-names> </name><name name-style="western"><surname>McNair</surname><given-names>JB</given-names> </name><etal/></person-group><article-title>Artificial intelligence in clinical decision support: challenges for evaluating AI and practical implications</article-title><source>Yearb Med 
Inform</source><year>2019</year><month>08</month><volume>28</volume><issue>1</issue><fpage>128</fpage><lpage>134</lpage><pub-id pub-id-type="doi">10.1055/s-0039-1677903</pub-id><pub-id pub-id-type="medline">31022752</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kristiansen</surname><given-names>TB</given-names> </name><name name-style="western"><surname>Kristensen</surname><given-names>K</given-names> </name><name name-style="western"><surname>Uffelmann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Brandslund</surname><given-names>I</given-names> </name></person-group><article-title>Erroneous data: the Achilles&#x2019; heel of AI and personalized medicine</article-title><source>Front Digit Health</source><year>2022</year><volume>4</volume><fpage>862095</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2022.862095</pub-id><pub-id pub-id-type="medline">35937419</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aitken</surname><given-names>M</given-names> </name><name name-style="western"><surname>de St Jorre</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pagliari</surname><given-names>C</given-names> </name><name name-style="western"><surname>Jepson</surname><given-names>R</given-names> </name><name name-style="western"><surname>Cunningham-Burley</surname><given-names>S</given-names> </name></person-group><article-title>Public responses to the sharing and linkage of health data for research purposes: a systematic review and thematic synthesis of qualitative studies</article-title><source>BMC Med Ethics</source><year>2016</year><month>11</month><day>10</day><volume>17</volume><issue>1</issue><fpage>73</fpage><pub-id 
pub-id-type="doi">10.1186/s12910-016-0153-x</pub-id><pub-id pub-id-type="medline">27832780</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Skovgaard</surname><given-names>LL</given-names> </name><name name-style="western"><surname>Wadmann</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hoeyer</surname><given-names>K</given-names> </name></person-group><article-title>A review of attitudes towards the reuse of health data among people in the European Union: the primacy of purpose and the common good</article-title><source>Health Policy</source><year>2019</year><month>06</month><volume>123</volume><issue>6</issue><fpage>564</fpage><lpage>571</lpage><pub-id pub-id-type="doi">10.1016/j.healthpol.2019.03.012</pub-id><pub-id pub-id-type="medline">30961905</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Piasecki</surname><given-names>J</given-names> </name><name name-style="western"><surname>Walkiewicz-&#x017B;arek</surname><given-names>E</given-names> </name><name name-style="western"><surname>Figas-Skrzypulec</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kordecka</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dranseika</surname><given-names>V</given-names> </name></person-group><article-title>Ethical issues in biomedical research using electronic health records: a systematic review</article-title><source>Med Health Care Philos</source><year>2021</year><month>12</month><volume>24</volume><issue>4</issue><fpage>633</fpage><lpage>658</lpage><pub-id pub-id-type="doi">10.1007/s11019-021-10031-6</pub-id><pub-id pub-id-type="medline">34146228</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morley</surname><given-names>J</given-names> </name><name name-style="western"><surname>Machado</surname><given-names>CCV</given-names> </name><name name-style="western"><surname>Burr</surname><given-names>C</given-names> </name><etal/></person-group><article-title>The ethics of AI in health care: a mapping review</article-title><source>Soc Sci Med</source><year>2020</year><month>09</month><volume>260</volume><fpage>113172</fpage><pub-id pub-id-type="doi">10.1016/j.socscimed.2020.113172</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fantus</surname><given-names>S</given-names> </name></person-group><article-title>Medical artificial intelligence ethics: a systematic review of empirical studies</article-title><source>Digit Health</source><year>2023</year><volume>9</volume><fpage>20552076231186064</fpage><pub-id pub-id-type="doi">10.1177/20552076231186064</pub-id><pub-id pub-id-type="medline">37434728</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Verbeek</surname><given-names>PP</given-names> </name><name name-style="western"><surname>Tijink</surname><given-names>D</given-names> </name></person-group><article-title>Guidance ethics approach: an ethical dialogue about technology with perspective on actions</article-title><source>University of Twente</source><year>2020</year><access-date>2026-02-04</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://ris.utwente.nl/ws/portalfiles/portal/247401391/060_002_Boek_Guidance_ethics_approach_Digital_EN.pdf">https://ris.utwente.nl/ws/portalfiles/portal/247401391/060_002_Boek_Guidance_ethics_approach_Digital_EN.pdf</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tricco</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Lillie</surname><given-names>E</given-names> </name><name name-style="western"><surname>Zarin</surname><given-names>W</given-names> </name><etal/></person-group><article-title>PRISMA extension for scoping reviews (PRISMA-ScR): checklist and explanation</article-title><source>Ann Intern Med</source><year>2018</year><month>10</month><day>2</day><volume>169</volume><issue>7</issue><fpage>467</fpage><lpage>473</lpage><pub-id pub-id-type="doi">10.7326/M18-0850</pub-id><pub-id pub-id-type="medline">30178033</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ouzzani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hammady</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fedorowicz</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Elmagarmid</surname><given-names>A</given-names> </name></person-group><article-title>Rayyan-a web and mobile app for systematic reviews</article-title><source>Syst Rev</source><year>2016</year><month>12</month><day>5</day><volume>5</volume><issue>1</issue><fpage>210</fpage><pub-id pub-id-type="doi">10.1186/s13643-016-0384-4</pub-id><pub-id pub-id-type="medline">27919275</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Benchimol</surname><given-names>EI</given-names> </name><name name-style="western"><surname>Smeeth</surname><given-names>L</given-names> </name><name name-style="western"><surname>Guttmann</surname><given-names>A</given-names> </name><etal/></person-group><article-title>The reporting of studies conducted using observational routinely-collected health data (RECORD) statement</article-title><source>PLoS Med</source><year>2015</year><month>10</month><volume>12</volume><issue>10</issue><fpage>e1001885</fpage><pub-id pub-id-type="doi">10.1371/journal.pmed.1001885</pub-id><pub-id pub-id-type="medline">26440803</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Krom</surname><given-names>A</given-names> </name><name name-style="western"><surname>de Boer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Geurtzen</surname><given-names>R</given-names> </name><name name-style="western"><surname>de Vries</surname><given-names>MC</given-names> </name></person-group><article-title>Capabilities and stakeholders - two ways of enriching the ethical debate on artificial womb technology</article-title><source>Am J Bioeth</source><year>2023</year><month>05</month><volume>23</volume><issue>5</issue><fpage>110</fpage><lpage>113</lpage><pub-id pub-id-type="doi">10.1080/15265161.2023.2191028</pub-id><pub-id pub-id-type="medline">37130420</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alami</surname><given-names>H</given-names> </name><name name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name><name name-style="western"><surname>Auclair</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Artificial intelligence and health 
technology assessment: anticipating a new level of complexity</article-title><source>J Med Internet Res</source><year>2020</year><month>07</month><day>7</day><volume>22</volume><issue>7</issue><fpage>e17707</fpage><pub-id pub-id-type="doi">10.2196/17707</pub-id><pub-id pub-id-type="medline">32406850</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baric-Parker</surname><given-names>J</given-names> </name><name name-style="western"><surname>Anderson</surname><given-names>EE</given-names> </name></person-group><article-title>Patient data-sharing for AI: ethical challenges, catholic solutions</article-title><source>Linacre Q</source><year>2020</year><month>11</month><volume>87</volume><issue>4</issue><fpage>471</fpage><lpage>481</lpage><pub-id pub-id-type="doi">10.1177/0024363920922690</pub-id><pub-id pub-id-type="medline">33100395</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bednorz</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mak</surname><given-names>JKL</given-names> </name><name name-style="western"><surname>Jylh&#x00E4;v&#x00E4;</surname><given-names>J</given-names> </name><name name-style="western"><surname>Religa</surname><given-names>D</given-names> </name></person-group><article-title>Use of electronic medical records (EMR) in gerontology: benefits, considerations and a promising future</article-title><source>Clin Interv Aging</source><year>2023</year><volume>18</volume><fpage>2171</fpage><lpage>2183</lpage><pub-id pub-id-type="doi">10.2147/CIA.S400887</pub-id><pub-id pub-id-type="medline">38152074</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Cohen</surname><given-names>IG</given-names> </name><name name-style="western"><surname>Amarasingham</surname><given-names>R</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>A</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lo</surname><given-names>B</given-names> </name></person-group><article-title>The legal and ethical concerns that arise from using complex predictive analytics in health care</article-title><source>Health Aff (Millwood)</source><year>2014</year><month>07</month><volume>33</volume><issue>7</issue><fpage>1139</fpage><lpage>1147</lpage><pub-id pub-id-type="doi">10.1377/hlthaff.2014.0048</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fischer</surname><given-names>T</given-names> </name><name name-style="western"><surname>Brothers</surname><given-names>KB</given-names> </name><name name-style="western"><surname>Erdmann</surname><given-names>P</given-names> </name><name name-style="western"><surname>Langanke</surname><given-names>M</given-names> </name></person-group><article-title>Clinical decision-making and secondary findings in systems medicine</article-title><source>BMC Med Ethics</source><year>2016</year><month>05</month><day>21</day><volume>17</volume><issue>1</issue><fpage>32</fpage><pub-id pub-id-type="doi">10.1186/s12910-016-0113-5</pub-id><pub-id pub-id-type="medline">27209083</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ford</surname><given-names>E</given-names> </name><name name-style="western"><surname>Oswald</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Hassan</surname><given-names>L</given-names> </name><name name-style="western"><surname>Bozentko</surname><given-names>K</given-names> </name><name name-style="western"><surname>Nenadic</surname><given-names>G</given-names> </name><name name-style="western"><surname>Cassell</surname><given-names>J</given-names> </name></person-group><article-title>Should free-text data in electronic medical records be shared for research? a citizens&#x2019; jury study in the UK</article-title><source>J Med Ethics</source><year>2020</year><month>06</month><volume>46</volume><issue>6</issue><fpage>367</fpage><lpage>377</lpage><pub-id pub-id-type="doi">10.1136/medethics-2019-105472</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>M&#x00FC;ller</surname><given-names>S</given-names> </name></person-group><article-title>Is there a civic duty to support medical AI development by sharing electronic health records?</article-title><source>BMC Med Ethics</source><year>2022</year><month>12</month><day>10</day><volume>23</volume><issue>1</issue><fpage>134</fpage><pub-id pub-id-type="doi">10.1186/s12910-022-00871-z</pub-id><pub-id pub-id-type="medline">36496427</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kamradt</surname><given-names>M</given-names> </name><name name-style="western"><surname>Po&#x00DF;-Doering</surname><given-names>R</given-names> </name><name name-style="western"><surname>Szecsenyi</surname><given-names>J</given-names> </name></person-group><article-title>Exploring physician perspectives on using real-world care data for the development of artificial intelligence-based technologies in health care: qualitative study</article-title><source>JMIR Form 
Res</source><year>2022</year><month>05</month><day>18</day><volume>6</volume><issue>5</issue><fpage>e35367</fpage><pub-id pub-id-type="doi">10.2196/35367</pub-id><pub-id pub-id-type="medline">35583921</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Knevel</surname><given-names>R</given-names> </name><name name-style="western"><surname>Liao</surname><given-names>KP</given-names> </name></person-group><article-title>From real-world electronic health record data to real-world results using artificial intelligence</article-title><source>Ann Rheum Dis</source><year>2023</year><month>03</month><volume>82</volume><issue>3</issue><fpage>306</fpage><lpage>311</lpage><pub-id pub-id-type="doi">10.1136/ard-2022-222626</pub-id><pub-id pub-id-type="medline">36150748</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liaw</surname><given-names>ST</given-names> </name><name name-style="western"><surname>Liyanage</surname><given-names>H</given-names> </name><name name-style="western"><surname>Kuziemsky</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Ethical use of electronic health record data and artificial intelligence: recommendations of the primary care informatics working group of the international medical informatics association</article-title><source>Yearb Med Inform</source><year>2020</year><month>08</month><volume>29</volume><issue>1</issue><fpage>51</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1055/s-0040-1701980</pub-id><pub-id pub-id-type="medline">32303098</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Anom</surname><given-names>BY</given-names> 
</name></person-group><article-title>Ethics of big data and artificial intelligence in medicine</article-title><source>Ethics Med Public Health</source><year>2020</year><month>10</month><volume>15</volume><fpage>100568</fpage><pub-id pub-id-type="doi">10.1016/j.jemep.2020.100568</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vollmer</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mateen</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Bohner</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Machine learning and artificial intelligence research for patient benefit: 20 critical questions on transparency, replicability, ethics, and effectiveness</article-title><source>BMJ</source><year>2020</year><month>03</month><day>20</day><volume>368</volume><fpage>l6927</fpage><pub-id pub-id-type="doi">10.1136/bmj.l6927</pub-id><pub-id pub-id-type="medline">32198138</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Panagiotakos</surname><given-names>D</given-names> </name></person-group><article-title>Real-world data: a brief review of the methods, applications, challenges and opportunities</article-title><source>BMC Med Res Methodol</source><year>2022</year><month>11</month><day>5</day><volume>22</volume><issue>1</issue><fpage>287</fpage><pub-id pub-id-type="doi">10.1186/s12874-022-01768-6</pub-id><pub-id pub-id-type="medline">36335315</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Khalid</surname><given-names>N</given-names> </name><name name-style="western"><surname>Qayyum</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bilal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Al-Fuqaha</surname><given-names>A</given-names> </name><name name-style="western"><surname>Qadir</surname><given-names>J</given-names> </name></person-group><article-title>Privacy-preserving artificial intelligence in healthcare: techniques and applications</article-title><source>Comput Biol Med</source><year>2023</year><month>05</month><volume>158</volume><fpage>106848</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.106848</pub-id><pub-id pub-id-type="medline">37044052</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lovis</surname><given-names>C</given-names> </name></person-group><article-title>Unlocking the power of artificial intelligence and big data in medicine</article-title><source>J Med Internet Res</source><year>2019</year><month>11</month><day>8</day><volume>21</volume><issue>11</issue><fpage>e16607</fpage><pub-id pub-id-type="doi">10.2196/16607</pub-id><pub-id pub-id-type="medline">31702565</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chekroud</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Bondar</surname><given-names>J</given-names> </name><name name-style="western"><surname>Delgadillo</surname><given-names>J</given-names> </name><etal/></person-group><article-title>The promise of machine learning in predicting treatment outcomes in psychiatry</article-title><source>World 
Psychiatry</source><year>2021</year><month>06</month><volume>20</volume><issue>2</issue><fpage>154</fpage><lpage>170</lpage><pub-id pub-id-type="doi">10.1002/wps.20882</pub-id><pub-id pub-id-type="medline">34002503</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ho</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Caals</surname><given-names>K</given-names></name></person-group><article-title>A call for an ethics and governance action plan to harness the power of artificial intelligence and digitalization in nephrology</article-title><source>Semin Nephrol</source><year>2021</year><publisher-name>Elsevier</publisher-name><pub-id pub-id-type="doi">10.1016/j.semnephrol.2021.05.009</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Atkinson</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Atkinson</surname><given-names>EG</given-names> </name></person-group><article-title>Machine learning and health care: potential benefits and issues</article-title><source>J Ambul Care Manage</source><year>2023</year><volume>46</volume><issue>2</issue><fpage>114</fpage><lpage>120</lpage><pub-id pub-id-type="doi">10.1097/JAC.0000000000000453</pub-id><pub-id pub-id-type="medline">36649491</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Breen</surname><given-names>N</given-names> </name><name name-style="western"><surname>Berrigan</surname><given-names>D</given-names> </name><name name-style="western"><surname>Jackson</surname><given-names>JS</given-names> </name><etal/></person-group><article-title>Translational health disparities research in a data-rich 
world</article-title><source>Health Equity</source><year>2019</year><volume>3</volume><issue>1</issue><fpage>588</fpage><lpage>600</lpage><pub-id pub-id-type="doi">10.1089/heq.2019.0042</pub-id><pub-id pub-id-type="medline">31720554</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Darcel</surname><given-names>K</given-names> </name><name name-style="western"><surname>Upshaw</surname><given-names>T</given-names> </name><name name-style="western"><surname>Craig-Neil</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Implementing artificial intelligence in Canadian primary care: barriers and strategies identified through a national deliberative dialogue</article-title><source>PLoS ONE</source><year>2023</year><volume>18</volume><issue>2</issue><fpage>e0281733</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0281733</pub-id><pub-id pub-id-type="medline">36848339</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Paulus</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Kent</surname><given-names>DM</given-names> </name></person-group><article-title>Predictably unequal: understanding and addressing concerns that algorithmic clinical prediction may increase health disparities</article-title><source>NPJ Digit Med</source><year>2020</year><volume>3</volume><issue>1</issue><fpage>99</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-0304-9</pub-id><pub-id pub-id-type="medline">32821854</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gianfrancesco</surname><given-names>MA</given-names> </name><name 
name-style="western"><surname>Tamang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yazdany</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schmajuk</surname><given-names>G</given-names> </name></person-group><article-title>Potential biases in machine learning algorithms using electronic health record data</article-title><source>JAMA Intern Med</source><year>2018</year><month>11</month><day>1</day><volume>178</volume><issue>11</issue><fpage>1544</fpage><lpage>1547</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2018.3763</pub-id><pub-id pub-id-type="medline">30128552</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Landau</surname><given-names>AY</given-names> </name><name name-style="western"><surname>Ferrarello</surname><given-names>S</given-names> </name><name name-style="western"><surname>Blanchard</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Developing machine learning-based models to help identify child abuse and neglect: key ethical challenges and recommended solutions</article-title><source>J Am Med Inform Assoc</source><year>2022</year><month>01</month><day>29</day><volume>29</volume><issue>3</issue><fpage>576</fpage><lpage>580</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocab286</pub-id><pub-id pub-id-type="medline">35024859</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bozkurt</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cahan</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Seneviratne</surname><given-names>MG</given-names> </name><etal/></person-group><article-title>Reporting of demographic data and representativeness 
in machine learning models using electronic health records</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>12</month><day>9</day><volume>27</volume><issue>12</issue><fpage>1878</fpage><lpage>1884</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocaa164</pub-id><pub-id pub-id-type="medline">32935131</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rubinger</surname><given-names>L</given-names> </name><name name-style="western"><surname>Gazendam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ekhtiari</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bhandari</surname><given-names>M</given-names> </name></person-group><article-title>Machine learning and artificial intelligence in research and healthcare</article-title><source>Injury</source><year>2023</year><month>05</month><volume>54 Suppl 3</volume><fpage>S69</fpage><lpage>S73</lpage><pub-id pub-id-type="doi">10.1016/j.injury.2022.01.046</pub-id><pub-id pub-id-type="medline">35135685</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bertelsen</surname><given-names>PS</given-names> </name><name name-style="western"><surname>Bossen</surname><given-names>C</given-names> </name><name name-style="western"><surname>Knudsen</surname><given-names>C</given-names> </name><name name-style="western"><surname>Pedersen</surname><given-names>AM</given-names> </name></person-group><article-title>Data work and practices in healthcare: a scoping review</article-title><source>Int J Med Inform</source><year>2024</year><month>04</month><volume>184</volume><fpage>105348</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2024.105348</pub-id><pub-id 
pub-id-type="medline">38309238</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Hoeyer</surname><given-names>K</given-names> </name></person-group><source>Data Paradoxes: The Politics of Intensified Data Sourcing in Contemporary Healthcare</source><year>2023</year><publisher-name>MIT Press</publisher-name><pub-id pub-id-type="other">9780262374156</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pink</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ruckenstein</surname><given-names>M</given-names> </name><name name-style="western"><surname>Willim</surname><given-names>R</given-names> </name><name name-style="western"><surname>Duque</surname><given-names>M</given-names> </name></person-group><article-title>Broken data: conceptualising data in an emerging world</article-title><source>Big Data Soc</source><year>2018</year><month>01</month><volume>5</volume><issue>1</issue><fpage>2053951717753228</fpage><pub-id pub-id-type="doi">10.1177/2053951717753228</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kennedy</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hill</surname><given-names>RL</given-names> </name></person-group><article-title>The feeling of numbers: emotions in everyday engagements with data and their visualisation</article-title><source>Sociology</source><year>2018</year><month>08</month><volume>52</volume><issue>4</issue><fpage>830</fpage><lpage>848</lpage><pub-id pub-id-type="doi">10.1177/0038038516674675</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>van Amsterdam</surname><given-names>WAC</given-names> </name><name name-style="western"><surname>van Geloven</surname><given-names>N</given-names> </name><name name-style="western"><surname>Krijthe</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Ranganath</surname><given-names>R</given-names> </name><name name-style="western"><surname>Cin&#x00E0;</surname><given-names>G</given-names> </name></person-group><article-title>When accurate prediction models yield harmful self-fulfilling prophecies</article-title><source>Patterns (N Y)</source><year>2025</year><month>04</month><day>11</day><volume>6</volume><issue>4</issue><fpage>101229</fpage><pub-id pub-id-type="doi">10.1016/j.patter.2025.101229</pub-id><pub-id pub-id-type="medline">40264961</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghassemi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Oakden-Rayner</surname><given-names>L</given-names> </name><name name-style="western"><surname>Beam</surname><given-names>AL</given-names> </name></person-group><article-title>The false hope of current approaches to explainable artificial intelligence in health care</article-title><source>Lancet Digit Health</source><year>2021</year><month>11</month><volume>3</volume><issue>11</issue><fpage>e745</fpage><lpage>e750</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(21)00208-9</pub-id><pub-id pub-id-type="medline">34711379</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bak</surname><given-names>M</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><name 
name-style="western"><surname>Celi</surname><given-names>LA</given-names> </name><etal/></person-group><article-title>Federated learning is not a cure-all for data ethics</article-title><source>Nat Mach Intell</source><year>2024</year><volume>6</volume><issue>4</issue><fpage>370</fpage><lpage>372</lpage><pub-id pub-id-type="doi">10.1038/s42256-024-00813-x</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Staunton</surname><given-names>C</given-names> </name><name name-style="western"><surname>Shabani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mascalzoni</surname><given-names>D</given-names> </name><name name-style="western"><surname>Me&#x017E;inska</surname><given-names>S</given-names> </name><name name-style="western"><surname>Slokenberga</surname><given-names>S</given-names> </name></person-group><article-title>Ethical and social reflections on the proposed European Health Data Space</article-title><source>Eur J Hum Genet</source><year>2024</year><month>05</month><volume>32</volume><issue>5</issue><fpage>498</fpage><lpage>505</lpage><pub-id pub-id-type="doi">10.1038/s41431-024-01543-9</pub-id><pub-id pub-id-type="medline">38355959</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Prainsack</surname><given-names>B</given-names> </name><name name-style="western"><surname>El-Sayed</surname><given-names>S</given-names> </name><name name-style="western"><surname>Forg&#x00F3;</surname><given-names>N</given-names> </name><name name-style="western"><surname>Szoszkiewicz</surname><given-names>&#x0141;</given-names> </name><name name-style="western"><surname>Baumer</surname><given-names>P</given-names> </name></person-group><article-title>Data solidarity: a blueprint for governing 
health futures</article-title><source>Lancet Digit Health</source><year>2022</year><month>11</month><volume>4</volume><issue>11</issue><fpage>e773</fpage><lpage>e774</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(22)00189-3</pub-id><pub-id pub-id-type="medline">36307191</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bak</surname><given-names>MAR</given-names> </name><name name-style="western"><surname>Ploem</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>HL</given-names> </name><name name-style="western"><surname>Blom</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Willems</surname><given-names>DL</given-names> </name></person-group><article-title>Towards trust-based governance of health data research</article-title><source>Med Health Care Philos</source><year>2023</year><month>06</month><volume>26</volume><issue>2</issue><fpage>185</fpage><lpage>200</lpage><pub-id pub-id-type="doi">10.1007/s11019-022-10134-8</pub-id><pub-id pub-id-type="medline">36633724</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Proost</surname><given-names>MD</given-names> </name></person-group><article-title>Data solidarity disrupted: musings on the overlooked role of mutual aid in data-driven medicine</article-title><source>Kennedy Inst Ethics J</source><year>2023</year><volume>33</volume><issue>4</issue><fpage>401</fpage><lpage>419</lpage><pub-id pub-id-type="doi">10.1353/ken.2023.a931052</pub-id><pub-id pub-id-type="medline">38973484</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Turnhout</surname><given-names>E</given-names> </name><name name-style="western"><surname>Metze</surname><given-names>T</given-names> </name><name name-style="western"><surname>Wyborn</surname><given-names>C</given-names> </name><name name-style="western"><surname>Klenk</surname><given-names>N</given-names> </name><name name-style="western"><surname>Louder</surname><given-names>E</given-names> </name></person-group><article-title>The politics of co-production: participation, power, and transformation</article-title><source>Curr Opin Environ Sustain</source><year>2020</year><month>02</month><volume>42</volume><fpage>15</fpage><lpage>21</lpage><pub-id pub-id-type="doi">10.1016/j.cosust.2019.11.009</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cazzolli</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sluiter</surname><given-names>A</given-names> </name><name name-style="western"><surname>Guha</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Partnering with patients and caregivers to enrich research and care in kidney disease: values and strategies</article-title><source>Clin Kidney J</source><year>2023</year><month>09</month><volume>16</volume><issue>Suppl 1</issue><fpage>i57</fpage><lpage>i68</lpage><pub-id pub-id-type="doi">10.1093/ckj/sfad063</pub-id><pub-id pub-id-type="medline">37711636</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dedding</surname><given-names>C</given-names> </name><name name-style="western"><surname>Aussems</surname><given-names>K</given-names> </name></person-group><article-title>Participatie, het verschil tussen een methode en een kritisch paradigma [participation, the difference between a 
method and a critical paradigm]</article-title><source>TSG Tijdschr Gezondheidswet [TSG J Health Sci]</source><year>2024</year><volume>102</volume><issue>3</issue><fpage>81</fpage><lpage>87</lpage><pub-id pub-id-type="doi">10.1007/s12508-024-00439-9</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lysen</surname><given-names>F</given-names> </name><name name-style="western"><surname>Wyatt</surname><given-names>S</given-names> </name></person-group><article-title>Refusing participation: hesitations about designing responsible patient engagement with artificial intelligence in healthcare</article-title><source>J Responsible Innov</source><year>2024</year><month>12</month><day>31</day><volume>11</volume><issue>1</issue><fpage>2300161</fpage><pub-id pub-id-type="doi">10.1080/23299460.2023.2300161</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Roche</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lewis</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wall</surname><given-names>P</given-names> </name></person-group><article-title>Artificial intelligence ethics: an inclusive global discourse</article-title><source>arXiv</source><comment>Preprint posted online on Aug 23, 2021</comment><pub-id pub-id-type="doi">10.48550/arXiv.2108.09959</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ramirez</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Sulieman</surname><given-names>L</given-names> </name><name name-style="western"><surname>Schlueter</surname><given-names>DJ</given-names> 
</name><etal/></person-group><article-title>The All of Us research program: data quality, utility, and diversity</article-title><source>Patterns (N Y)</source><year>2022</year><month>08</month><day>12</day><volume>3</volume><issue>8</issue><fpage>100570</fpage><pub-id pub-id-type="doi">10.1016/j.patter.2022.100570</pub-id><pub-id pub-id-type="medline">36033590</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Johnson</surname><given-names>AEW</given-names> </name><name name-style="western"><surname>Bulgarelli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>L</given-names> </name><etal/></person-group><article-title>MIMIC-IV, a freely accessible electronic health record dataset</article-title><source>Sci Data</source><year>2023</year><month>01</month><day>3</day><volume>10</volume><issue>1</issue><fpage>1</fpage><pub-id pub-id-type="doi">10.1038/s41597-022-01899-x</pub-id><pub-id pub-id-type="medline">36596836</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arora</surname><given-names>A</given-names> </name><name name-style="western"><surname>Barrett</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>E</given-names> </name><name name-style="western"><surname>Oborn</surname><given-names>E</given-names> </name><name name-style="western"><surname>Prince</surname><given-names>K</given-names> </name></person-group><article-title>Risk and the future of AI: algorithmic bias, data colonialism, and marginalization</article-title><source>Inf Organ</source><year>2023</year><month>09</month><volume>33</volume><issue>3</issue><fpage>100478</fpage><pub-id 
pub-id-type="doi">10.1016/j.infoandorg.2023.100478</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search query details for systematic search.</p><media xlink:href="jmir_v28i1e79863_app1.pdf" xlink:title="PDF File, 172 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Report of stakeholder workshop 1.</p><media xlink:href="jmir_v28i1e79863_app2.pdf" xlink:title="PDF File, 660 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Report of stakeholder workshop 2.</p><media xlink:href="jmir_v28i1e79863_app3.pdf" xlink:title="PDF File, 456 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Characteristics of included studies.</p><media xlink:href="jmir_v28i1e79863_app4.pdf" xlink:title="PDF File, 313 KB"/></supplementary-material><supplementary-material id="app5"><label>Checklist 1</label><p>PRISMA-ScR checklist.</p><media xlink:href="jmir_v28i1e79863_app5.docx" xlink:title="DOCX File, 40 KB"/></supplementary-material></app-group></back></article>