<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e92680</article-id><article-id pub-id-type="doi">10.2196/92680</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>From Pilot Trap to Institutional Capacity: A Governance Framework for Sustainable Clinical AI Implementation in Health Systems</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Tian</surname><given-names>Jin</given-names></name><degrees>MME, MHS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhao</surname><given-names>Zengren</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Tang</surname><given-names>Longmei</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Song</surname><given-names>Yongzhao</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Yuchang</given-names></name><degrees>MPH</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Jiang</surname><given-names>Nan</given-names></name><degrees>DPH</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Hospital Management Innovation Center, The First Hospital of Hebei Medical University</institution><addr-line>89 Donggang Street</addr-line><addr-line>Shijiazhuang</addr-line><addr-line>Hebei</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Social Medicine and Health Services Management, Hebei Medical University</institution><addr-line>Shijiazhuang</addr-line><addr-line>Hebei</addr-line><country>China</country></aff><aff id="aff3"><institution>Hebei Province Key Laboratory of Environmental and Human Health</institution><addr-line>Shijiazhuang</addr-line><addr-line>Hebei</addr-line><country>China</country></aff><aff id="aff4"><institution>Department of Public Health, The First Hospital of Hebei Medical University</institution><addr-line>Shijiazhuang</addr-line><addr-line>Hebei</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Fletscher</surname><given-names>Luis</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Palama</surname><given-names>Valentina</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Jin Tian, MME, MHS, Hospital Management 
Innovation Center, The First Hospital of Hebei Medical University, 89 Donggang Street, Shijiazhuang, Hebei, 050000, China, +86 311 87156084; <email>jtia716@aucklanduni.ac.nz</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>7</day><month>5</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e92680</elocation-id><history><date date-type="received"><day>02</day><month>02</month><year>2026</year></date><date date-type="rev-recd"><day>19</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>06</day><month>04</month><year>2026</year></date></history><copyright-statement>&#x00A9; Jin Tian, Zengren Zhao, Longmei Tang, Yongzhao Song, Yuchang Li, Nan Jiang. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 7.5.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e92680"/><abstract><p>Clinical artificial intelligence (AI) applications frequently fail to transition from short-term pilot projects into sustained components of routine clinical care, a phenomenon referred to in this viewpoint as the pilot trap. This persistent gap reflects not only technical or regulatory limitations but also insufficient governance capacity within health care organizations. We argue that such capacity is not fully established before deployment; rather, it develops through implementation as real-world operational tensions clarify organizational ownership, accountability boundaries, and coordination mechanisms. Drawing on an 18-month implementation of a provincial clinical AI platform in China, we develop a 6-module governance framework encompassing institutional carrier formation, infrastructure governance, regulatory and ethical governance, interdisciplinary coordination, translational scaling, and lifecycle evaluation and oversight. These modules represent functional governance conditions observed during implementation rather than a prescriptive institutional architecture to be installed prior to deployment. We further introduce the concept of functional transferability and position the framework as an upstream complement to existing international governance standards, which typically specify what governance should achieve, but often assume that the organizational capacity to implement it already exists. 
Advancing clinical AI beyond demonstration, therefore, depends less on model performance alone than on the ability of health systems to develop and sustain the institutional capacity required for routine clinical use.</p></abstract><kwd-group><kwd>clinical artificial intelligence</kwd><kwd>AI governance</kwd><kwd>health systems</kwd><kwd>institutional infrastructure</kwd><kwd>implementation science</kwd><kwd>governance framework</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Substantial investment in clinical artificial intelligence (AI) has generated a rapidly expanding portfolio of tools proposed for health care delivery, including applications for diagnostic support, clinical decision-making, and operational coordination [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Despite these advances, most clinical AI systems remain confined to short-lived pilots or proof-of-concept demonstrations rather than becoming durable components of routine care across health systems [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. This persistent gap between technological progress and sustained clinical integration suggests that the central constraint lies not only in model performance but also in the institutional arrangements required to authorize, operate, and sustain AI as accountable clinical capacity. Understanding these institutional conditions therefore becomes central to explaining why technically promising AI systems fail to persist in routine clinical practice.</p><p>This pattern is frequently described as a &#x201C;pilot trap,&#x201D; in which technically promising systems fail to transition into stable operational use [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. 
Under such conditions, ownership structures remain fragmented, accountability boundaries are unclear, and deployment depends on temporary resources or individual champions rather than durable institutional arrangements. What appears as a challenge of technological adoption therefore reflects a deeper problem of institutional capacity formation, not merely the absence of governance structures, but the absence of the implementation conditions through which such structures are generated and stabilized within routine clinical practice.</p><p>China provides an analytically informative setting for examining these dynamics. Clinical AI has been incorporated into national strategies for health care modernization and embedded within evolving regulatory frameworks governing software as a medical device [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Within this policy environment, provincial platforms and tertiary hospitals, where administrative legitimacy, clinical scale, and technical capacity converge, represent a structurally significant configuration for AI integration. These conditions make governance formation observable as a determinant of whether AI transitions from episodic deployment to institutional infrastructure. Yet, despite this policy alignment, the conditions enabling governance formation, including stable organizational ownership, standardized infrastructure, and sustained operational accountability, remain inconsistently realized.</p><p>Existing scholarship on clinical AI has largely concentrated on model validation, predictive performance, and application-level effectiveness [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. Governance has increasingly been recognized as relevant to implementation outcomes, yet available frameworks remain limited in operational specificity [<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. 
High-level governance principles often lack clearly defined organizational carriers through which governance becomes actionable [<xref ref-type="bibr" rid="ref17">17</xref>], while implementation-oriented approaches tend to emphasize evaluation and oversight without addressing upstream conditions of ownership, financing, and sustained operational responsibility [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Critical questions therefore remain underspecified, including who governs clinical AI, under what institutional mandate, and how accountability is distributed across the system lifecycle. Without addressing these questions, the institutional conditions required for sustained clinical AI integration remain poorly understood.</p><p>In this viewpoint, we draw on an 18-month real-world implementation of a provincial clinical AI platform in China to develop a governance framework for the institutionalization of clinical AI. By analyzing contemporaneous implementation materials, including governance documents, regulatory filings, and operational records, we identify 6 interdependent governance modules that repeatedly emerged as necessary conditions for sustained deployment. These modules describe the organizational and governance capacities required for clinical AI to transition from episodic pilot projects to routinized clinical infrastructure. We further introduce the concept of functional transferability to distinguish governance functions that remain stable across contexts from the institutional forms through which they may be realized. The framework is intended both to explain why many clinical AI systems remain pilot-bound and to help identify governance conditions that support durable AI integration across health systems.</p></sec><sec id="s2"><title>Conceptual Framing</title><p>The conceptual framing of this study is grounded in 3 interrelated constructs: institutionalization, infrastructure, and governance. 
Institutionalization refers to the stabilization of AI-enabled practices as routinized clinical processes that persist beyond pilot deployments and project-based implementation. Infrastructure denotes the technical and operational substrate required for reliable and repeatable system use, encompassing data pipelines, interoperability systems, computational capacity, and access control. Governance refers to the allocation of decision rights and accountability arrangements that authorize system use, assign responsibility, and oversee system updates across the deployment lifecycle [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>These constructs are analytically distinct yet operationally sequential. Infrastructure constitutes the technical substrate that enables system operation. Governance organizes the human and institutional arrangements through which that substrate is authorized, monitored, and maintained. Institutionalization denotes the observable outcome that emerges when governed systems persist in routine use over time. Governance and institutionalization are related but not identical. Governance refers to the arrangements through which accountability and oversight are structured, whereas institutionalization denotes the condition that becomes observable when such arrangements stabilize sufficiently to sustain routine practice beyond project-specific resources or individual champions. Governance may exist without institutionalization having yet been achieved, whereas institutionalization presupposes the presence of governance arrangements capable of sustaining routine practice.</p><p>In this study, these constructs are not treated as fully established preconditions prior to implementation. Governance capacity in particular is conceptualized as emergent rather than antecedent. It develops through the operational demands generated during implementation rather than being fully specified in advance. 
Implementation processes therefore play a constitutive role in shaping governance arrangements, through which accountability structures, decision rights, and operational responsibilities gradually become stabilized within clinical environments [<xref ref-type="bibr" rid="ref21">21</xref>]. Institutionalization emerges when these governance arrangements persist and sustain routine clinical practice over time. The sequential relationship among these 3 constructs is illustrated in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Infrastructure provides the technical substrate that enables system operation, including data, computing, and interoperability capacities. Governance organizes coordination and accountability across the lifecycle of deployment by structuring authorization, oversight, and operational responsibility. Institutionalization represents the sustained integration of artificial intelligence&#x2013;enabled practices into routine clinical workflows as governed systems persist over time. The figure illustrates the sequential relationship through which infrastructure enables governance and governance supports institutionalization.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e92680_fig01.png"/></fig></sec><sec id="s3"><title>Case Context</title><p>To examine how the conceptual relationships outlined above operate in practice, this study analyzes the implementation of a clinical AI platform at a large tertiary academic hospital in Hebei Province, China. Over an 18-month period, the institution deployed a set of AI-enabled systems aimed at supporting both clinical decision-making and operational coordination across departments and affiliated institutions. 
Implementation was organized around 3 clinical pathways, encompassing intelligent preconsultation triage, oncology multidisciplinary team (MDT) decision support, and therapeutic drug monitoring (TDM). Each pathway represents a domain in which AI systems intersect directly with routine clinical workflows and institutional decision structures and therefore provides an observable setting for examining governance formation during deployment.</p><p>The hospital functions as a regional medical center and maintains substantial digital infrastructure, including integrated clinical information systems and data management capabilities. As a large tertiary hospital operating within China&#x2019;s national regulatory framework for AI-enabled medical technologies, it represents a setting where clinical scale, digital infrastructure, and regulatory oversight converge. This institutional configuration creates an analytically valuable context for observing how governance arrangements evolve as AI systems transition from experimental deployment to sustained integration within routine clinical practice.</p></sec><sec id="s4"><title>From Implementation Materials to Governance Framework: Analytical Approach</title><p>This study adopts a qualitative, case-based process-tracing approach to examine how governance structures emerged during an 18-month clinical AI platform implementation at a large tertiary academic hospital in Hebei Province, China. The analytical strategy follows qualitative case study logic commonly used to examine institutional processes, organizational coordination, and decision trajectories within complex health system settings.</p><p>Empirical materials were derived from institutional artifacts generated through routine platform development and deployment. 
These materials comprised 17 institutional artifacts, including 13 text-based documents and 4 visual implementation materials, encompassing governance frameworks, regulatory filings, infrastructure certification records, internal operational monitoring records, operational deployment artifacts, monthly work reports, meeting materials, organizational documents, budget requests, workflow diagrams, and platform architecture and feature maps. These materials documented platform development milestones, coordination events, governance decisions, and operational activity across the review period from July 2, 2024, to January 24, 2026. Internal operational monitoring records additionally provided aggregated operational indicators documenting system use during platform deployment. These indicators were used descriptively to characterize continuity of deployment, scope of workflow embedding, and organizational consolidation rather than as formal effectiveness endpoints.</p><p>Materials were organized chronologically to identify key decision points and coordination tensions, defined as documented conflicts, delays, or breakdowns requiring institutional responses. Governance modules were derived inductively from recurrent coordination tensions and the institutional mechanisms developed to resolve them. Data analysis followed an inductive thematic analysis approach in which coordination tensions identified across implementation artifacts were iteratively grouped into higher-level governance functions that subsequently informed the 6-module framework. A tension was considered governance-relevant when it recurred across multiple implementation episodes and was corroborated by more than one category of institutional artifact. 
Initial coding and module development were conducted by 2 authors and subsequently reviewed by team members with distinct clinical, administrative, and policy perspectives to strengthen interpretive robustness and reduce the risk of post hoc rationalization. Members of the research team participated in documenting institutional processes during the implementation period, enabling access to contemporaneous institutional materials while maintaining analytical separation from AI model development, technical validation, and system performance evaluation activities. Coding was conducted manually by the authors without the use of qualitative data analysis software.</p><p>This retrospective, case-based study received formal approval from the Clinical Research Ethics Committee of The First Hospital of Hebei Medical University (approval number: 2026; research review number: 032; approval date: February 5, 2026). The analysis reported in this manuscript was based on institutional documents and aggregated operational materials and did not involve the use of identifiable individual patient-level data.</p></sec><sec id="s5"><title>Governance Framework for Clinical AI Institutionalization</title><sec id="s5-1"><title>Development of the Governance Framework</title><p>Analysis of implementation materials, conducted through the process-tracing approach described above, identified recurrent operational tensions arising during the deployment of clinical AI systems. These tensions were primarily associated with infrastructure integration, regulatory alignment, and the embedding of AI outputs within routine clinical workflows. Recurrent solutions to these implementation tensions were grouped into 6 interdependent modules. 
These modules represent governance conditions observed in practice rather than a prescriptive institutional design.</p><p>The 6 modules collectively constitute a governance cycle through which clinical AI systems transition from experimental deployment to stabilized institutional use. The first 2 modules, institutional carrier formation and infrastructure governance, represent upstream enabling conditions that establish organizational ownership and sustain technical continuity prior to clinical activation. Regulatory and ethical governance defines authorization boundaries and distributes accountability across the deployment lifecycle. The remaining modules, interdisciplinary operational coordination, translational scaling, and lifecycle evaluation and oversight, support active deployment by enabling cross-departmental embedding, contextual expansion, and adaptive governance as clinical and regulatory conditions evolve. Although analytically presented as 6 modules, the framework operates as a recursive governance cycle in which implementation generates feedback that informs subsequent governance adjustments and institutional learning. The governance framework is illustrated in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Six governance modules (M1-M6) represent organizational, technical, regulatory, and clinical functions required for sustained AI deployment. These modules collectively constitute governance capacity that enables the transition of clinical AI systems from pilot initiatives to institutionalized clinical practice. The framework operates as an adaptive learning cycle in which lifecycle oversight informs continuous governance formation. 
AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e92680_fig02.png"/></fig></sec><sec id="s5-2"><title>Overview of the Governance Framework</title><p>The framework comprises 6 interdependent modules representing the organizational, technical, regulatory, and clinical functions required to stabilize AI systems as routinized components of clinical practice. Each module reflects a recurring governance tension identified during implementation and is supported by empirical process evidence drawn from the Hebei provincial clinical AI platform. <xref ref-type="table" rid="table1">Table 1</xref> summarizes the modules, their corresponding implementation artifacts, and the transferable governance requirements derived from them.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Governance framework modules, empirical implementation artifacts, and transferable governance requirements.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Governance module</td><td align="left" valign="bottom">Empirical evidence from implementation</td><td align="left" valign="bottom">Transferable governance requirement</td></tr></thead><tbody><tr><td align="left" valign="top">M1: Institutional carrier formation</td><td align="left" valign="top">Establishment of a dedicated medical AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> laboratory through formal administrative orders; defined organizational structure integrating algorithm development, clinical research, and application translation; designation as a provincial engineering research center.</td><td align="left" valign="top">Clinical AI deployment requires a designated institutional carrier with formal mandate, decision authority, and organizational independence from temporary project teams.</td></tr><tr><td align="left" valign="top">M2: Infrastructure 
governance</td><td align="left" valign="top">Integration of high-performance computing infrastructure with hospital information systems; EMR<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> functional maturity certification; interoperability standardization assessments; documented institutional data governance policies.</td><td align="left" valign="top">Data pipelines, computational capacity, and interoperability mechanisms must be governed as durable institutional infrastructure with defined access, security, and update controls.</td></tr><tr><td align="left" valign="top">M3: Regulatory and ethical governance</td><td align="left" valign="top">Algorithm filing under the national regulatory framework administered by the Cyberspace Administration of China; documentation defining the system&#x2019;s purpose, use boundaries, and institutional accountability.</td><td align="left" valign="top">Regulatory authorization and accountability structures should be formally specified prior to clinical activation of AI systems.</td></tr><tr><td align="left" valign="top">M4: Interdisciplinary operational coordination</td><td align="left" valign="top">Three-division laboratory governance structure; integration of AI outputs into multidisciplinary tumor board workflows; documented role allocation and decision rights across clinical and technical teams; temporary suspension of a pathway pending clarification of update authority.</td><td align="left" valign="top">Operational deployment requires clearly defined roles, decision rights, and accountability structures linking algorithm development, clinical decision processes, and institutional oversight.</td></tr><tr><td align="left" valign="top">M5: Translational scaling</td><td align="left" valign="top">Expansion of the AI-supported preconsultation pathway across multiple outpatient departments; cross-department workflow integration protocols; establishment of a provincial clinical AI research institute supporting broader 
deployment.</td><td align="left" valign="top">Scaling requires governance pathways that translate localized operational success into institution-wide organizational capability.</td></tr><tr><td align="left" valign="top">M6: Lifecycle evaluation and oversight</td><td align="left" valign="top">Documentation of system use boundaries; role-based pathway supervision; model version update logs; institutional review of operational feedback during implementation.</td><td align="left" valign="top">Lifecycle governance requires monitoring mechanisms tied to institutional authority to manage system updates, operational feedback, and evolving clinical conditions.</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table1fn2"><p><sup>b</sup>EMR: electronic medical record.</p></fn></table-wrap-foot></table-wrap><p>Across the observation period, the platform was progressively integrated into routine clinical workflows spanning multiple service lines. Risk assessment tools were incorporated into outpatient triage processes to support early identification of high-risk patients, while decision-support functions were embedded within multidisciplinary case discussions to facilitate structured cross-departmental evaluation. AI-assisted documentation tools, including generative medical record support and natural-language clinical assistants, were concurrently introduced to support routine documentation and information retrieval. The platform also enabled structured capture of research indicators and integration of clinical data management with institutional quality-monitoring functions. 
To provide high-level proof-of-concept evidence that the platform progressed beyond bounded pilot deployment, selected operational indicators are summarized in <xref ref-type="table" rid="table2">Table 2</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Reported operational indicators and measurement definitions<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Indicator</td><td align="left" valign="bottom">Definition and measurement basis</td><td align="left" valign="bottom">Observed value</td></tr></thead><tbody><tr><td align="left" valign="top">Cumulative patient interactions (preconsultation pathway)</td><td align="left" valign="top">Total number of completed patient-initiated interactions recorded in system logs across all 5 enrolled outpatient departments from the first departmental onboarding to the end of the observation period</td><td align="left" valign="top">&#x003E;24,000</td></tr><tr><td align="left" valign="top">Recent average daily use</td><td align="left" valign="top">Mean number of completed interactions per calendar day calculated over the postrollout operational phase, following phased onboarding of all 5 departments</td><td align="left" valign="top">~110 encounters/day</td></tr><tr><td align="left" valign="top">Continuous operational duration (preconsultation pathway)</td><td align="left" valign="top">Number of consecutive months the pathway remained operationally active following departmental rollout</td><td align="left" valign="top">&#x003E;12 months</td></tr><tr><td align="left" valign="top">Number of clinical pathways in sustained deployment</td><td align="left" valign="top">Count of artificial intelligence&#x2013;enabled clinical pathways that remained in active institutional use across the focal observation period</td><td align="left" 
valign="top">3</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>The operational indicators reported in this table were derived from aggregated institutional operational monitoring records generated during routine platform deployment. They are presented to improve interpretive transparency rather than as indicators of clinical effectiveness. The recent daily use figure refers to the mature postrollout phase following phased departmental onboarding and should not be interpreted as a rate that applied uniformly across the entire cumulative review period. No identifiable individual patient-level data were used.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s5-3"><title>Empirical Context of Platform Operation</title><p>During the observation period, the clinical AI platform was progressively integrated into routine clinical workflows across multiple service lines. Operational deployment was organized around 3 clinical pathways, each representing a domain in which AI-assisted functions were embedded within routine clinical decision processes and institutional coordination mechanisms.</p><p>The intelligent preconsultation pathway deployed a conversational interface based on a hospital-developed and locally deployed large language model&#x2013;enabled system, rather than a commercial vendor platform, to elicit structured symptom histories from patients prior to clinical encounters. The pathway generated summarized risk assessments that were automatically transmitted to the electronic medical record (EMR) system, thereby providing preliminary clinical information to support outpatient triage and physician assessment at the point of care.</p><p>The oncology multidisciplinary decision-support pathway embedded AI-assisted treatment recommendations within tumor board workflows. 
It was supported by a hospital-developed oncology decision-support model based on a transformer architecture and adapted for oncology-specific text generation and clinical recommendation tasks. Model-generated outputs were reviewed alongside imaging findings, pathology reports, and patient-specific clinical data under defined specialist decision authority, with final treatment determinations remaining within established clinical accountability structures.</p><p>The TDM pathway applied an in-house machine learning pipeline that integrated principal component analysis for feature optimization and a transformer-based deep learning regression model to generate plasma drug concentration estimates for duloxetine and support individualized dosing decisions. These estimates were derived from patient-specific clinical parameters and incorporated into routine medication-management workflows to inform dosing adjustments.</p><p>In addition to these clinical pathways, AI-assisted documentation tools were deployed to support routine information management, including generative medical record functions and natural-language clinical assistants facilitating clinical documentation and information retrieval. The platform also enabled structured capture of research indicators and integration of clinical data management with institutional quality-monitoring and downstream analytical processes. Sustained operation across these domains created an empirical context in which AI-enabled systems engaged directly with institutional governance arrangements and cross-departmental coordination structures. This provided the empirical basis for examining how governance mechanisms emerged and evolved during real-world implementation.</p><p>Three forms of implementation evidence were observable during the review period, together indicating that the platform had moved beyond bounded pilot deployment. 
Sustained operation was evident across 3 clinical pathways, with the preconsultation pathway remaining in routine use for more than 12 months and recording more than 24,000 completed patient interactions across 5 outpatient departments. At the same time, AI outputs were incorporated into existing clinical workflows rather than remaining external demonstrations, including automatic transmission of preconsultation summaries to the EMR system, integration of oncology decision support into MDT deliberation, and incorporation of duloxetine concentration prediction into medication-management processes. Continuity of deployment was also accompanied by formal governance consolidation: regulatory authorization was documented in the algorithm filing record, role-based operational oversight was specified in pathway supervision protocols, update procedures were recorded in version-control logs, and organizational ownership was formalized through the medical AI laboratory mandate. These observations collectively provide high-level implementation evidence that the platform functioned as an institutionalized operational capability rather than remaining a time-limited pilot initiative.</p></sec><sec id="s5-4"><title>Institutional Carrier Formation</title><p>A foundational governance challenge in clinical AI deployment concerns the absence of a stable organizational locus of ownership. Under such conditions, the institutional authority required to coordinate development, deployment, and lifecycle oversight remains fragmented, preventing AI capabilities from transitioning beyond pilot implementation into durable institutional use.</p><p>When AI initiatives operate as project-based activities, accountability for cross-departmental data access, clinical validation, and regulatory engagement becomes diffusely distributed across organizational units and external collaborators. 
In the absence of a designated institutional carrier, technically functional systems frequently fail to persist beyond initial pilot phases because no organizational entity possesses the authority required to align technical development with clinical governance and institutional decision structures.</p><p>Governance consolidation in the studied setting occurred through the establishment of a dedicated medical AI laboratory serving as an institutional carrier with a formal mandate and defined decision rights. The laboratory integrated algorithm development, clinical research, and application translation within a unified organizational structure, thereby concentrating responsibility for AI system oversight within a single accountable entity. This carrier was established prior to broader clinical integration and subsequently provided the administrative foundation upon which additional governance arrangements were constructed, including designation as a provincial engineering research center.</p><p>Institutional arrangements may vary across health systems in organizational form and administrative configuration. A consistent governance principle nevertheless emerges, whereby sustained clinical AI integration requires a durably designated organizational entity responsible for coordinating development, deployment, and lifecycle oversight. Such carriers may take the form of dedicated laboratories, formally chartered governance committees, or federated institutional arrangements, but must remain structurally distinct from temporary project teams and independent of short-term funding cycles.</p></sec><sec id="s5-5"><title>Infrastructure Governance</title><p>A second governance challenge arises from the treatment of data and computational resources as project-bound technical assets rather than as governed institutional infrastructure. 
When infrastructure provision remains embedded within temporary project arrangements, the standardization, interoperability, and operational continuity required for routine clinical deployment cannot be reliably sustained. Under such conditions, AI systems remain dependent on configurations that are structurally incompatible with long-term institutional operation.</p><p>Fragmented information systems, inconsistent data standards, and reliance on temporary computing environments constrain stable model operation and impede integration into clinical workflows. These limitations typically arise not from data scarcity but from the absence of institutional governance mechanisms capable of ensuring infrastructure continuity, access control, and system-wide coordination across heterogeneous hospital environments. Without formal governance of underlying infrastructure, technically validated AI systems may nevertheless fail to operate reliably at the point of clinical deployment, creating conditions in which deployment continuity depends on informal negotiation rather than defined accountability structures.</p><p>Infrastructure governance within the studied platform was progressively incorporated into institutional oversight arrangements. High-performance computing capacity was established and integrated with existing clinical information systems, while external certifications, including EMR functional maturity assessment and interoperability standardization evaluation, confirmed that infrastructure conditions satisfied requirements for clinical deployment. Data standardization across legacy systems further required iterative coordination among clinical, technical, and administrative units, as nonstandardized terminologies embedded within departmental workflows impeded the establishment of unified data pipelines.</p><p>Infrastructure architectures differ across institutions in scale, technical configuration, and organizational management. 
A consistent governance principle nevertheless emerges, whereby data pipelines, computational resources, and interoperability systems must be governed as durable institutional infrastructure, maintained under defined accountability arrangements and independent of the project-bound funding cycles that characterize early-stage AI deployment.</p></sec><sec id="s5-6"><title>Regulatory and Ethical Governance</title><p>A further governance challenge concerns the specification of regulatory authorization and accountability prior to clinical deployment. When regulatory alignment is deferred until after deployment decisions are made, accountability arrangements remain incompletely defined during the period of greatest institutional uncertainty, and the boundaries of legitimate system use are established retrospectively rather than by design.</p><p>In the absence of prior regulatory specification, system purpose, intended use boundaries, and designated institutional responsibility may remain insufficiently defined at the point of clinical activation. Effective governance therefore requires that regulatory authorization and accountability structures be established prior to deployment, particularly in regulatory environments that continue to evolve alongside emerging clinical AI capabilities.</p><p>Within the Hebei provincial platform, regulatory authorization was obtained prior to broader clinical activation of the oncology decision-support pathway through national algorithm filing procedures administered under the Cyberspace Administration of China framework. The filing process clarified system purpose, defined intended use boundaries, and formally designated institutional responsibility for ongoing deployment. 
Regulatory requirements therefore informed operational design decisions at the predeployment stage rather than constraining systems already embedded in clinical practice, and oversight functions were incorporated within existing institutional governance arrangements rather than constituted as a separate compliance structure.</p><p>Regulatory frameworks vary substantially across jurisdictions in scope, procedural requirements, and institutional applicability. A consistent governance principle nevertheless emerges, whereby accountability specification and the definition of authorized use boundaries should precede clinical activation. When regulatory alignment occurs only after deployment, governance uncertainty persists during implementation and institutional oversight across the system lifecycle becomes structurally constrained.</p></sec><sec id="s5-7"><title>Interdisciplinary Operational Coordination</title><p>Effective clinical deployment of AI systems depends on structured coordination across clinical, technical, and administrative domains. When such coordination is not institutionally specified, technically functional systems may operate without agreed procedures for clinical use, and accountability for AI-generated outputs may remain diffuse or unassigned.</p><p>Ambiguity frequently emerges at the interface between algorithm development and clinical decision-making. Technical personnel may not possess the authority to modify systems in response to operational feedback, while clinicians may lack formally defined procedures for incorporating AI outputs into diagnostic or therapeutic workflows. 
Without governance arrangements that connect these domains, AI systems may remain technically operational but institutionally unstable.</p><p>Operational coordination in the studied implementation was institutionalized through explicit role allocation across a 3-division laboratory structure responsible for algorithm development, clinical research, and application translation. In the oncology multidisciplinary decision-support pathway, AI outputs were embedded within established team workflows under defined decision rights. Model-generated outputs were reviewed during structured multidisciplinary meetings alongside imaging findings, pathology reports, and patient-specific clinical data, while final treatment decisions remained within established clinical accountability structures. Early deployment further revealed that responsibility boundaries required clarification, and one pathway experienced temporary suspension pending resolution of ambiguity regarding authority over model update decisions.</p><p>Institutional coordination mechanisms differ across health systems in organizational design and governance structure. A consistent governance principle nevertheless emerges, whereby sustained clinical deployment requires clearly defined roles, decision rights, and accountability structures linking algorithm development, clinical use, and institutional oversight.</p></sec><sec id="s5-8"><title>Translational Scaling</title><p>Durable institutional integration of clinical AI systems requires governance mechanisms capable of extending operational capacity beyond localized implementations. 
Without such mechanisms, systems demonstrating reliable performance within circumscribed clinical contexts may remain confined to isolated deployments, preventing broader organizational adoption and durable integration at scale.</p><p>Localized operational success does not automatically generate the institutional authority required for expansion across departments or clinical service lines. Effective scaling therefore depends on governance arrangements that connect deployment experience with institutional decision structures, enabling operational practices developed in one context to be translated into formally recognized organizational capabilities.</p><p>In the studied implementation, scaling occurred through progressive integration of AI-supported pathways into institutional governance structures rather than replication of isolated applications. The preconsultation risk assessment pathway was extended across multiple outpatient departments through standardized workflow integration, enabling its routine use across distinct clinical service lines. Additional AI-supported pathways were subsequently introduced in coordination with clinical and administrative units, each accompanied by governance documentation specifying operational roles, workflow procedures, and system use parameters. Over time, these expansions transformed initially localized tools into institutionally coordinated clinical services embedded within routine care processes. Institutional recognition further reinforced this process through research center designation and the establishment of a provincial clinical AI research institute.</p><p>Scaling mechanisms vary across institutional contexts in governance structure and policy environment. 
A consistent governance principle nevertheless emerges, whereby durable expansion of clinical AI capacity requires governance pathways linking localized deployment with institution-wide authority structures, allowing operational experience to translate into formally recognized institutional capability.</p></sec><sec id="s5-9"><title>Lifecycle Evaluation and Oversight</title><p>Long-term clinical deployment of AI systems requires governance mechanisms capable of monitoring performance, managing system updates, and adapting deployment conditions over time. Without such mechanisms, operational systems may gradually diverge from their intended use conditions as clinical workflows change, data distributions shift, and regulatory expectations develop over time.</p><p>In the absence of structured lifecycle oversight, responsibility for monitoring system performance may become diffuse across organizational units. Version control may be inconsistently documented, and operational feedback generated by clinical users may not systematically reach those with authority to modify deployed systems. This configuration introduces the risk of operational drift as AI systems expand beyond initial pilot environments into routine clinical practice, underscoring the necessity of governance mechanisms capable of maintaining accountability across the full system lifecycle.</p><p>Within the observed implementation, lifecycle oversight practices were incorporated into existing institutional governance arrangements rather than established as a fully separate governance protocol. Monitoring activities included documentation of system use boundaries, clarification of responsible units for pathway supervision, and iterative adjustments in response to operational feedback. System updates were conducted within established institutional decision structures and aligned with regulatory requirements. 
Standardized version-control documentation remained under iterative development during the observation period as institutional governance procedures matured.</p><p>Lifecycle governance arrangements vary across institutional contexts in their degree of formalization and organizational structure. Across these variations, a consistent governance principle applies, whereby sustained clinical deployment requires monitoring mechanisms linked to defined institutional authority, ensuring that system updates, clinical workflow adjustments, and evolving regulatory expectations remain subject to accountable oversight throughout the operational lifecycle.</p></sec></sec><sec id="s6" sec-type="discussion"><title>Discussion</title><sec id="s6-1"><title>From Implementation to Governance Formation</title><p>Persistent pilot dependence in clinical AI is commonly attributed to governance deficits, including the absence of accountability structures, ownership arrangements, or regulatory alignment that would enable AI systems to move from experimental deployment into routine clinical use [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. While this interpretation identifies important institutional constraints, it does not fully explain why governance arrangements so often fail to emerge even when pilot implementations are technically successful. The present analysis points to a more precise formulation. In this view, the pilot trap reflects not only the absence of governance but also insufficient implementation depth for governance structures to take shape [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Systems remain project-bound not because governance was unavailable in principle, but because deployment ended before the operational tensions capable of generating governance had fully materialized. 
What distinguishes sustained integration from persistent pilot dependence is therefore not the prior existence of a governance framework but the capacity of implementation processes to generate governance through iterative responses to recurrent coordination demands.</p><p>Reconceptualizing the pilot trap in institutional terms carries direct implications for how it is addressed. Existing approaches often position governance as a precondition for deployment by emphasizing accountability frameworks, compliance structures, and ownership arrangements before clinical integration proceeds [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. Evidence from this case indicates a different dynamic. Governance capacity did not precede implementation; it emerged through it. Data-standardization conflicts, responsibility-boundary disputes, and scaling misalignments created the institutional conditions under which governance structures became necessary and were progressively consolidated [<xref ref-type="bibr" rid="ref27">27</xref>]. Deployment therefore proceeded not from a complete governance architecture but from a minimal institutional foundation that accumulated governance capacity as implementation deepened. In contrast to frameworks that emphasize technical integration requirements or clinician adoption barriers as the primary constraints on clinical AI sustainability, the present analysis foregrounds the governance formation process itself. Attention is directed toward the mechanisms through which organizational ownership, accountability boundaries, and coordination structures become stabilized as durable institutional capacity. Across the implementation trajectory, 6 governance modules repeatedly emerged as functional conditions necessary for sustained deployment rather than as elements of a predefined governance design. 
Considered together, these modules constitute governance capacity as a system-level capability through which sociotechnical resources are consolidated into routinized clinical infrastructure. The operational indicators summarized in <xref ref-type="table" rid="table2">Table 2</xref> provide empirical grounding for this interpretation and suggest that governance formation was accompanied by sustained operational embedding rather than remaining a purely organizational or aspirational construct.</p><p>These indicators are not reported as performance benchmarks but as governance-relevant evidence. Sustained use across more than 12 months of continuous operation, encompassing more than 24,000 patient interactions within a phased multidepartment deployment, indicates that governance arrangements were sufficient to support accountable clinical operation over time rather than confining the platform to a bounded pilot episode. Operational continuity across the preconsultation pathway is consistent with the stabilization of M1 and M2, without which system continuity would be more likely to depend on informal coordination than on defined accountability structures. Regulatory authorization obtained prior to clinical activation of the oncology decision-support pathway is consistent with the functioning of M3, as documented in the algorithm filing record and related compliance materials. The maintenance of defined clinical decision authority throughout the oncology MDT pathway is consistent with the operationalization of M4. The phased expansion of the preconsultation pathway across multiple outpatient departments is indicative of M5, as documented in deployment materials and departmental rollout records. Iterative system updates conducted within institutional governance structures during the observation period are indicative of M6 functioning as an active governance mechanism rather than a nominal policy commitment. 
Although governance functions vary in their observability through use metrics, each of the 6 modules is linked to at least one category of traceable institutional artifact, including operational records, regulatory filings, deployment materials, and organizational documents. This supports the descriptive reproducibility of the framework as an analytical lens, whereas its broader transferability across heterogeneous health system contexts is discussed in the following section. Collectively, these indicators suggest that the governance modules described in this framework were associated with observable operational continuity and that the transition from pilot deployment to institutionalized clinical practice was sustained across multiple service lines and governance domains.</p></sec><sec id="s6-2"><title>From Implementation Tensions to Governance Mechanisms</title><p>Implementation of the platform was accompanied by several operational frictions that required institutional responses, through which governance structures gradually emerged [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. Rather than resulting from deliberate institutional design, governance arrangements developed through iterative responses to practical operational challenges. The analysis identifies 3 generative mechanisms that shaped this process: authorization, responsibility allocation, and coordination.</p><p>Authorization translated regulatory expectations into operationally defined deployment boundaries and clarified the legitimate scope and conditions under which clinical AI systems could be used [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. Responsibility allocation embedded AI-related tasks within existing organizational structures by assigning accountable ownership across institutional units and professional roles [<xref ref-type="bibr" rid="ref30">30</xref>]. 
Coordination across clinical, technical, and administrative domains was gradually stabilized as decision rights, communication channels, and operational interfaces became more clearly defined [<xref ref-type="bibr" rid="ref31">31</xref>].</p><p>These mechanisms emerged through concrete operational friction rather than prior institutional design. During early deployment of the TDM pathway, inconsistencies in laboratory data formatting and record structures across hospital information systems temporarily limited automated data ingestion and required manual reconciliation. Resolving this tension required sustained coordination among clinical departments, the hospital information center, and the AI development team, ultimately producing standardized data interfaces and validation procedures, which were later incorporated into the governance framework.</p><p>A second coordination tension arose during deployment of the oncology MDT decision-support pathway. Clinician uncertainty regarding the appropriate role of AI-generated recommendations within established multidisciplinary decision-making processes led to a temporary operational suspension. Following administrative review, responsibility boundaries and documentation requirements were clarified, and the governance framework formalized AI outputs as advisory inputs under defined clinician oversight with role-based monitoring arrangements.</p><p>These episodes illustrate that institutionalization does not occur through seamless technological integration but through the iterative resolution of organizational tensions [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Governance neither simply precedes implementation nor follows it as a downstream regulatory layer. Instead, governance is produced through the implementation process itself. 
Institutionalization emerges when governance arrangements stabilize in response to recurrent operational demands, transforming iterative problem-solving into durable institutional capacity embedded within routine organizational practice.</p></sec><sec id="s6-3"><title>From Governance First to Governance Fit</title><p>Effective clinical AI deployment requires governance fit rather than governance expansion. Governance-first approaches are sometimes criticized for introducing procedural rigidity that slows technological innovation. This analysis addresses that concern by distinguishing governance functions from the institutional forms through which they are implemented. This distinction also clarifies why the framework is analytically reproducible across settings even when organizational forms differ.</p><p>Certain governance functions constitute nonnegotiable conditions for safe and accountable integration. These include clearly defined authorization boundaries, accountable ownership, and mechanisms for lifecycle oversight. The institutional arrangements through which such functions are enacted, however, may vary substantially in scale, procedural intensity, and degree of centralization across health system contexts. Accordingly, reproducibility in this context lies not in replicating identical organizational structures but in assessing whether comparable governance functions can be identified through traceable institutional evidence.</p><p>Governance fit refers to the calibrated alignment between governance intensity and system characteristics such as clinical risk, update frequency, and operational complexity [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. Under this perspective, governance architectures should not impose uniform procedural requirements across all deployment environments. 
Instead, effective governance preserves adaptive capacity by calibrating oversight intensity according to the potential consequences and reversibility of system changes.</p><p>Several mechanisms illustrate how such calibration can be operationalized in practice. Risk-tiered update thresholds allow routine model refinements to proceed under simplified authorization procedures while reserving stricter review processes for high-impact system modifications. Predefined authorization pathways for low-risk adjustments further reduce procedural bottlenecks. Staged deployment strategies similarly enable systems to evolve within monitored operational environments before wider clinical integration [<xref ref-type="bibr" rid="ref34">34</xref>].</p><p>Both extremes generate systemic challenges. Insufficient governance creates fragmentation and reinforces persistent pilot dependence, whereas excessive governance may introduce bureaucratic inertia that constrains technological adaptation [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Health systems can therefore balance accountability with technological agility by implementing governance arrangements that differentiate routine system evolution from high-risk modification processes while maintaining clear institutional responsibility for oversight. The broader question of how these governance functions travel across institutional environments is addressed in the following section on functional transferability.</p></sec><sec id="s6-4"><title>From Clinician-Centric to Participatory Governance Maturation</title><p>The absence of structural patient and community involvement in the present case invites a more precise analytical observation than simply noting a gap. Existing literature on patient participation in clinical AI often conflates 2 distinct roles. 
Patients may participate as design informants who help shape decisions about system purpose, or as governance stakeholders who contribute to oversight of how deployed systems operate within clinical environments [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Most existing guidance addresses the former by emphasizing co-design principles and participatory development methodologies. By contrast, patient involvement in operational governance has received substantially less analytical attention, and the institutional conditions required to make such participation structurally meaningful remain insufficiently specified.</p><p>Participatory governance capacity is not a function of inclusion per se but of the institutional anchoring through which participatory inputs acquire an accountable pathway to governance decisions. Meaningful patient involvement in AI governance requires an organizational carrier capable of receiving, processing, and acting on participatory inputs within established responsibility structures. Without such a carrier, participation risks becoming consultative rather than consequential, producing inputs that lack a defined institutional pathway to governance decisions [<xref ref-type="bibr" rid="ref39">39</xref>]. The sequencing observed here, therefore, reflects a structural constraint rather than a deliberate exclusion. Participatory mechanisms were introduced following initial stabilization of the accountability architecture, a sequencing that reflected the specific institutional conditions of this implementation context rather than a principled position on the optimal timing of patient involvement. 
Contemporary co-design frameworks and implementation science literature frequently advocate for patient involvement from the earliest stages of governance development, and parallel co-design of governance and participation structures represents a well-documented and viable alternative in other health system contexts.</p><p>As institutionalization advances and governance architecture stabilizes, participatory capacity becomes both feasible and functionally necessary. Within the proposed framework, patient and community involvement is most appropriately integrated into lifecycle governance mechanisms. Defined feedback channels, transparent communication of system use boundaries, and structured review of operational incidents allow oversight to extend beyond professional actors while preserving clear lines of institutional accountability [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. This integration addresses not only the legitimacy requirements identified in the literature but also the operational question of how participatory inputs are processed within functioning governance structures. Design-focused participation frameworks rarely specify these institutional pathways, leaving the governance dimension of participation largely unresolved [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref41">41</xref>].</p></sec><sec id="s6-5"><title>From Structural Replication to Functional Transferability</title><p>Although the framework emphasizes governance capacity rather than technological sophistication, several baseline conditions support its applicability across health system contexts [<xref ref-type="bibr" rid="ref42">42</xref>]. These conditions represent minimal enabling environments for sustained clinical AI operation rather than structural requirements of the framework itself. Institutional authority to assign responsibility for AI oversight must be present. 
Basic digital infrastructure capable of supporting clinical data capture, storage, and computational processing is also required, together with regulatory or administrative mechanisms that permit AI tools to be activated within clinical workflows. Interdisciplinary coordination capacity must additionally connect clinical practice, technical development, and organizational management to sustain cross-functional collaboration. Where such conditions are only partially present, functional equivalents such as shared infrastructure platforms, external technical partnerships, or regional governance arrangements may provide alternative implementation pathways [<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>Framework transferability therefore lies in functional reproduction rather than structural replication [<xref ref-type="bibr" rid="ref22">22</xref>]. In this paper, functional transferability is used as an analytic formulation to distinguish transferable governance functions from the setting-specific organizational forms through which they are instantiated. Institutionalization does not require identical organizational architectures but depends on the presence of governance functions capable of stabilizing clinical AI as durable institutional capacity. Six functional requirements underpin this process: durable ownership arrangements, governed infrastructure, defined authorization boundaries, cross-functional coordination mechanisms, structured scaling pathways, and lifecycle oversight. Under this interpretation, governance transferability depends on whether equivalent institutional functions can be realized through different organizational arrangements across settings, rather than on replication of a single structural form. In this sense, reproducibility should be understood descriptively rather than structurally. 
What can be reproduced across settings is not an identical organizational form but an analytic approach that examines whether comparable governance functions are evidenced through traceable institutional artifacts. <xref ref-type="table" rid="table3">Table 3</xref> illustrates how these requirements may be instantiated across heterogeneous health system contexts. It presents both the institutional arrangements observed in the study setting and illustrative analogs that may fulfill equivalent governance roles in more decentralized health systems. The purpose of this mapping is therefore to operationalize functional equivalence across contexts, rather than to prescribe identical organizational structures.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Functional governance requirements and cross-system analogs.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Governance module</td><td align="left" valign="bottom">Functional requirement</td><td align="left" valign="bottom">Example instantiation in study context</td><td align="left" valign="bottom">Possible analog in decentralized health systems</td></tr></thead><tbody><tr><td align="left" valign="top">M1: Institutional carrier formation</td><td align="left" valign="top">Accountable ownership formally assigned beyond project teams with defined decision authority across deployment, maintenance, and update cycles.</td><td align="left" valign="top">Provincial engineering research center and hospital AI<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> laboratory.</td><td align="left" valign="top">Health system AI governance office or clinical AI steering committee.</td></tr><tr><td align="left" valign="top">M2: Infrastructure governance</td><td align="left" valign="top">Data and computational resources governed as institutional infrastructure ensuring continuity of access and interoperability with clinical systems.</td><td 
align="left" valign="top">Institutionally governed data platforms and high-performance computing environment.</td><td align="left" valign="top">Cloud-based infrastructure operating under institutional data governance agreements.</td></tr><tr><td align="left" valign="top">M3: Regulatory and ethical governance</td><td align="left" valign="top">Predeployment authorization establishing legal legitimacy and defined accountability boundaries for clinical AI use.</td><td align="left" valign="top">National algorithm filing and institutional compliance documentation.</td><td align="left" valign="top">Software-as-a-medical-device approval pathways and institutional compliance review processes.</td></tr><tr><td align="left" valign="top">M4: Interdisciplinary coordination</td><td align="left" valign="top">Stable cross-functional coordination with defined roles and decision rights linking clinical, technical, and administrative actors.</td><td align="left" valign="top">Three-division laboratory integrating clinical, technical, and translational roles.</td><td align="left" valign="top">Cross-functional clinical AI committees linking clinicians, engineers, and administrators.</td></tr><tr><td align="left" valign="top">M5: Translational scaling</td><td align="left" valign="top">Governance mechanisms linking localized deployment with higher-level institutional authority to support controlled expansion.</td><td align="left" valign="top">Regional medical alliances extending AI services across affiliated hospitals.</td><td align="left" valign="top">Multisite health system networks or collaborative implementation consortia.</td></tr><tr><td align="left" valign="top">M6: Lifecycle evaluation and oversight</td><td align="left" valign="top">Sustained monitoring and controlled system evolution through performance review, version control, and governance adjustment mechanisms.</td><td align="left" valign="top">Role-based monitoring and documented change-control procedures.</td><td 
align="left" valign="top">Model oversight committees and postdeployment monitoring programs.</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>Economic feasibility represents an important consideration for broader adoption of governance-oriented implementation models [<xref ref-type="bibr" rid="ref43">43</xref>]. The framework does not require the independent construction of large-scale computational infrastructure. The operative condition is continuity of access to governed data and computational resources rather than infrastructural ownership. In practice, this requirement may be fulfilled through shared regional platforms, cloud-based infrastructures operating under institutional data governance agreements, or staged implementation strategies focusing on high-value clinical pathways. These configurations allow institutions across resource levels to establish governance capacity without replicating the infrastructural scale characteristic of tertiary referral centers. Resource pooling, coordinated implementation, and shared accountability structures may therefore support deployment across diverse institutional configurations, including regional health networks, public-private partnerships, and externally supported platforms.</p><p>This functional interpretation situates the framework within the broader landscape of international AI governance initiatives while clarifying its distinct contribution. Existing frameworks, including ISO/IEC 42001 [<xref ref-type="bibr" rid="ref44">44</xref>], World Health Organization guidance on health AI governance [<xref ref-type="bibr" rid="ref19">19</xref>], and regulatory pathways developed by agencies such as the US Food and Drug Administration [<xref ref-type="bibr" rid="ref45">45</xref>], share a common structural assumption that the organizational capacity to implement governance already exists within deploying institutions. 
These frameworks specify what governance should achieve, including accountability documentation, risk management structures, and compliance requirements. However, they presuppose the existence of an institutional carrier capable of activating and sustaining these arrangements [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. The framework developed here addresses the prior question of how such capacity comes into existence. It identifies the conditions under which organizational ownership, coordination authority, and oversight mechanisms emerge through implementation rather than being fully established in advance [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. These two levels should therefore be seen as complementary rather than competing. Normative standards define the accountability architecture within which clinical AI should operate, while the present framework explains the institutional formation process through which that architecture becomes operationally viable at the point of deployment. The framework is therefore transferable not because institutions can or should replicate the Hebei case in structural detail, but because comparable governance functions may be identified, examined, and adapted across heterogeneous implementation settings.</p></sec><sec id="s6-6"><title>From Viewpoint to Research Agenda</title><p>The framework presented in this study derives from a single institutional case observed during an early stage of governance consolidation, which necessarily constrains generalizability [<xref ref-type="bibr" rid="ref46">46</xref>]. Embedded observation enabled detailed insight into implementation dynamics but also introduced interpretive proximity. Triangulation across traceable institutional artifacts and iterative review by coauthors with distinct clinical, administrative, and governance roles were used to mitigate this limitation. 
Lifecycle governance mechanisms were still evolving during the observation period, and participatory governance remained only partially developed. The scope of claims the present analysis can support is accordingly bounded by these conditions. Although the framework may be analytically transferable across settings, its empirical adequacy and practical consequences remain to be tested through comparative research.</p><p>Beyond the constraints of this single case, a more fundamental limitation lies in the current state of the field. Clinical AI governance research has generated a substantial body of normative frameworks, implementation principles, and policy guidance, yet the relationship between governance arrangements and clinical outcomes remains largely unexamined [<xref ref-type="bibr" rid="ref47">47</xref>]. Governance frameworks are frequently justified by the assumption that they improve care quality, patient safety, and operational reliability. However, empirical investigation of this assumption remains conspicuously limited. Governance studies rarely measure clinical outcomes, and outcome studies rarely examine governance structures [<xref ref-type="bibr" rid="ref47">47</xref>]. The result is a structural circularity in which the value of governance is asserted but seldom demonstrated empirically.</p><p>Breaking this circularity requires a specific methodological intervention, namely longitudinal comparative studies that track governance formation and clinical outcomes simultaneously, treating governance artifacts, including carrier establishment, authorization records, coordination protocols, and oversight documentation, as traceable intermediate variables in a causal pathway from implementation to clinical impact [<xref ref-type="bibr" rid="ref48">48</xref>]. 
Investigations of this kind would need to span multiple institutional contexts to distinguish governance effects from institutional confounders and would require follow-up periods sufficient to capture the maturation of lifecycle governance mechanisms beyond early implementation phases. Economic analyses characterizing the cost structures of alternative governance configurations constitute an equally pressing methodological demand [<xref ref-type="bibr" rid="ref49">49</xref>], particularly for institutions operating with resource constraints that preclude replication of tertiary-center infrastructure. Most fundamentally, the field requires an agreed methodology for attributing clinical and operational outcomes to governance arrangements rather than to technical model performance alone [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref47">47</xref>], a distinction that existing evaluation frameworks are currently ill-equipped to draw.</p></sec><sec id="s6-7"><title>Conclusions</title><p>Clinical AI adoption is typically framed as a problem of technology transfer, understood as the movement of capable systems from development into clinical practice. The present analysis points toward a more fundamental reconceptualization. What health systems are required to construct is not a pathway for technology to enter institutions but the institutional capacity through which technology becomes governable, accountable, and sustained. Institutional capacity of this kind is not established in advance of implementation; it is generated through it. The operational tensions that arise during deployment are not obstacles to governance formation. They are its generative conditions. The 6-module framework developed in this study identifies the functional arrangements through which this formation process stabilizes into sustained institutional capacity. 
The framework thus reorients institutional strategy away from governance prespecification and toward the cultivation of implementation conditions capable of generating governance over time. Institutions that recognize governance as a product of implementation rather than its prerequisite are better positioned to support the iterative process through which clinical AI transitions from experimental artifact to organizational infrastructure. Without such capacity, technically mature systems remain perpetually promising pilots awaiting conditions that never arrive.</p></sec></sec></body><back><ack><p>We acknowledge the clinical departments, information technology teams, and administrative units involved in the development and routine operation of the provincial clinical artificial intelligence platform. Their contributions to system integration, workflow embedding, and governance documentation provided essential implementation context for this viewpoint. During manuscript preparation, generative artificial intelligence tools were used for language editing and clarity improvements. All substantive scientific content, interpretations, and final wording were reviewed and approved by the authors, who take full responsibility for the manuscript.</p></ack><notes><sec><title>Funding</title><p>No external funding was received for this study.</p></sec><sec><title>Data Availability</title><p>The datasets generated and analyzed during this study consist of institutional governance documents, regulatory filings, infrastructure certification records, internal operational monitoring records, and other operational implementation artifacts. These materials are not publicly available due to institutional governance policies and regulatory restrictions. 
Requests for access to deidentified or aggregated information may be directed to the corresponding author and will be considered subject to institutional review and approval by the relevant authorities.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: JT, ZZ</p><p>Formal analysis: JT, YL</p><p>Investigation: YS, NJ</p><p>Methodology: JT, YL</p><p>Supervision: ZZ</p><p>Writing &#x2013; original draft: JT</p><p>Writing &#x2013; review &#x0026; editing: JT, ZZ, LT, YS, YL, NJ</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">EMR</term><def><p>electronic medical record</p></def></def-item><def-item><term id="abb3">MDT</term><def><p>multidisciplinary team</p></def></def-item><def-item><term id="abb4">TDM</term><def><p>therapeutic drug monitoring</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Emanuel</surname><given-names>EJ</given-names> </name></person-group><article-title>Predicting the future &#x2013; big data, machine learning, and clinical medicine</article-title><source>N Engl J Med</source><year>2016</year><month>09</month><day>29</day><volume>375</volume><issue>13</issue><fpage>1216</fpage><lpage>1219</lpage><pub-id pub-id-type="doi">10.1056/NEJMp1606181</pub-id><pub-id pub-id-type="medline">27682033</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>J</given-names> </name><name name-style="western"><surname>Baxter</surname><given-names>SL</given-names> 
</name><name name-style="western"><surname>Xu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>K</given-names> </name></person-group><article-title>The practical implementation of artificial intelligence technologies in medicine</article-title><source>Nat Med</source><year>2019</year><month>01</month><volume>25</volume><issue>1</issue><fpage>30</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.1038/s41591-018-0307-0</pub-id><pub-id pub-id-type="medline">30617336</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kelly</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Karthikesalingam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Suleyman</surname><given-names>M</given-names> </name><name name-style="western"><surname>Corrado</surname><given-names>G</given-names> </name><name name-style="western"><surname>King</surname><given-names>D</given-names> </name></person-group><article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title><source>BMC Med</source><year>2019</year><month>10</month><day>29</day><volume>17</volume><issue>1</issue><fpage>195</fpage><pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id><pub-id pub-id-type="medline">31665002</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahmed</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Spooner</surname><given-names>B</given-names> </name><name 
name-style="western"><surname>Isherwood</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lane</surname><given-names>M</given-names> </name><name name-style="western"><surname>Orrock</surname><given-names>E</given-names> </name><name name-style="western"><surname>Dennison</surname><given-names>A</given-names> </name></person-group><article-title>A systematic review of the barriers to the implementation of artificial intelligence in healthcare</article-title><source>Cureus</source><year>2023</year><month>10</month><volume>15</volume><issue>10</issue><fpage>e46454</fpage><pub-id pub-id-type="doi">10.7759/cureus.46454</pub-id><pub-id pub-id-type="medline">37927664</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maddox</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Rumsfeld</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Payne</surname><given-names>PRO</given-names> </name></person-group><article-title>Questions for artificial intelligence in health care</article-title><source>JAMA</source><year>2019</year><month>01</month><day>1</day><volume>321</volume><issue>1</issue><fpage>31</fpage><lpage>32</lpage><pub-id pub-id-type="doi">10.1001/jama.2018.18932</pub-id><pub-id pub-id-type="medline">30535130</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Greenhalgh</surname><given-names>T</given-names> </name><name name-style="western"><surname>Wherton</surname><given-names>J</given-names> </name><name name-style="western"><surname>Papoutsi</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Beyond adoption: a new framework for theorizing and evaluating nonadoption, abandonment, and challenges to the 
scale-up, spread, and sustainability of health and care technologies</article-title><source>J Med Internet Res</source><year>2017</year><month>11</month><day>1</day><volume>19</volume><issue>11</issue><fpage>e367</fpage><pub-id pub-id-type="doi">10.2196/jmir.8775</pub-id><pub-id pub-id-type="medline">29092808</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wiens</surname><given-names>J</given-names> </name><name name-style="western"><surname>Saria</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sendak</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Do no harm: a roadmap for responsible machine learning for health care</article-title><source>Nat Med</source><year>2019</year><month>09</month><volume>25</volume><issue>9</issue><fpage>1337</fpage><lpage>1340</lpage><pub-id pub-id-type="doi">10.1038/s41591-019-0548-6</pub-id><pub-id pub-id-type="medline">31427808</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hassan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kushniruk</surname><given-names>A</given-names> </name><name name-style="western"><surname>Borycki</surname><given-names>EM</given-names> </name></person-group><article-title>Barriers to and facilitators of artificial intelligence adoption in health care: scoping review</article-title><source>JMIR Hum Factors</source><year>2024</year><month>08</month><day>29</day><volume>11</volume><fpage>e48633</fpage><pub-id pub-id-type="doi">10.2196/48633</pub-id><pub-id pub-id-type="medline">39207831</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Wang</surname><given-names>F</given-names> </name><name name-style="western"><surname>Casalino</surname><given-names>LP</given-names> </name><name name-style="western"><surname>Khullar</surname><given-names>D</given-names> </name></person-group><article-title>Deep learning in medicine&#x2014;promise, progress, and challenges</article-title><source>JAMA Intern Med</source><year>2019</year><month>03</month><day>1</day><volume>179</volume><issue>3</issue><fpage>293</fpage><lpage>294</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2018.7117</pub-id><pub-id pub-id-type="medline">30556825</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name></person-group><article-title>Artificial intelligence in Chinese healthcare: a review of applications and future prospects</article-title><source>Biomed Eng Lett</source><year>2025</year><month>11</month><volume>15</volume><issue>6</issue><fpage>1065</fpage><lpage>1072</lpage><pub-id pub-id-type="doi">10.1007/s13534-025-00515-2</pub-id><pub-id pub-id-type="medline">41280149</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>High-performance medicine: the convergence of human and artificial intelligence</article-title><source>Nat Med</source><year>2019</year><month>01</month><volume>25</volume><issue>1</issue><fpage>44</fpage><lpage>56</lpage><pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id><pub-id pub-id-type="medline">30617339</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Faes</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kale</surname><given-names>AU</given-names> </name><etal/></person-group><article-title>A comparison of deep learning performance against health-care professionals in detecting diseases from medical imaging: a systematic review and meta-analysis</article-title><source>Lancet Digit Health</source><year>2019</year><month>10</month><volume>1</volume><issue>6</issue><fpage>e271</fpage><lpage>e297</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(19)30123-2</pub-id><pub-id pub-id-type="medline">33323251</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mittelstadt</surname><given-names>BD</given-names> </name><name name-style="western"><surname>Allo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Taddeo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wachter</surname><given-names>S</given-names> </name><name name-style="western"><surname>Floridi</surname><given-names>L</given-names> </name></person-group><article-title>The ethics of algorithms: mapping the debate</article-title><source>Big Data Soc</source><year>2016</year><month>12</month><volume>3</volume><issue>2</issue><fpage>2053951716679679</fpage><pub-id pub-id-type="doi">10.1177/2053951716679679</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Char</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>NH</given-names> </name><name name-style="western"><surname>Magnus</surname><given-names>D</given-names> 
</name></person-group><article-title>Implementing machine learning in health care&#x2014;addressing ethical challenges</article-title><source>N Engl J Med</source><year>2018</year><month>03</month><day>15</day><volume>378</volume><issue>11</issue><fpage>981</fpage><lpage>983</lpage><pub-id pub-id-type="doi">10.1056/NEJMp1714229</pub-id><pub-id pub-id-type="medline">29539284</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hogg</surname><given-names>HDJ</given-names> </name><name name-style="western"><surname>Al-Zubaidy</surname><given-names>M</given-names> </name><collab>Technology Enhanced Macular Services Study Reference Group</collab><etal/></person-group><article-title>Stakeholder perspectives of clinical artificial intelligence implementation: systematic review of qualitative evidence</article-title><source>J Med Internet Res</source><year>2023</year><month>01</month><day>10</day><volume>25</volume><fpage>e39742</fpage><pub-id pub-id-type="doi">10.2196/39742</pub-id><pub-id pub-id-type="medline">36626192</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>A</given-names> </name><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name></person-group><article-title>Role of artificial intelligence in patient safety outcomes: systematic literature review</article-title><source>JMIR Med Inform</source><year>2020</year><month>07</month><day>24</day><volume>8</volume><issue>7</issue><fpage>e18599</fpage><pub-id pub-id-type="doi">10.2196/18599</pub-id><pub-id pub-id-type="medline">32706688</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Morley</surname><given-names>J</given-names> </name><name name-style="western"><surname>Floridi</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kinsey</surname><given-names>L</given-names> </name><name name-style="western"><surname>Elhalal</surname><given-names>A</given-names> </name></person-group><article-title>From what to how: an initial review of publicly available AI ethics tools, methods and research to translate principles into practices</article-title><source>Sci Eng Ethics</source><year>2020</year><month>08</month><volume>26</volume><issue>4</issue><fpage>2141</fpage><lpage>2168</lpage><pub-id pub-id-type="doi">10.1007/s11948-019-00165-5</pub-id><pub-id pub-id-type="medline">31828533</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shaw</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rudzicz</surname><given-names>F</given-names> </name><name name-style="western"><surname>Jamieson</surname><given-names>T</given-names> </name><name name-style="western"><surname>Goldfarb</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence and the implementation challenge</article-title><source>J Med Internet Res</source><year>2019</year><month>07</month><day>10</day><volume>21</volume><issue>7</issue><fpage>e13659</fpage><pub-id pub-id-type="doi">10.2196/13659</pub-id><pub-id pub-id-type="medline">31293245</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>Ethics and governance of artificial intelligence for health</article-title><source>World Health Organization</source><year>2021</year><month>06</month><day>28</day><access-date>2026-04-22</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.who.int/publications/i/item/9789240029200">https://www.who.int/publications/i/item/9789240029200</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reddy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Allan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Coghlan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cooper</surname><given-names>P</given-names> </name></person-group><article-title>A governance model for the application of AI in health care</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>03</month><day>1</day><volume>27</volume><issue>3</issue><fpage>491</fpage><lpage>497</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocz192</pub-id><pub-id pub-id-type="medline">31682262</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coiera</surname><given-names>E</given-names> </name></person-group><article-title>The last mile: where artificial intelligence meets reality</article-title><source>J Med Internet Res</source><year>2019</year><month>11</month><day>8</day><volume>21</volume><issue>11</issue><fpage>e16323</fpage><pub-id pub-id-type="doi">10.2196/16323</pub-id><pub-id pub-id-type="medline">31702559</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Panch</surname><given-names>T</given-names> </name><name name-style="western"><surname>Mattie</surname><given-names>H</given-names> </name><name name-style="western"><surname>Celi</surname><given-names>LA</given-names> </name></person-group><article-title>The &#x201C;inconvenient 
truth&#x201D; about AI in healthcare</article-title><source>NPJ Digit Med</source><year>2019</year><volume>2</volume><fpage>77</fpage><pub-id pub-id-type="doi">10.1038/s41746-019-0155-4</pub-id><pub-id pub-id-type="medline">31453372</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Greenhalgh</surname><given-names>T</given-names> </name><name name-style="western"><surname>Abimbola</surname><given-names>S</given-names> </name></person-group><article-title>The NASSS framework &#x2013; a synthesis of multiple theories of technology implementation</article-title><source>Stud Health Technol Inform</source><year>2019</year><month>07</month><day>30</day><volume>263</volume><fpage>193</fpage><lpage>204</lpage><pub-id pub-id-type="doi">10.3233/SHTI190123</pub-id><pub-id pub-id-type="medline">31411163</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nilsen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Seing</surname><given-names>I</given-names> </name><name name-style="western"><surname>Ericsson</surname><given-names>C</given-names> </name><name name-style="western"><surname>Birken</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Schildmeijer</surname><given-names>K</given-names> </name></person-group><article-title>Characteristics of successful changes in health care organizations: an interview study with physicians, registered nurses and assistant nurses</article-title><source>BMC Health Serv Res</source><year>2020</year><month>02</month><day>27</day><volume>20</volume><issue>1</issue><fpage>147</fpage><pub-id pub-id-type="doi">10.1186/s12913-020-4999-8</pub-id><pub-id pub-id-type="medline">32106847</pub-id></nlm-citation></ref><ref 
id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nilsen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name><name name-style="western"><surname>Neher</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A framework to guide implementation of AI in health care: protocol for a cocreation research project</article-title><source>JMIR Res Protoc</source><year>2023</year><month>11</month><day>8</day><volume>12</volume><fpage>e50216</fpage><pub-id pub-id-type="doi">10.2196/50216</pub-id><pub-id pub-id-type="medline">37938896</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bates</surname><given-names>DW</given-names> </name><name name-style="western"><surname>Auerbach</surname><given-names>A</given-names> </name><name name-style="western"><surname>Schulam</surname><given-names>P</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>A</given-names> </name><name name-style="western"><surname>Saria</surname><given-names>S</given-names> </name></person-group><article-title>Reporting and implementing interventions involving machine learning and artificial intelligence</article-title><source>Ann Intern Med</source><year>2020</year><month>06</month><day>2</day><volume>172</volume><issue>11 Suppl</issue><fpage>S137</fpage><lpage>S144</lpage><pub-id pub-id-type="doi">10.7326/M19-0872</pub-id><pub-id pub-id-type="medline">32479180</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ajami</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Bagheri-Tadi</surname><given-names>T</given-names> </name></person-group><article-title>Barriers for adopting electronic health records (EHRs) by physicians</article-title><source>Acta Inform Med</source><year>2013</year><volume>21</volume><issue>2</issue><fpage>129</fpage><lpage>134</lpage><pub-id pub-id-type="doi">10.5455/aim.2013.21.129-134</pub-id><pub-id pub-id-type="medline">24058254</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liao</surname><given-names>F</given-names> </name><name name-style="western"><surname>Adelaine</surname><given-names>S</given-names> </name><name name-style="western"><surname>Afshar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Patterson</surname><given-names>BW</given-names> </name></person-group><article-title>Governance of clinical AI applications to facilitate safe and equitable deployment in a large health system: key elements and early successes</article-title><source>Front Digit Health</source><year>2022</year><volume>4</volume><fpage>931439</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2022.931439</pub-id><pub-id pub-id-type="medline">36093386</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Dillon</surname><given-names>T</given-names> </name></person-group><article-title>Regulatory responses and approval status of artificial intelligence medical devices with a focus on China</article-title><source>NPJ Digit Med</source><year>2024</year><month>09</month><day>18</day><volume>7</volume><issue>1</issue><fpage>255</fpage><pub-id 
pub-id-type="doi">10.1038/s41746-024-01254-x</pub-id><pub-id pub-id-type="medline">39294318</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="web"><article-title>Guiding principles for registration review of artificial intelligence medical devices</article-title><source>Center for Medical Device Evaluation, National Medical Products Administration</source><year>2022</year><month>03</month><day>10</day><access-date>2026-05-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cncsdr.org/ggtz/ggzz/202203/t20220311_304188.html">https://www.cncsdr.org/ggtz/ggzz/202203/t20220311_304188.html</ext-link></comment></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>F</given-names> </name><name name-style="western"><surname>Beecy</surname><given-names>A</given-names> </name></person-group><article-title>Implementing AI models in clinical workflows: a roadmap</article-title><source>BMJ Evid Based Med</source><year>2025</year><month>09</month><day>22</day><volume>30</volume><issue>5</issue><fpage>285</fpage><lpage>287</lpage><pub-id pub-id-type="doi">10.1136/bmjebm-2023-112727</pub-id><pub-id pub-id-type="medline">38914450</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nair</surname><given-names>M</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name><name name-style="western"><surname>Larsson</surname><given-names>I</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>JM</given-names> </name></person-group><article-title>A comprehensive overview of barriers and strategies for AI implementation in healthcare: mixed-method 
design</article-title><source>PLoS ONE</source><year>2024</year><volume>19</volume><issue>8</issue><fpage>e0305949</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0305949</pub-id><pub-id pub-id-type="medline">39121051</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Greenhalgh</surname><given-names>T</given-names> </name><name name-style="western"><surname>Papoutsi</surname><given-names>C</given-names> </name></person-group><article-title>Studying complexity in health services research: desperately seeking an overdue paradigm shift</article-title><source>BMC Med</source><year>2018</year><month>06</month><day>20</day><volume>16</volume><issue>1</issue><fpage>95</fpage><pub-id pub-id-type="doi">10.1186/s12916-018-1089-4</pub-id><pub-id pub-id-type="medline">29921272</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sendak</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Ratliff</surname><given-names>W</given-names> </name><name name-style="western"><surname>Sarro</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Real-world integration of a sepsis deep learning technology into routine clinical care: implementation study</article-title><source>JMIR Med Inform</source><year>2020</year><month>07</month><day>15</day><volume>8</volume><issue>7</issue><fpage>e15182</fpage><pub-id pub-id-type="doi">10.2196/15182</pub-id><pub-id pub-id-type="medline">32673244</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McKee</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Wouters</surname><given-names>OJ</given-names> </name></person-group><article-title>The challenges of regulating artificial intelligence in healthcare; Comment on &#x201C;Clinical decision support and new regulatory frameworks for medical devices: are we ready for it? &#x2013; A viewpoint paper&#x201D;</article-title><source>Int J Health Policy Manag</source><year>2023</year><volume>12</volume><fpage>7261</fpage><pub-id pub-id-type="doi">10.34172/ijhpm.2022.7261</pub-id><pub-id pub-id-type="medline">36243948</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gama</surname><given-names>F</given-names> </name><name name-style="western"><surname>Tyskbo</surname><given-names>D</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>J</given-names> </name><name name-style="western"><surname>Barlow</surname><given-names>J</given-names> </name><name name-style="western"><surname>Reed</surname><given-names>J</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name></person-group><article-title>Implementation frameworks for artificial intelligence translation into health care practice: scoping review</article-title><source>J Med Internet Res</source><year>2022</year><month>01</month><day>27</day><volume>24</volume><issue>1</issue><fpage>e32215</fpage><pub-id pub-id-type="doi">10.2196/32215</pub-id><pub-id pub-id-type="medline">35084349</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Busuioc</surname><given-names>M</given-names> </name></person-group><article-title>Accountable artificial intelligence: holding algorithms to account</article-title><source>Public Adm 
Rev</source><year>2021</year><volume>81</volume><issue>5</issue><fpage>825</fpage><lpage>836</lpage><pub-id pub-id-type="doi">10.1111/puar.13293</pub-id><pub-id pub-id-type="medline">34690372</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Trocin</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mikalef</surname><given-names>P</given-names> </name><name name-style="western"><surname>Papamitsiou</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Conboy</surname><given-names>K</given-names> </name></person-group><article-title>Responsible AI for digital health: a synthesis and a research agenda</article-title><source>Inf Syst Front</source><year>2023</year><month>12</month><volume>25</volume><issue>6</issue><fpage>2139</fpage><lpage>2157</lpage><pub-id pub-id-type="doi">10.1007/s10796-021-10146-4</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name><name name-style="western"><surname>Blasimme</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>IG</given-names> </name></person-group><article-title>Machine learning in medicine: addressing ethical challenges</article-title><source>PLoS Med</source><year>2018</year><month>11</month><volume>15</volume><issue>11</issue><fpage>e1002689</fpage><pub-id pub-id-type="doi">10.1371/journal.pmed.1002689</pub-id><pub-id pub-id-type="medline">30399149</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mittelstadt</surname><given-names>B</given-names> 
</name></person-group><article-title>Principles alone cannot guarantee ethical AI</article-title><source>Nat Mach Intell</source><year>2019</year><volume>1</volume><issue>11</issue><fpage>501</fpage><lpage>507</lpage><pub-id pub-id-type="doi">10.1038/s42256-019-0114-4</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jobin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ienca</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name></person-group><article-title>The global landscape of AI ethics guidelines</article-title><source>Nat Mach Intell</source><year>2019</year><volume>1</volume><issue>9</issue><fpage>389</fpage><lpage>399</lpage><pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Owoyemi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Owoyemi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Osiyemi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Boyd</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence for healthcare in Africa</article-title><source>Front Digit Health</source><year>2020</year><volume>2</volume><fpage>6</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2020.00006</pub-id><pub-id pub-id-type="medline">34713019</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alami</surname><given-names>H</given-names> </name><name 
name-style="western"><surname>Lehoux</surname><given-names>P</given-names> </name><name name-style="western"><surname>Auclair</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Artificial intelligence and health technology assessment: anticipating a new level of complexity</article-title><source>J Med Internet Res</source><year>2020</year><month>07</month><day>7</day><volume>22</volume><issue>7</issue><fpage>e17707</fpage><pub-id pub-id-type="doi">10.2196/17707</pub-id><pub-id pub-id-type="medline">32406850</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Benraouane</surname><given-names>SA</given-names> </name></person-group><source>AI Management System Certification According to the ISO/IEC 42001 Standard</source><year>2024</year><edition>1</edition><publisher-name>Productivity Press</publisher-name><pub-id pub-id-type="doi">10.4324/9781003463979</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="web"><article-title>Artificial intelligence/machine learning (AI/ML)-based software as a medical device (SaMD) action plan</article-title><source>US Food and Drug Administration</source><year>2021</year><month>01</month><access-date>2026-05-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/media/145022/download">https://www.fda.gov/media/145022/download</ext-link></comment></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Yin</surname><given-names>RK</given-names> </name></person-group><source>Case Study Research: Design and Methods</source><year>2014</year><edition>5</edition><publisher-name>SAGE</publisher-name><pub-id pub-id-type="other">9781452242569</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>447</fpage><lpage>453</lpage><pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="medline">31649194</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bates</surname><given-names>DW</given-names> </name><name name-style="western"><surname>Levine</surname><given-names>D</given-names> </name><name name-style="western"><surname>Syrowatka</surname><given-names>A</given-names> </name><etal/></person-group><article-title>The potential of artificial intelligence to improve patient safety: a scoping review</article-title><source>NPJ Digit Med</source><year>2021</year><month>03</month><day>19</day><volume>4</volume><issue>1</issue><fpage>54</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00423-6</pub-id><pub-id pub-id-type="medline">33742085</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajkomar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dean</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kohane</surname><given-names>I</given-names> 
</name></person-group><article-title>Machine learning in medicine</article-title><source>N Engl J Med</source><year>2019</year><month>04</month><day>4</day><volume>380</volume><issue>14</issue><fpage>1347</fpage><lpage>1358</lpage><pub-id pub-id-type="doi">10.1056/NEJMra1814259</pub-id><pub-id pub-id-type="medline">30943338</pub-id></nlm-citation></ref></ref-list></back></article>