<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e76709</article-id><article-id pub-id-type="doi">10.2196/76709</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Bridging the AI-Literacy Gap in Health Care: Qualitative Analysis of the Flanders Case Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Chatzichristos</surname><given-names>Christos</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Chatzichristos</surname><given-names>Georgios</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Borremans</surname><given-names>Isabelle</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gruyaert</surname><given-names>Stefaan</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>De Vos</surname><given-names>Ilse</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>De Vos</surname><given-names>Maarten</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>De Backere</surname><given-names>Femke</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff5">5</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Electrical Engineering, STADIUS Center for Dynamical Systems, Signal Processing, and Data Analytics, KU Leuven</institution><addr-line>Kasteelpark Arenberg 10</addr-line><addr-line>Leuven</addr-line><country>Belgium</country></aff><aff id="aff2"><institution>Vlaamse AI Academie (VAIA)</institution><country>Belgium</country></aff><aff id="aff3"><institution>School of Political Sciences, Aristotle University of Thessaloniki</institution><addr-line>Thessaloniki</addr-line><country>Greece</country></aff><aff id="aff4"><institution>Department of Development and Regeneration, KU Leuven</institution><addr-line>Leuven</addr-line><country>Belgium</country></aff><aff id="aff5"><institution>IDLab, Department of Information Technology, Ghent University</institution><addr-line>Ghent</addr-line><country>Belgium</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Uchenna</surname><given-names>Akobundu</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Gorrepati</surname><given-names>Leela Prasad</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Potla</surname><given-names>Ravi Teja</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sangaraju</surname><given-names>Varun Varma</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Christos Chatzichristos, PhD, Department of Electrical Engineering, STADIUS Center for Dynamical Systems, Signal Processing, and Data Analytics, KU Leuven, Kasteelpark Arenberg 10, Leuven, 3001, Belgium, 32 0456087126; <email>christos.chatzichristos@kuleuven.be</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>8</day><month>12</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e76709</elocation-id><history><date date-type="received"><day>29</day><month>04</month><year>2025</year></date><date date-type="rev-recd"><day>08</day><month>10</month><year>2025</year></date><date date-type="accepted"><day>09</day><month>10</month><year>2025</year></date></history><copyright-statement>&#x00A9; Christos Chatzichristos, Georgios Chatzichristos, Isabelle Borremans, Stefaan Gruyaert, Ilse De Vos, Maarten De Vos, Femke De Backere. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 8.12.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e76709"/><abstract><sec><title>Background</title><p>Building on the assertion that nearly every clinician will eventually use artificial intelligence (AI), this study provides a triangulated qualitative analysis of the requirements, challenges, and prospects for integrating AI into routine health care practice. This skills gap contributes to cautious and uneven adoption across clinical settings. Despite advancements, many health care professionals report a self-perceived lack of proficiency in comprehending, critically evaluating, and ethically deploying AI tools, which contributes to cautious adoption in clinical settings.</p></sec><sec><title>Objective</title><p>While addressing key research questions, the study investigates the necessary prerequisites, barriers, and opportunities for AI adoption and specific training priorities that medical staff require. The study is uniquely focused on the health care workforce, moving beyond the predominant emphasis in the literature on medical students.</p></sec><sec sec-type="methods"><title>Methods</title><p>Situated in Flanders, Belgium, a recognized innovation leader but with moderate lifelong learning participation, this research combines 15 semistructured expert interviews, a regional survey of 134 health care professionals, and 3 co-interpretive focus groups with 39 stakeholders, all conducted in 2024.</p></sec><sec sec-type="results"><title>Results</title><p>The results expose small generational and mainly occupational divides. For instance, 85.07% (114/134) of survey respondents expressed interest in introductory AI courses tailored to health care, while 80% (107/134) of them sought practical, job-relevant AI skills. However, only 13.8% (19/134) of clinicians felt that their training adequately prepared them for AI integration. Notably, younger professionals (&#x003C;30 years of age) were most eager to engage with AI but also expressed greater concern about job displacement, while older professionals (&#x003E;50 years of age) prioritized reducing administrative burden. Physicians and dentists reported higher self-assessed AI knowledge, whereas nurses and physiotherapists showed the lowest familiarity. The survey also revealed differences in preferred learning formats, with doctors favoring flexible, asynchronous learning and nurses emphasizing the need for accredited, employer-supported training during work hours. Ethics, though emphasized in academic literature, ranked low in training interest among most practitioners, except for younger and palliative care professionals. Focus group participants confirmed the need for clear regulatory guidance and access to accredited, practically oriented training. A significant insight was that nurses often lacked institutional support and funding for training, despite their pivotal role in AI-enabled workflows.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Taken together, these findings indicate that a one-size-fits-all approach to AI education in health care is unlikely to be effective. By triangulating insights across research stages, this study highlights the need for occupation-specific, accessible, and accredited AI training programs that bridge gaps in digital literacy and align with practical clinical priorities. The qualitative insights obtained can inform policy and training priorities in light of the European Union (EU) AI literacy mandates, while highlighting persistent gaps in workforce preparation.</p></sec></abstract><kwd-group><kwd>AI literacy</kwd><kwd>cross-disciplinary collaboration</kwd><kwd>occupational divides</kwd><kwd>structural inequalities</kwd><kwd>tailored training</kwd><kwd>lifelong learning</kwd><kwd>upskilling</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The Information Age has spurred health care innovation by providing extensive data access and technology during the last 40 years, empowering the workforce to revolutionize health care. The emergence of artificial intelligence (AI) stands out as one of the most transformative elements of this era. Amid present conditions, AI technologies are increasingly becoming prominent in the analysis of diverse health data [<xref ref-type="bibr" rid="ref1">1</xref>]. They are accelerating the processes of diagnosis and therapy, enhancing imaging techniques, providing guidance for surgical procedures, and streamlining drug research, thereby enabling the development of more personalized therapies [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>In health care, AI has already shown remarkable promise, from analyzing chest X-rays and skin lesions (Keane and Topol [<xref ref-type="bibr" rid="ref3">3</xref>]) to increasing understanding of individual treatment response for COPD [<xref ref-type="bibr" rid="ref4">4</xref>] and significantly decreasing the time needed to review seizure events [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Despite these advancements, scaling AI faces hurdles, such as technical limitations, trust deficits, privacy concerns, algorithmic biases, and systemic issues, such as misaligned incentives, fragmented workflows [<xref ref-type="bibr" rid="ref7">7</xref>], or even lack of more general, basic data and AI literacy among health care professionals. Moreover, recent studies and frontline experiences reveal that stakeholder resistance (driven by concerns about professional autonomy, patient safety, and the opaque nature of certain AI systems) remains a substantial and persistent challenge [<xref ref-type="bibr" rid="ref8">8</xref>]. Regulatory frameworks remain in flux, and despite the European Union Artificial Intelligence (EU AI) Act and other policy initiatives, many clinical environments still grapple with gaps in guidance, accountability, and the standardization of ethical and safety protocols. These realities contribute to a landscape where the transformative promises of AI are frequently counterbalanced by practical and institutional constraints, underscoring the need for nuanced, context-specific analyses of implementation [<xref ref-type="bibr" rid="ref9">9</xref>]. Addressing these challenges requires a strategic approach that emphasizes investments in human capital, clear regulatory frameworks, and improved data and AI literacy. To overcome these barriers, a human-centered design approach and revised frameworks tailored to health care&#x2013;specific challenges are essential [<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>One increasingly acknowledged, yet still underresearched, area where AI could provide significant support is the reduction of administrative burden in health care. Studies suggest that clinicians spend up to half of their working hours on documentation, scheduling, and other nonclinical tasks, leading to burnout and reduced time with patients [<xref ref-type="bibr" rid="ref11">11</xref>]. AI-driven solutions, such as automated note-taking, intelligent triage systems, and smart scheduling assistants, hold promise in alleviating this load. However, while the literature often emphasizes diagnostic and clinical decision-making applications of AI, the administrative potential of AI remains relatively underexplored in both academic and policy discussions. This gap limits the development of training programs and tools that target what are likely among the most immediate and practical benefits of AI for the existing workforce.</p><p>Although the usage of AI tools in clinical practice is on the rise, there exists a self-reported deficiency in the ability to comprehend, critically evaluate, and ethically use these tools in everyday clinical settings [<xref ref-type="bibr" rid="ref12">12</xref>]. As a result, there is still an apparent reluctance by the medical workforce to use AI clinically [<xref ref-type="bibr" rid="ref13">13</xref>]. Given these contexts, an increasing number of researchers and organizations underscore the importance of integrating AI into medical education, residency training, and continuous education programs for medical staff.</p><p>There is a well-documented gap in postgraduate and lifelong learning opportunities for health care professionals to acquire AI-specific skills, particularly compared to students [<xref ref-type="bibr" rid="ref14">14</xref>]. Systematic reviews have found that most AI training initiatives target medical students or early-career phases, while working professionals (especially nurses and allied staff) must often seek out fragmented or informal learning, with formal, occupation-relevant programs remaining notably rare [<xref ref-type="bibr" rid="ref15">15</xref>]. While AI-related continuing education has expanded in theory, actual uptake remains low due to lack of system-wide support, limited funding, and time constraints among health care staff, as reported in a very recent Lancet publication [<xref ref-type="bibr" rid="ref16">16</xref>]. This literature underscores that competency gaps in areas such as algorithm appraisal, ethical and legal application, and digital literacy persist well after graduation, risking uneven and sometimes unsafe AI adoption in clinical practice. These findings demonstrate the urgency and relevance of research focused directly on health care workers&#x2019; lifelong learning needs and AI upskilling beyond academic education.</p><p>A survey found that only 13.8% of clinicians believed their training programs adequately prepared them for integrating AI into their practice [<xref ref-type="bibr" rid="ref3">3</xref>]. However, another survey [<xref ref-type="bibr" rid="ref17">17</xref>] shows that 71% of clinicians in ophthalmology, radiology, and dermatology reported optimism about AI improving their fields, particularly in disease screening and reducing monotonous tasks. This indicates that expectations for AI solutions are high among health care professionals. However, 80.9% had never used AI in practice, and only 5.5% rated their knowledge as &#x201C;excellent.&#x201D; Interestingly, ophthalmologists reported the highest usage of AI (15.7%), compared to radiologists (6.1%) and dermatologists (5.2%), highlighting disparities in AI adoption between occupations that reflect differences in exposure and training [<xref ref-type="bibr" rid="ref17">17</xref>]. Significant divergences are also identifiable between different generations. Clinicians with less than 5 years of experience were twice as likely to perceive AI&#x2019;s impact as profound compared to those with more than 30 years of practice. This generational divide underscores the need for tailored strategies to foster acceptance and implementation.</p><p>The successful integration of AI into health care systems faces fundamental challenges, including user acceptance, consistent usage in day-to-day operations, and seamless integration into the broader clinical workspace [<xref ref-type="bibr" rid="ref18">18</xref>]. Overcoming these hurdles is essential to ensure that AI technologies become an integral and widely accepted part of routine medical practice. As in other nonmedical professions, guidelines emphasize that training related to AI is crucial for its effective integration into everyday practice [<xref ref-type="bibr" rid="ref19">19</xref>], not only within preregistration health care programs but also as a means of continuously developing the skill set of the existing workforce. While a growing volume of literature advocates for the inclusion of AI-related content in medical curricula [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>], the studies examining the training needs of the medical workforce in relation to AI are relatively scarce [<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>In line with these considerations, this study explores the following research questions:</p><list list-type="order"><list-item><p>What are the requirements, obstacles, and prospects for medical staff in integrating AI into their daily tasks?</p></list-item><list-item><p>What are their key priorities and needs for AI training in these areas?</p></list-item></list><p>To address these questions, the study conducts a thorough needs assessment of the overall medical workforce [<xref ref-type="bibr" rid="ref22">22</xref>]. The studies that have explored the AI training among the medical workforce have mostly concentrated on specific specializations, such as clinical imaging [<xref ref-type="bibr" rid="ref13">13</xref>] or electrocardiogram interpretation [<xref ref-type="bibr" rid="ref23">23</xref>]. Few holistic approaches that explore the large-scale AI deployment in health care practices call for investments in human capital, clear regulations, and enhanced AI literacy [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>The study leverages existing literature to implement multistage, qualitative research focusing on the health care professionals in the Flemish region of Belgium, a region that is characterized as an innovation leader and reflected in a comprehensive AI policy plan [<xref ref-type="bibr" rid="ref24">24</xref>], but showing only moderate achievements in lifelong learning [<xref ref-type="bibr" rid="ref25">25</xref>]. Accordingly, this focused investigation holds promise for enriching existing literature. The aim of this study is to systematically assess the prerequisites, barriers, and opportunities for AI adoption in health care, with a specific focus on identifying the training needs of the existing health care workforce in Flanders. Foregrounding AI literacy gaps within the health care workforce is particularly urgent, as the forthcoming EU AI Act will impose new obligations on providers and institutions to ensure safe, equitable, and competent AI adoption. The rest of the study is structured as follows. The &#x201C;Methods&#x201D; section delves into the relevant literature for the adoption of AI into Flanders and integration of AI training in the health care systems and presents the design and methodology of this study. The &#x201C;Results&#x201D; section illuminates the results from our research, while the &#x201C;Discussion&#x201D; draws insights from the results and discusses limitations and pathways for future research.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Learning Needs for AI in Health Care</title><p>An international survey [<xref ref-type="bibr" rid="ref26">26</xref>] of 4596 medical, dentistry, and veterinary students from 48 countries found that 67.6% (n=3091) have positive attitudes toward AI in health care, and 76.1% (n=3474) desire more AI education in their curricula. Despite this interest, 75.3% (n=3451) reported limited general knowledge of AI and indicated the absence of AI-related courses in their programs. Additionally, most respondents felt unprepared to use AI in their future careers, underscoring the necessity for curriculum enhancements to include AI competencies. The receptiveness of medical and dental students to AI applications is particularly indicative of how AI can be integrated into graduate training, preparing the next generation of health care professionals for a future deeply intertwined with technological advancements. Cross-sectional studies, involving 3018 medical students across Turkey, further emphasized concerns about potential job reductions due to AI (44.9%), and the fear of AI devaluing the medical profession (1769, 58.6%). Along these lines, notably, only 6% (n=181) of the respondents felt competent to inform patients about AI&#x2019;s features and risks, highlighting a significant literacy gap. In this vein, the majority expressed a need for training in AI applications, error reduction, and ethical problem-solving related to AI use [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>Similar studies (with smaller samples) confirm these findings and further underscore the necessity of incorporating practical and ethical AI training into medical education to prepare students for the realities of AI-supported health care [<xref ref-type="bibr" rid="ref28">28</xref>]. Furthermore, experts emphasized the importance of integrating ethical considerations into AI education for medical students [<xref ref-type="bibr" rid="ref29">29</xref>]. In this regard, they advocated for a curriculum that balances technical AI skills with discussions on ethical implications, ensuring that future physicians can navigate the complexities of AI applications responsibly. This approach aims to foster a generation of health care professionals who are not only proficient in AI technologies but also mindful of their ethical responsibilities.</p><p>All the above findings indicate a pressing need to update medical education curricula to include comprehensive AI training [<xref ref-type="bibr" rid="ref3">3</xref>]. Such training should cover practical AI applications, ethical considerations, and strategies to mitigate potential negative impacts on the medical profession. By addressing these educational gaps, future health care professionals can be better equipped to integrate AI into their practice, ultimately enhancing patient care and maintaining the integrity of the profession. Moreover, medical education serves as the essential foundation for the entire continuum of health care workers&#x2019; training. Refined curricula should also consider inconsistencies in AI training across institutions, highlighting the challenges of standardizing education systems globally [<xref ref-type="bibr" rid="ref30">30</xref>].</p></sec><sec id="s2-2"><title>Contextualizing the Study: An Outline of Flanders&#x2019; Health Care</title><p>This study explores the integration of AI in health care training in Flanders, a region of Belgium (NUTS1 [Nomenclature des Unit&#x00E9;s Territoriales Statistiques] statistical region-Nomenclature of territorial units for statistics), which consists of 5 NUTS2 subregions, that is, Antwerp (BE21), Limburg (BE22), East Flanders (BE23), Flemish Brabant (BE24), and West Flanders (BE25). The health care system in Flanders is technologically advanced and operates under a mixed public-private model, offering universal coverage to residents. A network of hospitals, general practitioners, and specialists provides the care, which is mostly funded by social security payments. Primary care and preventative health services are highly valued in Flanders, with an emphasis on patient choice and accessibility.</p><p>Flanders&#x2019; health care system, although resource-rich and technologically advanced, is facing challenges due to an aging workforce that extends throughout the Belgian health care system. More analytically, Belgium&#x2019;s health care workforce is aging more rapidly compared to the EU average. The percentage of practicing physicians in Belgium aged 55 years or older was 24.1% in 2000, and this percentage increased rapidly to 44.9% in 2016, surpassing the EU-12 average of 34.5% for the same year. Most recent data indicate that this trend continues, as in 2020 the percentage of Belgian physicians aged 55 years and older was 43.3%, surpassing the EU-14 average of 35.1% and the EU-27 average of 37.4%. Against this background, several key specialties have an even higher proportion of aging practitioners, such as rheumatologists (46.1%), general practitioners (44.6%), and radiologists (41%). This aging workforce exacerbates existing shortages, straining the system&#x2019;s ability to meet the growing health care demands of an aging population. Despite efforts to address the issue, the percentage of older physicians in Belgium remains significantly higher than the EU-27 averages, posing a long-term challenge for health care sustainability [<xref ref-type="bibr" rid="ref31">31</xref>].</p><p>Indeed, while AI is rapidly being adopted across the global economy, Flanders remains behind in this transformative shift. As of early 2024, an impressive 72% of companies worldwide reported using AI in at least one aspect of their operations [<xref ref-type="bibr" rid="ref32">32</xref>]. However, a more recent study (conducted in 2023) from Flanders indicates that only 32.1% of companies have embraced AI within their business processes [<xref ref-type="bibr" rid="ref33">33</xref>]. This difference underscores a need for greater awareness and integration of AI technologies in the region. One of the key challenges in Flanders is a lack of perceived necessity. A substantial 65.5% of Flemish companies believe that AI offers no significant benefits to their business. Compounding this issue, 64.5% admit to lacking the requisite knowledge, skills, and experience to effectively implement AI solutions. These barriers highlight an urgent need for targeted education and skill-building initiatives to bridge the gap between potential and practice. Although Flanders is recognized as a European innovation leader, it achieves only the European average when it comes to lifelong learning participation [<xref ref-type="bibr" rid="ref25">25</xref>]. This was further confirmed by the Monitoring Report from the Flemish Department of Work and Social Economy [<xref ref-type="bibr" rid="ref34">34</xref>]. As the workplace continues to evolve, 95% of employers anticipate that the skills required of professionals will change significantly in the coming years. However, this shift is met with resistance, as 40.7% of adults in Flanders currently do not participate in any form of training and express no interest in doing so, as they see no need for learning.</p><p>When asked about lifelong learning specifically related to AI, 44% of European respondents expressed skepticism, stating it is unlikely that their employer would provide AI-related training [<xref ref-type="bibr" rid="ref35">35</xref>]. This hesitancy creates a mismatch between the growing relevance of AI and the readiness of the workforce to engage with it. The European AI Act&#x2019;s Article 4 on &#x201C;AI Literacy&#x201D; could act as a catalyst for change [<xref ref-type="bibr" rid="ref36">36</xref>]. Starting from February 2, 2025, this directive requires &#x201C;providers and deployers of AI systems to take measures to ensure, to the best extent possible, a sufficient level of AI literacy among their staff and other persons involved in the operation and use of AI systems.&#x201D; This legislative push will likely increase the demand for AI-focused training and education in Flanders, creating a significant opportunity for initiatives to close the AI literacy gap and foster a workforce that is ready to thrive in an AI-driven economy.</p><p>Overall, the Flemish context was selected because it combines a strong digital infrastructure and high levels of innovation performance with comparatively moderate participation in lifelong learning initiatives. This creates a unique setting in which both the opportunities and the structural barriers to AI adoption in health care can be examined in parallel. Moreover, the Flemish health care system is characterized by a dense network of providers and a diverse workforce, making it particularly suitable for exploring how AI integration may affect professionals at different stages of their careers. Finally, the region&#x2019;s imminent obligations under the EU AI Act further underscore the urgency of assessing workforce readiness and advancing AI literacy, ensuring that the findings of this study carry both regional relevance and broader European significance.</p></sec><sec id="s2-3"><title>Research Methodology</title><p>This study investigated the requirements, obstacles, prospects, and needs for AI training among the health care workforce by following an exploratory 3-stage research design (refer to <xref ref-type="table" rid="table1">Table 1</xref>). Initially, preliminary semistructured interviews (stage 1) were conducted to gather qualitative insights that informed the development of a subsequent survey (stage 2). The survey was designed as an exploratory needs assessment and was not powered for statistical inference. In the third stage, focus groups were used to co-interpret the survey results and extract deeper insights. This multistage approach ensured that the study addressed relevant, context-specific topics and was grounded in real-world perspectives. The study feasibility was assessed through Vlaamse AI Academie (VAIA)&#x2019;s coordination role in terms of recruitment rates, participant retention, and completion rates at each stage of the study. Acceptability was evaluated, also via VAIA-facilitated feedback processes, based on participants&#x2019; ratings and qualitative comments regarding the relevance, clarity, and perceived applicability of the sessions. This study followed COREQ (Consolidated Criteria for Reporting Qualitative Research) and CHERRIES (Checklist for Reporting Results of Internet E-Surveys) guidelines; completed checklists are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Research design.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Research stage</td><td align="left" valign="bottom">Stage 1</td><td align="left" valign="bottom">Stage 2</td><td align="left" valign="bottom">Stage 3</td></tr></thead><tbody><tr><td align="left" valign="top">Research objectives</td><td align="left" valign="top">Gaining insights for the use of AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> in health care</td><td align="left" valign="top">Knowledge, skills, and training needs on AI in health care</td><td align="left" valign="top">Data integration and interpretation</td></tr><tr><td align="left" valign="top">Research tool</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">Quantitative survey</td><td align="left" valign="top">Three focus groups</td></tr><tr><td align="left" valign="top">Research subjects</td><td align="left" valign="top">Key informants and representatives of medical staff</td><td align="left" valign="top">Medical staff across Flanders</td><td align="left" valign="top">Medical staff across Flanders</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>More analytically, in the first stage of the study, the semistructured interview guide was developed through an initial literature review on AI adoption in health care and digital training strategies [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. The guide was subsequently refined in consultation with an expert panel consisting of 3 academic researchers and 2 clinical educators familiar with AI integration and adult learning in clinical settings. Fifteen semistructured interviews were conducted with key informants from the health care sector selected to represent diverse levels of AI expertise (refer to <xref ref-type="table" rid="table2">Table 2</xref>). These individuals play a crucial role in shaping AI-related training strategies, curriculum development, policy adoption, and institutional decision-making. Their inclusion allowed us to capture insights beyond clinical practice and reflect the broader ecosystem necessary for successful AI integration. The earlier stages of the study (interviews and surveys) were focused predominantly on frontline health care practitioners. The inclusion of nonpractitioners in the focus groups during the final phase allowed us to validate, challenge, and expand the findings with institutional and educational perspectives, leading to more actionable conclusions. At the same time, given the co-interpretive nature of the final research stage, having a diverse group of stakeholders (including those responsible for educational programming, hospital-level innovation, and workforce development) aligned with our participatory research design and enhanced the practical utility of the findings.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Background of interviewees and artificial intelligence expertise level.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Interviewee</td><td align="left" valign="bottom">Background</td><td align="left" valign="bottom">Expertise on AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> (scale 1-5)</td></tr></thead><tbody><tr><td align="left" valign="top">Int1</td><td align="left" valign="top">General practitioner</td><td align="left" valign="top">2</td></tr><tr><td align="left" valign="top">Int2</td><td align="left" valign="top">Logopedist</td><td align="left" valign="top">1</td></tr><tr><td align="left" valign="top">Int3</td><td align="left" valign="top">Research nurse</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Int4</td><td align="left" valign="top">Physiotherapist</td><td align="left" valign="top">4</td></tr><tr><td align="left" valign="top">Int5</td><td align="left" valign="top">Professor of Geriatric Psychiatry, psychotherapist</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Int6</td><td align="left" valign="top">Professor Dentistry, Periodontologist</td><td align="left" valign="top">4</td></tr><tr><td align="left" valign="top">Int7</td><td align="left" valign="top">Professor of Cardiology and Head of Department</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Int8</td><td align="left" valign="top">Associate Professor Urology</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Int9</td><td align="left" valign="top">Cluster Manager Digital Health</td><td align="left" valign="top">3</td></tr><tr><td align="left" valign="top">Int10</td><td align="left" valign="top">Data Sciences Lead Pharmaceutical</td><td align="left" valign="top">4</td></tr><tr><td align="left" valign="top">Int11</td><td align="left" valign="top">Innovation Lead &#x2013; Hospital</td><td align="left" valign="top">4</td></tr><tr><td align="left" valign="top">Int12</td><td align="left" valign="top">Self-employed home care nurse</td><td align="left" valign="top">1</td></tr><tr><td align="left" valign="top">Int13</td><td align="left" valign="top">Pathologist Hospital</td><td align="left" valign="top">1</td></tr><tr><td align="left" valign="top">Int14</td><td align="left" valign="top">General practitioner</td><td align="left" valign="top">2</td></tr><tr><td align="left" valign="top">Int15</td><td align="left" valign="top">Professor of Internal Medicine and Engineering</td><td align="left" valign="top">5</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>A strategic sampling approach combined criterion sampling to target interviewees in pivotal health care roles and maximum variation sampling to ensure a broad spectrum of AI expertise. The incorporation of these methodological stages demonstrated the study&#x2019;s dedication to obtaining a comprehensive understanding of AI-related learning requirements while encouraging inclusiveness and usefulness in its conclusions. The aim was to explore the use of AI in health care within Flanders and to identify the existing AI training needs of the sector. The interview data were analyzed using thematic analysis [<xref ref-type="bibr" rid="ref37">37</xref>], supported by NVivo (QSR International) software to systematically identify themes. Following the principles of grounded theory, coding proceeded in three iterative stages: (1) open coding, in which transcripts were examined line-by-line to capture discrete concepts and assign initial codes; (2) axial coding, where relationships among codes were explored and grouped into broader categories by linking subthemes and identifying patterns; and (3) selective coding, which involved integrating these categories around central, overarching themes that addressed the research questions. During the process, a constant comparison approach was applied within and across interviews to refine category boundaries and ensure analytic consistency. NVivo&#x2019;s memo and annotation functions were used to document coding decisions, emerging theoretical insights, and reflexive notes, creating an auditable record that enhanced the transparency and credibility of the analysis.</p><p>In line with qualitative research best practices, data collection for interviews continued until thematic saturation was achieved; that is, the point at which no new codes or themes emerged. Rather than relying on the rather loose criterion of no new data, we assessed theoretical saturation in terms of the completeness and depth of the analysis (Low [<xref ref-type="bibr" rid="ref38">38</xref>]). This point was reached after the 13th interview, with the final 2 interviews serving to confirm saturation without introducing novel concepts. Reaching this stage indicated that further interviews were unlikely to generate additional analytical insights and thus informed the decision to conclude preliminary data collection at that point.</p><p>Building on the insights from the qualitative data, the study proceeded to design a survey aimed at conducting a comprehensive needs assessment [<xref ref-type="bibr" rid="ref27">27</xref>] across the regional medical workforce. The survey focused on several key areas, including health care professionals&#x2019; learning trajectories, their existing knowledge and skills related to AI, the impact of AI on their professional practices, and their specific learning needs regarding AI adoption. Developed using the EU Survey platform, the survey ran from July till the end of December of 2024. Prior to full deployment, the survey instrument underwent pretesting with 5 health care professionals (2 physicians, 1 nurse, 1 health policy advisor, and 1 hospital IT coordinator) to ensure clarity, relevance, and usability of the items. Based on their feedback, minor revisions were made to improve question wording, reduce ambiguity, and streamline the Likert-scale design. Regarding psychometric evaluation, the section on importance-of-topics for AI training consisted of multiple items rated on a 10-point Likert scale. We conducted an internal consistency analysis for this group of items. The resulting Cronbach &#x03B1; was 0.84, indicating good internal reliability of the scale.</p><p>The survey was digitally distributed to a wide array of health care organizations across Flanders, including 4 hospitals, 5 health care professional associations, 4 health care&#x2013;focused Small and Medium-sized Enterprises and enterprise associations, 6 additional Small and Medium-sized Enterprises, 2 university medical schools via their alumni lists, and 1 nongovernmental organization (NGO), namely, VAIA&#x2013;Flanders AI Academy, with which 5 of the authors are affiliated. Additionally, the survey was shared through personal networks and disseminated via health care channels and conferences in the region. While this approach ensured broad and diverse reach, it also meant that the exact number of individuals who received or engaged with the invitation is unknown, making it impossible to determine a precise denominator or formal response rate. This limitation is a recognized challenge in nonprobability, exploratory research, particularly when targeting hard-to-reach professional populations, such as health care workers. To mitigate institutional bias, survey distribution relied on multiple independent networks beyond VAIA. The study does not aim for statistical generalizability but follows a qualitative, leaning-needs assessment approach where the focus is on depth and diversity of perspectives, rather than representative prevalence estimates.</p><p>To mitigate the risk of nonresponse bias, we ensured diversity in occupation, age, and experience among respondents and triangulated survey findings with insights from in-depth interviews and focus groups, enhancing the trustworthiness and validity of our conclusions. Despite extensive efforts, the survey yielded only 139 responses (refer to <xref ref-type="table" rid="table3">Table 3</xref>) from an estimated total population of 5000 health care professionals, underscoring the challenges of engaging this demographic group in survey-based research [<xref ref-type="bibr" rid="ref39">39</xref>] (we need to note that the EUsurvey website was offline for 10 days in the first weeks of September, which we consider a very important time frame, since many potential participants returned from their holidays and multiple newsletters were shared). Five of the participants had filled less than 20% of the total questionnaire (more than 80% missing variables) and were excluded from the analysis, resulting in a final number of 134 respondents included in the analysis.</p><p>Health care professionals are a notoriously difficult group to engage in survey-based research due to time constraints, workload pressures, and survey fatigue [<xref ref-type="bibr" rid="ref39">39</xref>]. This is reflected in the survey responses (134 valid), which may appear limited in proportion to the estimated size of the target population (around 5000 professionals). While the challenges for engaging health care professions are well documented, the small sample still included a broad cross-section of health care roles across multiple regions in Flanders. This occupational heterogeneity enhances the analytical utility of the dataset. Most importantly, the survey was not intended as a stand-alone quantitative assessment but as part of a triangulated qualitative methodology that included in-depth interviews and focus groups. These multiple layers provided contextual richness and analytic depth that helped counterbalance the relatively modest survey response rate. Along these lines, the primary aim of the survey was to conduct a needs assessment, rather than to generalize findings statistically. Therefore, this study is positioned within a qualitative research tradition, aligning with existing literature that frames basic survey statistics as supporting exploratory, interpretive aims rather than pursuing broad statistical generalizability [<xref ref-type="bibr" rid="ref40">40</xref>]. Accordingly, the insights gained were intended to inform subsequent qualitative analysis and cocreation activities, rather than to produce definitive conclusions about prevalence or distribution. This approach aligns with the growing emphasis on qualitative, reflexive research in the field of AI adoption, which has gained momentum in recent years [<xref ref-type="bibr" rid="ref41">41</xref>]. Nevertheless, we recognize that the survey results should be interpreted as indicative rather than statistically representative of the entire health care workforce. This limitation reflects both the exploratory scope of the study and the methodological challenges of capturing a rapidly evolving field, such as AI in health care. Rather than aiming for generalizability, the survey was designed to generate insights, highlight emerging patterns, and guide the more in-depth qualitative inquiry. This recognition informed our decision to frame the study within an exploratory, qualitative-driven design, where the survey functions as a complementary entry point to contextualize and deepen subsequent analyses.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Demographics of participants in the survey.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Demographic characteristics</td><td align="left" valign="bottom">Participants, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Sex</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">47 (35.07)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">83 (61.94)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other and N/A<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">4 (2.98)</td></tr><tr><td align="left" valign="top" colspan="3">Age (years)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;30</td><td align="left" valign="top">16 (11.18)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>30-40</td><td align="left" valign="top">42 (27.37)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>40-50</td><td align="left" valign="top">34 (23.77)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>50-60</td><td align="left" valign="top">34 (23.77)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>60-70</td><td align="left" valign="top">7 (4.9)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>NA</td><td align="left" valign="top">10 (7)</td></tr><tr><td align="left" valign="top" colspan="3">Seniority (years)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>0-3</td><td align="char" char="." valign="top">8 (5.6)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>4-10</td><td align="char" char="." valign="top">38 (26.57)</td></tr><tr><td align="char" char="." valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>10 more than</td><td align="char" char="." valign="top">88 (58.74)</td></tr><tr><td align="left" valign="top" colspan="3">Occupation</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physician/doctor</td><td align="char" char="." valign="top">45 (33.58)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nurse/midwife</td><td align="char" char="." valign="top">41 (30.6)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Manager</td><td align="char" char="." valign="top">9 (6.72)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Dentist</td><td align="char" char="." valign="top">8 (5.97)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Pharmacist</td><td align="char" char="." valign="top">3 (2.24)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physiotherapist/kinesist</td><td align="char" char="." valign="top">12 (8.95)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></td><td align="char" char="." valign="top">16 (11.94)</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>Including Podiatrist, Lab supervisor or geneticist, Radiographer, Medical Physicist, Biomedical Informatics (3), Clinical Trial Center employee.</p></fn><fn id="table3fn2"><p><sup>b</sup>NA: Not available. </p></fn></table-wrap-foot></table-wrap><p>In the final stage, 3 focus groups were conducted in December 2024, involving 39 medical practitioners, nurses, organizers of courses, and lecturers each, to help with an inclusive and interpretive examination of the survey information gathered. The focus groups played a crucial role in analyzing the survey results, allowing stakeholders from the health care sector to validate key findings and translate them into concrete actions (refer to Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> mapping high-level themes to illustrative quotations). Each session lasted approximately 2 hours and was structured around a semistructured facilitation protocol that included (1) presentation of key survey findings, (2) guided discussion on their relevance and interpretation, and (3) open dialog to surface additional perspectives or contextual nuances not captured by the survey. The facilitation was carried out by trained moderators of the core analysis team, with note-takers documenting group dynamics in addition to verbatim audio recordings. The discussions were transcribed in full and subjected to thematic analysis. Coding followed a hybrid approach, with an initial deductive framework based on survey-derived themes and policy priorities, complemented by inductive codes emerging from participants&#x2019; reflections and narratives. Coding was conducted independently by at least 2 researchers per transcript, followed by iterative rounds of comparison, discussion, and refinement to enhance transparency and consistency. Intercoder reliability was not treated as a purely statistical exercise but as a qualitative consensus-building process, where discrepancies were discussed until agreement was reached on coding decisions. NVivo software supported the organization of codes, memo-writing, and linking of themes to illustrative quotations. This process ensured that the focus groups were not simply used as a validation step but as interpretive tools that added depth, nuance, and contextual grounding to the survey results. By capturing a broad spectrum of viewpoints and applying systematic procedures for facilitation and analysis, the study strengthened the credibility, rigor, and transferability of its findings.</p></sec><sec id="s2-4"><title>Ethical Considerations</title><p>This study was conducted in line with ethical standards for research involving human participants. It was carried out under the ethical framework of an ongoing approved project at the School of Political Sciences, Aristotle University of Thessaloniki (ethics approval number 237317/2024). In phase 1, interviewees gave written informed consent, and in phase 2, survey respondents consented via an opt-in form, and no personal identifiers were accessed by the researchers (email handling for prize draws was done solely by a VAIA communication staff member). In phase 3, all focus group participants consented to the use of anonymized ideas and images for scientific purposes. At no point was sensitive personal data collected, and all procedures were designed to ensure participant anonymity and voluntary participation. All consent materials will be made available upon request.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Preliminary Insights From Key Informants</title><p>In order to inform the design of the following research survey, the preliminary semistructured interviews explored the opportunities and challenges of AI integration in health care. The interviews highlighted a collective emphasis on the need for AI literacy as an overarching concern, as well as the reduction of administrative burdens and ethical considerations arising from AI use as specific content-related issues. Importantly, they highlighted references for learning modalities and the role of accreditation for health care workers. To actively engage health care professionals in relevant training, the most common argument was that AI tools need to provide practical benefits to their daily workflows soon after their training. As one of the interviewees emphasized, &#x201C;there will be more people interested when it is really practical, when you know how to use it in your daily practice&#x201D; (Int_1). Similarly, another one pointed out that many professionals have &#x201C;little time to really delve completely into something new&#x201D; (Int_2), underscoring the need for tools that integrate seamlessly into existing systems. By implication, online courses were widely preferred, but they were perceived as coming with challenges, such as reduced engagement compared to in-person settings. Although many professionals preferred online learning for its flexibility, a researcher pointed out that specifically, nurses often favor in-person training (Int_3). One way or another, the critical parameter for AI training integration that had to be explored was its contribution to the peoples&#x2019; workload.</p><p>On this point, the most important potential contribution of AI that emerged from the interviews was related to the overwhelming administrative burden faced by health care professionals, with the use of AI to automate repetitive tasks, such as form completion and report generation, being viewed as a promising solution. Most interviewees consented to the view that health care&#x2014;and nursing, in particular&#x2014;could benefit from AI tools to decrease the administrative burden and get more time for the patients. In this context, the head of a cardiology department highlighted that administrative tasks had grown over the years, reversing the time allocation between patient care and paperwork: &#x201C;If I compare with 20 years ago, my focus was more than 80% on the patient and 20% on the administration. What we do see now is that it is just the opposite&#x201D; (Int_7). In this context, several key informants shared the sentiment that saving time spent in administration could be the most critical contribution of AI.</p><p>The shortage of time was also related to workforce shortages, especially in nursing and psychiatry. Accordingly, AI&#x2019;s role in assisting with tasks that are increasingly difficult to manage due to workforce shortages &#x201C;help make the job of a nurse easier to get care where it&#x2019;s needed&#x201D; (Int_15). AI&#x2019;s potential to improve patient care was another focus area. From simplifying patient tracking to offering personalized health information, professionals identified various ways in which AI could benefit patients. For instance, someone stressed that &#x201C;anything that can ensure that the patient is better informed and gets information tailored to them at the time they are ready&#x201D; (Int_2) would be valuable. More information does not always come without challenges, as it can exacerbate the tendency to overtest, potentially tipping the balance further in favor of overdiagnosis, as one associate professor of medicine expressed his concerns (Int_8). This perspective highlights the need for careful consideration of the role of AI in health care to avoid unintended consequences. Rather than reducing the administrative burden due to overdiagnosis, AI may ultimately enhance it.</p><p>In addition to this, AI&#x2019;s adoption brings inherent challenges, particularly regarding privacy, data security, and trust in opaque &#x201C;black box&#x201D; systems. A doctor underscored this issue, stating, &#x201C;Everything that we helped validate or set up ourselves is fully trustable, but everything else developed as black boxes outside our environment, I don&#x2019;t trust at all&#x201D; (Int_6). This skepticism underscores the importance of transparency and stakeholder involvement in AI development and deployment. Along this line, another interviewee emphasized the critical importance of using AI judiciously. While acknowledging its potential to assist with tasks, such as language translation, he cautioned against overreliance on AI systems, advocating for a balanced approach to ensure its proper application in diverse contexts.</p><p>These normative and ethical challenges raise the need for extensive training in order for the integration of AI in health care to come organically. Along these lines, many interviewees agreed on the importance of accredited courses for AI literacy. One of them noted that professionals are drawn to accredited programs because &#x201C;they get points for the accreditation&#x201D; (Int_6). In Belgium, many health care professionals must follow continuous professional development (CPD) to maintain their accreditation, which is legally required for some professions. This system ensures they stay up to date and explains why they value training that aligns with their daily practice. However, these courses must directly impact clinical practice to attract interest. As another interviewee observed, &#x201C;You need something that&#x2019;s directly influencing the practice before you go to a course&#x201D; (Int_8). From a research perspective, the challenges and training needs of AI integration in health care must include well-structured training trajectories grounded in extensive, multidisciplinary research. This is where the current research survey plays a pivotal role, offering wide insights across diverse professional groups and health care settings.</p></sec><sec id="s3-2"><title>Mapping AI Training Trajectories</title><p>Informed by the preliminary interviews with key informants, the second stage of this research used a survey to evaluate the learning needs and interests of health care professionals in Flanders regarding AI literacy in health care. The survey garnered responses from a diverse group, including doctors, nurses, and health care managers, revealing both a strong interest in AI literacy and significant gaps in existing knowledge and training.</p><p>More analytically, a significant survey indication was that AI knowledge levels vary across different professions. Managers, physicians, and dentists reported higher AI expertise while nurses, physiotherapists, and pharmacists showed lower familiarity (refer to <xref ref-type="fig" rid="figure1">Figure 1</xref>). This indicated that AI exposure is not uniform across medical fields, with some roles integrating AI more actively than others. Age played a crucial role in these variations, as expected. Midsenior professionals (40&#x2010;50 years old) reported the highest levels of AI knowledge, whereas older professionals (&#x003E;60 years old) had the least, with some indicating no AI expertise at all (refer to <xref ref-type="fig" rid="figure2">Figure 2</xref>). Responses from the age group of 50&#x2010;60 years were mixed, suggesting varying degrees of AI exposure within this demographic. The age group of 50&#x2010;60 years and the physicians exhibited the highest SD in their replies. Contrary to what was initially expected, we can see that younger ages (&#x003C;40 years) are not yet as familiar with AI, indicating the fact that AI is not yet part of the main curricula of the universities in the health care faculties but a knowledge obtained by individuals in a later stage (either by postgraduate programs or by extra training courses). The only question where we observed a sex-based difference in responses was related to self-estimated AI knowledge (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Male respondents generally reported higher self-estimates than female respondents. However, when analyzing responses within the same occupation, the differences disappeared. The overall sex disparity arises because our sample includes more female nurses (who tend to report lower AI knowledge self-estimations) and more male physicians, who report higher self-estimations. Therefore, the apparent sex difference shown in <xref ref-type="fig" rid="figure3">Figure 3</xref> primarily reflects an occupational disparity rather than a true sex-based variation.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Self-estimation of artificial intelligence knowledge based on occupation. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig01.png"/></fig><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Self-estimation of artificial intelligence knowledge based on age. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Self-estimation of artificial intelligence knowledge based on sex. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig03.png"/></fig><p>Concerning the type of knowledge that the respondents would like to acquire for AI, most respondents prioritized &#x201C;gaining basic AI knowledge&#x201D; (74.62%, 100/134), learning to &#x201C;be able to facilitate the use of AI in my field (design a study, management, knowledge of ethics, etc)-no coding&#x201D; (70.29%, 94/134), and &#x201C;efficiently use predeveloped AI tools&#x201D; (66.42%, 89/134). Doctors and dentists were the 2 groups that were more interested (75.37%, 101/134 and 72.39%, 97/134, respectively) in the facilitation of AI use in their field rather than the efficient use of predeveloped AI tools. Related to the exact topic of an AI course that they would like to follow, a significant majority of respondents expressed strong interest in an introductory AI course tailored to health care (85.07%, 114/34), with nearly as many (80.6%, 108/134) eager to acquire practical AI skills relevant to their field (refer to <xref ref-type="fig" rid="figure4">Figure 4</xref>). Learning priorities varied by age, with younger professionals (&#x003C;30 years old) actively seeking AI education, especially in decision-making and generative AI, while older professionals (&#x003E;50 years old) showed less interest in learning AI but acknowledged its role in reducing errors and improving patient confidence. The age group of 30&#x2010;50 years balanced curiosity with concerns about practical implementation. These findings suggest AI education should be tailored for younger professionals and practical implementation strategies for older ones. The survey further highlighted a strong preference for immediately applicable AI tools over advanced technical expertise.</p><p>No significant difference was noted between respondents of different genders, while again a variability of the choices based on the occupation has been identified. The 2 most interesting topics for almost all the occupations are the first two of <xref ref-type="fig" rid="figure4">Figure 4</xref>, &#x201C;Knowledge and skills for AI applications&#x201D; and &#x201C;Introduction to AI for Healthcare.&#x201D; For nurses, though, &#x201C;Rostering and planning&#x201D; appears as the second most interesting topic (after the &#x201C;Introduction to AI for Healthcare&#x201D;), a topic that is much lower in the selection of the other occupations. &#x201C;Personalized medicine&#x201D; appears to be the third option for dentists, others, and physicians (doctors), while &#x201C;Methods to reduce medical error&#x201D; appears more important for managers, nurses, and pharmacists. Another interesting finding is the fact that, contrary to previous studies [<xref ref-type="bibr" rid="ref27">27</xref>], the topic of &#x201C;Ethics&#x201D; is low in the interest of the respondents. The least interesting of the topics among almost all the groups (except for dentists, for whom it was penultimate) was the &#x201C;Generation of synthetic data.&#x201D;</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Topics of interest for artificial intelligence learning (Likert scale 1-10). AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig04.png"/></fig><p>Different occupations exhibited distinct patterns in their training preferences and frequency. Dentists and clinicians were the most engaged in continuous learning, with the majority attending training sessions at least once per month. In contrast, professionals in other occupations typically participated in training once every 3 months or less. When selecting a course provider or topic, dentists, clinicians, and other health care professionals primarily trusted specialists in their respective domains, followed by their training institutions (universities or technical schools). However, physiotherapists and nurses prioritized recommendations from their employers over their training institutions. Managers stand out as the only group that places the highest trust in their employees&#x2019; suggestions regarding the choice of courses, rather than relying on peers or experts in their field. Across all groups, social media and government recommendations play only a small role in influencing training choices.</p><p>In parallel, perceptions of AI&#x2019;s impact on improving health care jobs varied by profession. Physicians and nurses were the most optimistic while dentists and physiotherapists were more skeptical (refer to <xref ref-type="fig" rid="figure5">Figure 5</xref>). Managers and pharmacists held moderate to positive views. Similarly, younger professionals (&#x003C;30 years old) strongly believed AI would enhance their work, whereas older professionals (&#x003E;50 years old) were more cautious (refer to <xref ref-type="fig" rid="figure6">Figure 6</xref>). A notably greater SD was also observed among doctors and individuals aged 50&#x2010;60 years, which may reflect the diversity of clinical specialties within these groups (such as cardiology, neurology, and others) that influence their experiences and attitudes toward AI integration. This heterogeneity suggests that the field of practice could be a key moderating factor underlying the observed variability [<xref ref-type="bibr" rid="ref42">42</xref>]. Regarding AI&#x2019;s future in health care, professionals identified its greatest contributions as increasing patient confidence, reducing diagnostic bias, enhancing decision support, and improving access to health care information (refer to <xref ref-type="fig" rid="figure6">Figure 6</xref>).</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Opinion about the improvement that artificial intelligence will bring into their job by occupation. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig05.png"/></fig><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Opinion about the improvement that artificial intelligence will bring into their job by age. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig06.png"/></fig><p>An interesting finding of the survey was how health care professionals perceive the risk AI poses to their work. Again, AI perception in health care varied significantly by profession, age, and potentially gender. Dentists and pharmacists felt the highest risk of AI replacing their jobs, likely due to automation in diagnostics and prescriptions. Physicians and nurses were more confident, with a broader distribution of responses (refer to <xref ref-type="fig" rid="figure7">Figure 7</xref>). Age also played a role, as younger professionals (&#x003C;30 years old) felt more at risk, whereas older professionals (&#x003E;50 years old) were less concerned, although neither was significantly concerned (refer to <xref ref-type="fig" rid="figure8">Figure 8</xref>). This generational difference suggests that younger professionals, entering a rapidly evolving workforce, may feel more vulnerable to technological changes, whereas older professionals, with more established careers, perceive AI as less of a direct threat.</p><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Opinion of the participants by occupation about the danger that artificial intelligence might bring to their job. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig07.png"/></fig><fig position="float" id="figure8"><label>Figure 8.</label><caption><p>Opinion of the participants by age about the danger that artificial intelligence might bring to their job. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig08.png"/></fig><p>Younger professionals were more concerned about job displacement, likely because they recognized AI&#x2019;s growing role in health care and wanted to stay competitive. This contrast reflects a mix of ambition and modest anxiety among younger professionals versus confidence and stability among older ones. As mentioned by Webb [<xref ref-type="bibr" rid="ref43">43</xref>], the impact of AI on employment may manifest more through reduced entry into occupations&#x2014;fewer young people starting these jobs&#x2014;rather than through increased exits by older workers, leading to a disproportionate effect on younger workers, and this &#x201C;entry margin&#x201D; effect might be reflected in the answers of the participants. If you consider the answers based on the occupation, we can note that pharmacists and dentists are the 2 groups that were slightly more concerned about their jobs and believed that AI will improve their jobs less (refer to <xref ref-type="fig" rid="figure5">Figures 5</xref> and <xref ref-type="fig" rid="figure6">6</xref>, respectively).</p><p>The responses regarding the area where AI will have the greatest impact were quite diverse (refer to <xref ref-type="fig" rid="figure9">Figure 9</xref>). Participants generally agreed that AI would influence almost all the mentioned fields (access to information, patients&#x2019; access to service, reduction of errors in health care, patients&#x2019; education, control of patients over their medical conditions, reduction of bias in diagnosis, improvement of disease prevention, and overall effect in health care). The only exception (or at least the area with the fewest responses based on the Likert score) was &#x201C;increasing patients&#x2019; confidence in medicine.&#x201D; The most significant areas of impact, though closely followed by others, were &#x201C;facilitating access to information,&#x201D; &#x201C;overall healthcare,&#x201D; and &#x201C;decision support systems.&#x201D; Notably, no significant differences were observed across various demographic categories, including gender, age, occupation, or experience level.</p><fig position="float" id="figure9"><label>Figure 9.</label><caption><p>Impact of artificial intelligence in health care. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e76709_fig09.png"/></fig></sec><sec id="s3-3"><title>Co-interpreting AI in Flanders&#x2019; Health Care</title><p>The latest stage of this study focused on interpreting data through a collaborative framework of focus groups. A total of 39 participants took part in 3 focus groups, representing key stakeholders in health care and education, including lecturers from various disciplines (excluding practitioners), education and innovation hospital managers, representatives of nursing and radiology associations, and members of health care communities. To ensure efficient discussion and facilitation, the groups were structured as follows: 14 participants in the first group, 12 in the second, and 13 in the third. The focus groups were conducted in late December 2024, following a presentation of the initial survey results by the last author. Groups 1 and 3 were facilitated by the first author, while Group 2 was led by the last author. Groups 1 and 2 took place simultaneously following the &#x201C;Split Groups Discussion&#x201D; methodology [<xref ref-type="bibr" rid="ref44">44</xref>]. Each group provided distinct perspectives on the integration of AI in health care, yet several overarching concerns emerged, particularly regarding generational differences in attitudes toward AI, trust in technology, and the evolving role of health care professionals. The demographic information of the participants in the round tables discussion can be found in <xref ref-type="table" rid="table4">Table 4</xref>.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Demographics of participants in round tables discussions.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Demographic characteristics</td><td align="left" valign="bottom" colspan="2">Participants, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Sex</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">12 (30.76)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">27 (69.24)</td></tr><tr><td align="left" valign="top" colspan="3">Age (years)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;30</td><td align="left" valign="top">1 (2.56)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>30-40</td><td align="left" valign="top">9 (23.08)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>40-50</td><td align="left" valign="top">12 (30.77)</td></tr><tr><td align="char" char="hyphen" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>50-60</td><td align="left" valign="top">4 (10.26)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>NA</td><td align="left" valign="top">13 (33.33)</td></tr><tr><td align="left" valign="top" colspan="3">Occupation</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Manager</td><td align="char" char="." valign="top">12 (30.77)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Lecturer</td><td align="char" char="." valign="top">9 (23.08)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nurses/midwives</td><td align="char" char="." valign="top">4 (10.26)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physiotherapist/kinesist</td><td align="char" char="." valign="top">2 (5.13)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Course provider</td><td align="char" char="." valign="top">9 (23.08)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="char" char="." valign="top">3 (7.69)</td></tr></tbody></table></table-wrap><p>Each focus group provided unique perspectives on AI integration in health care, confirming several key survey insights while adding deeper practical and ethical dimensions to the discussion. Echoing survey responses, all 3 groups emphasized AI&#x2019;s role in reducing administrative burden, with particular interest in AI-driven solutions for documentation, scheduling, and workflow optimization. Discussions in the focus groups revealed a range of perspectives on AI in health care, with a particular emphasis on differences between professional groups, challenges in training, regulatory barriers, and disparities in available resources.</p><p>One of the central themes raised in the discussions was the difference between nurses and doctors in their access to and engagement with AI training. Participants noted that AI tools are primarily designed for doctors, while significantly fewer applications exist for nurses. This was seen as a major limitation, making it difficult for nurses to integrate AI into their daily routines. Additionally, nurses and doctors follow training at different times. Nurses prefer courses during working hours and often expect compensation, whereas doctors tend to attend evening sessions. Some participants expressed frustration that funding for AI training is more readily available for doctors and research-oriented professionals while nurses have far fewer opportunities.</p><p>A recurring concern was the difficulty of developing hands-on AI courses. Many participants pointed out that the most relevant knowledge on AI implementation resides within private companies, but these companies are not interested in creating educational programs; rather, their focus is on selling AI products. This dynamic created a gap in practical training, as health care professionals needed guidance on AI applications but did not trust company-led training initiatives, rather than courses coming from universities. A few participants mentioned that radiologists were an exception, as companies do provide AI training in collaboration with the Radiology Society. However, in most other fields, there was a clear demand for cocreated courses where universities and companies work together to ensure both credibility and relevance.</p><p>Another major topic discussed was the regulatory complexity surrounding AI in hospitals. Participants frequently emphasized that many AI tools were not allowed to be used in clinical practice, creating uncertainty about what is permitted. There was a strong sentiment that hospitals need clearer guidelines and policies to help professionals navigate AI implementation. Some participants felt that the lack of clarity leads to hesitation, making AI adoption slower than it could be.</p><p>Time constraints for training were another issue raised in the discussions. Doctors, especially those in private practices, had more flexibility in choosing when to follow AI courses, while nurses required training during work hours, which limited their opportunities. There was agreement that hospital management plays a crucial role in ensuring all health care professionals, not just doctors, have access to AI education. Several participants pointed out that doctors, who often earn more by seeing more patients, may have a financial incentive to adopt AI tools that improve efficiency, while nurses, who work on fixed salaries, focus more on how AI can support patient care and workflow improvements (supported also by the survey and their interest in courses related to rostering and planning). Participants also discussed the general lack of resources for AI education. There was widespread agreement that funding constraints made it difficult for hospitals and universities to develop robust training programs, especially for lifelong learning. Some attendees noted that universities tend to focus on AI education for students rather than professionals, as resources are limited, and institutional priorities are not always aligned with the needs of the workforce. A broader issue raised in the discussions was the lack of ready-to-use AI tools in clinical practice. Many participants emphasized that health care professionals need AI solutions that can be integrated immediately into their workflows, yet such tools are not widely available. Some expressed frustration that AI was often presented as a future technology rather than something that can help them today. Others pointed out that existing AI training often focuses on theoretical knowledge rather than practical applications.</p><p>A particularly interesting discussion arose around ethical considerations. While ethics courses are often integrated into broader AI programs, the focus groups revealed a clear generational divide in how participants approached AI ethics. Some participants pointed out that younger professionals, particularly students, tend to prioritize ethical concerns (as has also been pointed out by previous studies conducted with students [<xref ref-type="bibr" rid="ref27">27</xref>]), whereas professionals in the workforce are more focused on AI as a functional tool. A common argument was that health care workers simply do not have the time to engage deeply with ethical discussions in their daily routines and believe that ethical concerns should primarily be handled by AI developers. However, there were diverging views within the groups. One participant argued that financial incentives drive much of the AI interest in health care, particularly among doctors who earn more with increased patient volume. In contrast, another participant from a postgraduate palliative care clinic shared that ethics was a major focus in their discussions, suggesting that ethical concerns become more prominent in fields dealing with sensitive end-of-life care. This small disagreement highlighted that while some professionals may deprioritize ethics, others see it as essential depending on the context in which AI is applied.</p><p>Across all 3 groups, discussions highlighted a complex and evolving landscape for AI adoption in health care. In <xref ref-type="table" rid="table5">Table 5</xref>, the key cross-cutting themes (ie, administrative burden, ethics, training logistics, trust, and generational and occupational differences) can be viewed. It illustrates how each was supported by data from interviews, surveys, and focus groups. While there was general recognition of AI&#x2019;s potential, participants made it clear that their trust, learning needs, and expectations vary significantly based on their profession, work environment, and access to resources. Addressing these concerns will require targeted training strategies, clearer regulations, and better collaboration between academia, industry, and health care institutions.</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Joint display of the cross-cutting themes in the 3 different phases of the study (ie, inverviews, surveys, and focus groups).</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Theme</td><td align="left" valign="bottom">Interview insights</td><td align="left" valign="bottom">Survey findings</td><td align="left" valign="bottom">Focus group insights</td></tr></thead><tbody><tr><td align="left" valign="top">Administrative burden</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Viewed AI<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup> as a potential tool to reduce time spent on documentation and form-filling</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Identified as a high-impact area for AI use, especially by nurses and midcareer professionals</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Strong consensus that administrative relief is AI&#x2019;s most practical benefit in the short term</p></list-item></list></td></tr><tr><td align="left" valign="top">Ethical concerns</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Raised, but often secondary to practical concerns. Some distrust in &#x201C;black box&#x201D; systems</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Ethics ranked low in topic interest, especially low among clinicians</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Disagreement between participants</p></list-item><list-item><p>Opinion for decreased importance of ethics once you graduate and start working, whereas palliative care workers emphasized ethical sensitivity</p></list-item></list></td></tr><tr><td align="left" valign="top">Training logistics</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Online is preferred for flexibility, but in-person is better for engagement</p></list-item><list-item><p>Nurses need compensated training</p></list-item><list-item><p>Call for accredited courses</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Minor variability among different ages and occupations</p></list-item><list-item><p>Everyone wants accredited courses</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Gaps in training availability were noted, especially for nurses</p></list-item><list-item><p>Call for accredited, hands-on programs</p></list-item></list></td></tr><tr><td align="left" valign="top">Trust and transparency</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>AI developed in-house is seen as more trustworthy than external, opaque systems</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Trust in AI is lower among older and nonclinical respondents</p></list-item><list-item><p>Survey shows moderate optimism but limited usage</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Professionals want clearer regulatory guidelines</p></list-item><list-item><p>Co-development seen as a way to build trust</p></list-item></list></td></tr><tr><td align="left" valign="top">Occupational differences</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Nurses need AI for workflow</p></list-item><list-item><p>Doctors focus on diagnostics;</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Clear role-based differences in learning needs and perceived usefulness of AI</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Frustration over unequal access to training funding</p></list-item><list-item><p>Nurses feel underserved</p></list-item></list></td></tr><tr><td align="left" valign="top">Generational differences</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Not observed</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>&#x003C;30 years of age group most interested in learning AI</p></list-item><list-item><p>&#x003E;50 years of age group least engaged but acknowledge AI benefits</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Generational divide on ethics, tool adoption, and openness to innovation</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The current multifaceted, qualitative study provided a nuanced view of both opportunities and challenges in integrating AI into health care. The study showed that while the vast majority of respondents recognized AI&#x2019;s potential to improve efficiency, it also revealed a complex relationship between age, occupation, resource availability, and attitudes toward AI in health care. Younger professionals, nonphysicians were generally more concerned about AI potentially replacing jobs, yet they remained more positive about using AI in their work. This dual perspective is likely linked to their limited access to resources, such as institutional support and opportunities for professional development, and the fact that AI is not yet implemented in the bachelor programs of any medical (or para-medical) university or school in Flanders. Early-career professionals, facing restricted resources and administrative pressures (especially nonphysicians), may see AI as a valuable tool to alleviate workload and enhance their capabilities, making them more eager to integrate AI into their practice despite concerns about job security. In contrast, older professionals (&#x003E;60 years old) appear less concerned about AI replacing jobs but are more skeptical about actively using AI. This skepticism may stem from their well-established positions and greater access to resources, including institutional support, funding, and established networks, which reduce their perceived need for AI. Having navigated the early stages of their careers, they may feel less urgency to adopt new technologies, especially when their roles are more secure and supported. Essentially, their resource-rich environment provides them with stability, reducing their perceived risk from AI but also limiting their motivation to engage deeply with new technology. These observed generational and occupational divides directly inform our proposed multitiered strategy. Younger, resource-constrained professionals call for subsidized and practice-oriented training schemes while older, resource-secure professionals highlight the need for embedding AI within existing CPD frameworks rather than new standalone programs. By explicitly mapping these divides to differentiated policy interventions, we ensure that recommendations are grounded in lived workforce realities.</p><p>Occupations in health care show varying levels of exposure to AI. Clinicians, such as radiologists and lab technicians, face high exposure due to AI&#x2019;s growing capacity for data analysis and pattern recognition. In contrast, roles like nurses and care managers (where social intelligence, adaptability, and hands-on judgment are essential) are less exposed and more resilient to automation. This does not mean clinicians are at risk of replacement, but rather that their tasks are shifting. To stay relevant, they will need to adapt by integrating AI into their workflow, focusing on what AI cannot do, such as contextual reasoning, communication, and ethical decision-making. Meanwhile, roles with strong interpersonal and organizational components may see their value reinforced, provided they also adapt to AI-enhanced systems [<xref ref-type="bibr" rid="ref45">45</xref>]. While AI adoption might hold the potential to promote fairer resource allocation, offering individuals in socioeconomic constraints enhanced access to knowledge, skills, and opportunities. The very same groups might also be more vulnerable to job displacement and precarity, as AI-driven transformations may exacerbate existing inequalities rather than alleviate them. The uneven trust and adoption patterns we observed across age and occupational groups illustrate how digital divides are reproduced within the workforce itself, making equity considerations not only a systemic policy concern but also an empirically grounded outcome of our study. This alignment between our data and broader digital equity debates underscores why workforce training cannot be designed as a &#x201C;one-size-fits-all&#x201D; model but must instead be differentiated by age, occupation, and institutional support.</p><p>These findings are embedded within a broader discussion on digital health equity. The digitalization of health care carries the risk of deepening existing health and social inequities if not accompanied by deliberate, equity-oriented policy frameworks. As digital tools increasingly mediate access to services and reshape clinical workflows, the potential for exclusion becomes particularly acute among already marginalized groups. This has been observed in multiple contexts, including the Netherlands, where digital care policies are framed as progressive and universally beneficial, yet structural risks, such as disparities in digital literacy, socioeconomic access, and institutional support, receive limited attention [<xref ref-type="bibr" rid="ref46">46</xref>]. Such gaps reflect what has been described as a policy &#x201C;blind spot&#x201D; [<xref ref-type="bibr" rid="ref46">46</xref>], the tendency to depoliticize digital care and detach equity considerations from discussions of infrastructure and governance. In contrast, this study&#x2019;s results resonate with research highlighting digital literacy training as a core enabler of equitable digital health [<xref ref-type="bibr" rid="ref47">47</xref>]. This perspective treats digital health, including data and AI applications, not as a neutral technological progression but as a socially embedded transformation shaped by uneven resource distributions, institutional priorities, and workforce dynamics. For example, Danish digital health policy has struggled to reconcile legal commitments to equality with the realities of digital stratification [<xref ref-type="bibr" rid="ref48">48</xref>]. Similarly, the Flemish context illustrates how even innovation-leading regions may fall short in translating ambition into inclusive practice. In this light, digital health equity must be approached as a governance concern, rather than merely a design feature. In our case, the absence of these enabling conditions&#x2014;particularly tailored training and institutional recognition of diverse learning needs&#x2014;contributed to uneven levels of trust in AI and unequal adoption patterns. Nurses emphasized rostering and workflow integration while physicians and dentists were more focused on clinical facilitation. However, few training initiatives accounted for these differentiated needs. This occupational stratification reflects broader patterns of digital exclusion described in the literature, where standardized solutions fail to address context-specific barriers [<xref ref-type="bibr" rid="ref49">49</xref>]. By explicitly linking these findings to policy debates, such as the EU AI Act, our study shows how occupational and generational inequities in AI adoption mirror broader structural risks, reinforcing the urgency of equity-driven governance frameworks.</p><p>Along these lines, we propose a multitiered strategy aligned with key stakeholder roles. First, health care institutions (eg, hospitals and professional associations) should embed mandatory data and AI literacy modules into existing CPD frameworks, ensuring these are accredited, occupation-specific, and available in both online and in-person formats. Training content should prioritize practical integration (eg, rostering tools, triage systems, and clinical decision support) and include embedded ethical scenarios rather than separate ethics modules. Second, regional health authorities and ministries should establish targeted funding schemes that enable subsidized training for underresourced staff groups (particularly nurses) and early-career professionals. These schemes should recognize the structural disadvantage some groups face in accessing noncompensated learning time. Also, providing equal chances for AI literacy for all health care professionals will improve the qualitative adoption of AI systems. Parallel to this, employers should be incentivized (eg, via tax credits or public grants) to allocate protected time for (data and AI) training during work hours.</p><p>Third, academic institutions and universities should codevelop interdisciplinary AI education tracks in collaboration with clinical partners and technology developers. These tracks should offer microcredentialing (as came out from our study since accreditation was a request) pathways and include hands-on exposure to real-world AI tools, ideally integrated into existing clinical education curricula to reduce learning redundancy. While concerns regarding the ethical implications of AI in health care were frequently raised, ranging from bias in AI algorithms to patient data privacy, there was surprisingly little interest in dedicated training programs focused solely on AI ethics. This discrepancy suggests that while ethics remains a key concern, it is not seen as an immediate priority for skill development among health care professionals, who are more focused on practical AI applications and workflow integration. Given this reality, embedding ethical considerations within broader AI training, rather than presenting them as standalone courses, may be a more effective strategy. By integrating discussions on bias, transparency, and accountability into technical and clinical AI education, ethics can become a natural part of AI adoption rather than an isolated, optional subject. This approach ensures that health care professionals develop a nuanced understanding of AI&#x2019;s ethical challenges while remaining engaged in training that aligns with their immediate professional needs. This generational and occupational divergence helps explain why ethics was acknowledged as important yet deprioritized in practice. For younger professionals facing immediate workload pressures, applied utility outweighed abstract ethical debates, whereas older, more established professionals expressed skepticism rooted in systemic trust and governance issues. Importantly, this divergence supports our recommendation to embed ethics within practice-oriented modules, ensuring that ethical reasoning develops in parallel with technical competence rather than in isolation.</p><p>Fourth, technology providers must be engaged as co-designers of training, but under strict ethical and regulatory oversight. Partnerships with trusted educational bodies (eg, medical colleges and nursing federations) can help mitigate bias and ensure neutrality while also fostering greater clinician trust in AI tools. Finally, to overcome the fragmentation in training provision and ensure inclusivity, we recommend establishing a regional AI training coordination body, led by health ministries or national digital health agencies, which would curate, accredit, and evaluate AI-related training offers across providers, occupations, and settings. By operationalizing these stakeholder-specific roles, training efforts can be scaled in a way that reflects real-world needs, acknowledges occupational and generational divides, and upholds digital equity as a core principle of health system innovation.</p><p>By placing our findings within the broader discourse on digital health equity, we echo recent calls for a multilevel approach to digital transformation&#x2014;one that embeds structural awareness into both policy development and training implementation. Without such a shift, digital care risks reinforcing the very disparities it promises to address, particularly as regions prepare for the rollout of EU legislation, such as the AI Act, which will impose new obligations on providers and deployers to ensure AI literacy. Given the uneven starting conditions, regional health authorities and ministries should act first by establishing funding schemes and coordination bodies to reduce disparities in access to AI training. This creates the enabling environment in which health care institutions and employers can embed structured training into CPD frameworks. Building on this foundation, universities and technology providers can co-develop and scale interdisciplinary, practice-oriented modules under clear ethical oversight. Our study thus contributes not only to the literature on AI integration in health care but also to the wider debate on how to build equitable digital futures in health systems already marked by occupational, generational, and structural divides.</p><p>Concerning limitations of our study, we reckon that the findings are exploratory and based on a nonprobability sample, which means they should be interpreted as indicative rather than representative. The focus on the Flemish context also limits the transferability of results to other health care systems. Moreover, the rapidly evolving nature of AI in health care means that attitudes and readiness may shift over time. At the same time, participation may have been skewed toward individuals with strong opinions, either highly enthusiastic about AI or skeptical of its role in health care, potentially underrepresenting the broader, more neutral majority. Given an estimated distribution, we recognize that our sample overrepresents physicians, nurses, and dentists relative to their actual proportions in the health care system and underrepresents allied health professionals. Comprehensive and up-to-date statistics detailing the full distribution of health care professionals by occupation specifically for the Flemish region are not publicly available in an aggregated format. Nurses (~45%&#x2010;50%), physicians (~15%&#x2010;20%), dentists (~2%&#x2010;3%), and allied health professionals, including physiotherapists, occupational therapists, imaging technologists, laboratory staff, etc (~25%&#x2010;30%) [<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. Furthermore, the relatively limited number of respondents constrains the ability to conduct robust analyses of intersectional factors, such as examining smaller subgroups defined by combinations of profession, age, or other demographic variables. This limitation reduces the statistical power and granularity needed to uncover nuanced trends within these intersections. This imbalance likely reflects a response bias, as physicians, nurses, and dentists are more frequently targeted in institutional mailing lists and professional networks and may also be more accustomed to participating in research or policy consultations. In contrast, allied health professionals often have less visibility in centralized databases and may face greater workloads or institutional barriers that limit their participation in voluntary research studies and probably also training. Future research should therefore build on these insights through larger-scale and comparative studies, as well as longitudinal approaches that capture changing dynamics.</p></sec><sec id="s4-2"><title>Conclusion</title><p>Crucially, the findings suggest that a one-size-fits-all approach to AI training is inadequate. Instead, future learning trajectories must be stratified by occupation, age, and available resources, and they should prioritize hands-on applications over theoretical content. Embedding ethical considerations into practical modules&#x2014;rather than isolating them&#x2014;may also bridge the current disconnect between ethical awareness and perceived learning relevance. As Flanders prepares for the implementation of the EU AI Act, this research highlights a timely opportunity to cocreate inclusive, accredited, and accessible AI training programs that reflect the lived realities of health care professionals. Doing so will be essential not only for scaling AI integration locally but also for informing broader European and global efforts to build an equitable, AI-ready health care workforce.</p></sec></sec></body><back><ack><p>The authors would like to express their gratitude to all the employees of the associated organizations, including VAIA, for their invaluable support in disseminating the survey across various stakeholder networks. Special thanks go to Myriam Slock from VLHORA for her instrumental role in organizing the round table event held at the AALST Odisee, which provided critical insights and perspectives that greatly enriched the study. We also sincerely thank all the interviewees and the participants who completed the questionnaires and contributed actively during the round tables. Their openness and engagement made this research possible. The authors&#x2019; affiliation with VAIA (Flanders AI Academy) did not influence the study&#x2019;s findings or policy recommendations. Importantly, the necessity of AI training itself was not under debate in this research; rather, discussion focused on the specific format, content, and delivery of such training to best match stakeholder and workforce needs. VAIA&#x2019;s mission is centered on lifelong learning in technology -including but not limited to AI (artificial intelligence)-, with no bias toward any particular training model, provider, or technological focus. Furthermore, we acknowledge the use of AI tools to support the development of this manuscript. The transcription of the interviews was facilitated by the Microsoft Teams AI assistant, while ChatGPT was used to improve the language and clarity of the introduction.</p></ack><notes><sec><title>Funding</title><p>The research was funded by VAIA, Flanders AI Academy, supported by the Flemish Government as part of the Flanders Artificial Intelligence Policy Plan. Authors CC and MDV received funding from the Flemish Government under the &#x201C;Onderzoeksprogramma Artifici&#x00EB;le Intelligentie (AI) Vlaanderen.&#x201D;</p></sec><sec><title>Data Availability</title><p>The dataset generated and analyzed during this study from the survey is available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>CC, GC, IB and FDB conceived the study. CC performed all the interviews. CC, GC, IB, SG, IDV, and FDB prepared the questionnaire. CC and FDB lead the discussions during round table sessions. CC and GC performed the data analysis. IDV, IB, MDV, and FDB contributed to the interpretation of results. MDV provided domain expertise and revised the manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CHERRIES</term><def><p>Checklist for Reporting Results of Internet E-Surveys</p></def></def-item><def-item><term id="abb3">COREQ</term><def><p>Consolidated Criteria for Reporting Qualitative Research</p></def></def-item><def-item><term id="abb4">CPD</term><def><p>continuous professional development</p></def></def-item><def-item><term id="abb5">EU</term><def><p>European Union</p></def></def-item><def-item><term id="abb6">EU AI</term><def><p>European Union Artificial Intelligence</p></def></def-item><def-item><term id="abb7">NGO</term><def><p>nongovernmental organization</p></def></def-item><def-item><term id="abb8">NUTS1</term><def><p>Nomenclature des Unit&#x00E9;s Territoriales Statistiques</p></def></def-item><def-item><term id="abb9">VAIA</term><def><p>Vlaamse AI Academie</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>F</given-names> </name><name name-style="western"><surname>Preininger</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence in health: state of the art, challenges, and future directions</article-title><source>Yearb Med Inform</source><year>2019</year><month>08</month><volume>28</volume><issue>1</issue><fpage>16</fpage><lpage>26</lpage><pub-id pub-id-type="doi">10.1055/s-0039-1677908</pub-id><pub-id pub-id-type="medline">31419814</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><collab>National Academy of Medicine</collab><collab>The Learning Health System Series</collab></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Whicher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>M</given-names> </name><name name-style="western"><surname>Israni</surname><given-names>ST</given-names> </name><name name-style="western"><surname>Matheny</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence in health care: the hope, the hype, the promise, the peril</article-title><source>Washington (DC): National Academies Press (US)</source><year>2019</year><pub-id pub-id-type="doi">10.17226/27111</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Keane</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>AI-facilitated health care requires education of clinicians</article-title><source>Lancet</source><year>2021</year><month>04</month><day>3</day><volume>397</volume><issue>10281</issue><fpage>1254</fpage><pub-id pub-id-type="doi">10.1016/S0140-6736(21)00722-4</pub-id><pub-id pub-id-type="medline">33812482</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huts</surname><given-names>H</given-names> </name><name name-style="western"><surname>Verstraete</surname><given-names>K</given-names> </name><name name-style="western"><surname>Staes</surname><given-names>M</given-names> </name><name name-style="western"><surname>Elmahy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Janssens</surname><given-names>W</given-names> </name><name name-style="western"><surname>De Vos</surname><given-names>M</given-names> </name></person-group><article-title>Machine learning models for different outcomes to better understand individual treatment response on COPD exacerbations in the IMPACT study</article-title><source>Eur Respir J</source><year>2024</year><month>09</month><day>14</day><pub-id pub-id-type="doi">10.1183/13993003.congress-2024.PA2993</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhagubai</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vandecasteele</surname><given-names>K</given-names> </name><name name-style="western"><surname>Swinnen</surname><given-names>L</given-names> </name><etal/></person-group><article-title>The power of ECG in semi-automated seizure detection in addition to two-channel behind-the-ear EEG</article-title><source>Bioengineering (Basel)</source><year>2023</year><month>04</month><day>20</day><volume>10</volume><issue>4</issue><fpage>491</fpage><pub-id pub-id-type="doi">10.3390/bioengineering10040491</pub-id><pub-id pub-id-type="medline">37106678</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chatzichristos</surname><given-names>C</given-names> </name><name name-style="western"><surname>Swinnen</surname><given-names>L</given-names> </name><name name-style="western"><surname>Macea</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bhagubai</surname><given-names>M</given-names> </name><name name-style="western"><surname>Van Paesschen</surname><given-names>W</given-names> </name><name name-style="western"><surname>De Vos</surname><given-names>M</given-names> </name></person-group><article-title>Multimodal detection of typical absence seizures in home environment with wearable electrodes</article-title><source>Front Signal Process</source><year>2022</year><volume>2</volume><pub-id pub-id-type="doi">10.3389/frsip.2022.1014700</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esmaeilzadeh</surname><given-names>P</given-names> </name></person-group><article-title>Challenges and strategies for wide-scale artificial intelligence (AI) deployment in healthcare practices: a perspective for healthcare organizations</article-title><source>Artif Intell Med</source><year>2024</year><month>05</month><volume>151</volume><fpage>102861</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2024.102861</pub-id><pub-id pub-id-type="medline">38555850</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nair</surname><given-names>M</given-names> </name><name name-style="western"><surname>Svedberg</surname><given-names>P</given-names> </name><name name-style="western"><surname>Larsson</surname><given-names>I</given-names> </name><name name-style="western"><surname>Nygren</surname><given-names>JM</given-names> </name></person-group><article-title>A comprehensive overview of barriers and strategies for AI implementation in healthcare: mixed-method design</article-title><source>PLOS ONE</source><year>2024</year><volume>19</volume><issue>8</issue><fpage>e0305949</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0305949</pub-id><pub-id pub-id-type="medline">39121051</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Palaniappan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>EYT</given-names> </name><name name-style="western"><surname>Vogel</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>JCW</given-names> </name></person-group><article-title>Gaps in the global regulatory frameworks for the use of artificial intelligence (AI) in the healthcare services sector and key recommendations</article-title><source>Healthcare (Basel)</source><year>2024</year><month>08</month><day>30</day><volume>12</volume><issue>17</issue><fpage>1730</fpage><pub-id pub-id-type="doi">10.3390/healthcare12171730</pub-id><pub-id pub-id-type="medline">39273754</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hua</surname><given-names>D</given-names> </name><name name-style="western"><surname>Petrina</surname><given-names>N</given-names> </name><name name-style="western"><surname>Young</surname><given-names>N</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Poon</surname><given-names>SK</given-names> </name></person-group><article-title>Understanding the factors influencing acceptability of AI in medical imaging domains among healthcare professionals: a scoping review</article-title><source>Artif Intell Med</source><year>2024</year><month>01</month><volume>147</volume><fpage>102698</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2023.102698</pub-id><pub-id pub-id-type="medline">38184343</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adler-Milstein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>W</given-names> </name><name name-style="western"><surname>Willard-Grace</surname><given-names>R</given-names> </name><name name-style="western"><surname>Knox</surname><given-names>M</given-names> </name><name name-style="western"><surname>Grumbach</surname><given-names>K</given-names> </name></person-group><article-title>Electronic health records and burnout: time spent on the electronic health record after hours and message volume associated with exhaustion but not with cynicism among primary care clinicians</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>04</month><day>1</day><volume>27</volume><issue>4</issue><fpage>531</fpage><lpage>538</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocz220</pub-id><pub-id pub-id-type="medline">32016375</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hashmi</surname><given-names>OU</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>N</given-names> </name><name name-style="western"><surname>de Vries</surname><given-names>CF</given-names> </name><name name-style="western"><surname>Gangi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Jehanli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Lip</surname><given-names>G</given-names> </name></person-group><article-title>Artificial intelligence in radiology: trainees want more</article-title><source>Clin Radiol</source><year>2023</year><month>04</month><volume>78</volume><issue>4</issue><fpage>e336</fpage><lpage>e341</lpage><pub-id pub-id-type="doi">10.1016/j.crad.2022.12.017</pub-id><pub-id pub-id-type="medline">36746724</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doherty</surname><given-names>G</given-names> </name><name name-style="western"><surname>McLaughlin</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hughes</surname><given-names>C</given-names> </name><name name-style="western"><surname>McConnell</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bond</surname><given-names>R</given-names> </name><name name-style="western"><surname>McFadden</surname><given-names>S</given-names> </name></person-group><article-title>A scoping review of educational programmes on artificial intelligence (AI) available to medical imaging staff</article-title><source>Radiography (Lond)</source><year>2024</year><month>03</month><volume>30</volume><issue>2</issue><fpage>474</fpage><lpage>482</lpage><pub-id pub-id-type="doi">10.1016/j.radi.2023.12.019</pub-id><pub-id pub-id-type="medline">38217933</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mitchell</surname><given-names>S</given-names> </name><name name-style="western"><surname>Phaneuf</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Astefanei</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>A changing landscape for lifelong learning in health globally</article-title><source>J CME</source><year>2023</year><volume>12</volume><issue>1</issue><fpage>2154423</fpage><pub-id pub-id-type="doi">10.1080/21614083.2022.2154423</pub-id><pub-id pub-id-type="medline">36969486</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gazquez-Garcia</surname><given-names>J</given-names> </name><name name-style="western"><surname>S&#x00E1;nchez-Bocanegra</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Sevillano</surname><given-names>JL</given-names> </name></person-group><article-title>AI in the health sector: systematic review of key skills for future health professionals</article-title><source>JMIR Med Educ</source><year>2025</year><month>02</month><day>5</day><volume>11</volume><fpage>e58161</fpage><pub-id pub-id-type="doi">10.2196/58161</pub-id><pub-id pub-id-type="medline">39912237</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schubert</surname><given-names>T</given-names> </name><name name-style="western"><surname>Oosterlinck</surname><given-names>T</given-names> </name><name name-style="western"><surname>Stevens</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Maxwell</surname><given-names>PH</given-names> </name><name name-style="western"><surname>van der Schaar</surname><given-names>M</given-names> </name></person-group><article-title>AI education for clinicians</article-title><source>EClinicalMedicine</source><year>2025</year><month>01</month><volume>79</volume><fpage>102968</fpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2024.102968</pub-id><pub-id pub-id-type="medline">39720600</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheetz</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rothschild</surname><given-names>P</given-names> </name><name name-style="western"><surname>McGuinness</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A survey of clinicians on the use of artificial intelligence in ophthalmology, dermatology, radiology and radiation oncology</article-title><source>Sci Rep</source><year>2021</year><month>03</month><day>4</day><volume>11</volume><issue>1</issue><fpage>5193</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-84698-5</pub-id><pub-id pub-id-type="medline">33664367</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Desolda</surname><given-names>G</given-names> </name><name name-style="western"><surname>Dimauro</surname><given-names>G</given-names> </name><name name-style="western"><surname>Esposito</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lanzilotti</surname><given-names>R</given-names> </name><name name-style="western"><surname>Matera</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zancanaro</surname><given-names>M</given-names> </name></person-group><article-title>A human-AI interaction paradigm and its application to rhinocytology</article-title><source>Artif Intell Med</source><year>2024</year><month>09</month><volume>155</volume><fpage>102933</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2024.102933</pub-id><pub-id pub-id-type="medline">39094227</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Malamateniou</surname><given-names>C</given-names> </name><name name-style="western"><surname>McFadden</surname><given-names>S</given-names> </name><name name-style="western"><surname>McQuinlan</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Artificial intelligence: guidance for clinical imaging and therapeutic radiography professionals, a summary by the Society of Radiographers AI working group</article-title><source>Radiography (Lond)</source><year>2021</year><month>11</month><volume>27</volume><issue>4</issue><fpage>1192</fpage><lpage>1202</lpage><pub-id pub-id-type="doi">10.1016/j.radi.2021.07.028</pub-id><pub-id pub-id-type="medline">34420888</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Masters</surname><given-names>K</given-names> </name></person-group><article-title>Artificial intelligence in medical education</article-title><source>Med Teach</source><year>2019</year><month>09</month><volume>41</volume><issue>9</issue><fpage>976</fpage><lpage>980</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2019.1595557</pub-id><pub-id pub-id-type="medline">31007106</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mehta</surname><given-names>N</given-names> </name><name name-style="western"><surname>Harish</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bilimoria</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Knowledge and attitudes on artificial intelligence in health care: a provincial survey study of medical students</article-title><source>MedEdPublish</source><year>2021</year><volume>10</volume><issue>1</issue><fpage>75</fpage><comment><ext-link ext-link-type="uri" xlink:href="https://mededpublish.org/articles/10-75">https://mededpublish.org/articles/10-75</ext-link></comment><pub-id pub-id-type="doi">10.15694/mep.2021.000075.1</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Topol</surname><given-names>E</given-names> </name></person-group><article-title>The Topol Review: Preparing the healthcare workforce to deliver the digital future</article-title><year>2019</year><access-date>2025-11-30</access-date><publisher-name>Health Education England</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://topol.digitalacademy.nhs.uk/the-topol-review">https://topol.digitalacademy.nhs.uk/the-topol-review</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tahri Sqalli</surname><given-names>M</given-names> </name><name name-style="western"><surname>Aslonov</surname><given-names>B</given-names> </name><name name-style="western"><surname>Gafurov</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nurmatov</surname><given-names>S</given-names> </name></person-group><article-title>Humanizing AI in medical training: ethical framework for responsible design</article-title><source>Front Artif Intell</source><year>2023</year><volume>6</volume><fpage>1189914</fpage><pub-id pub-id-type="doi">10.3389/frai.2023.1189914</pub-id><pub-id pub-id-type="medline">37261331</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><article-title>Policy plan</article-title><source>Flanders AI</source><year>2024</year><access-date>2025-11-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.flandersai.be/en/beleidsplan-artificiele-intelligentie#:~:text=The%20Flanders%20AI%20Policy%20Plan,of%20roughly%20%E2%82%AC36%20million">https://www.flandersai.be/en/beleidsplan-artificiele-intelligentie#:~:text=The%20Flanders%20AI%20Policy%20Plan,of%20roughly%20%E2%82%AC36%20million</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="web"><article-title>EIS interactive tool</article-title><source>European Comission</source><year>2024</year><access-date>2025-11-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://projects.research-and-innovation.ec.europa.eu/en/statistics/performance-indicators/european-innovation-scoreboard/eis-2024#/ris/countries/BE?region=BE2">https://projects.research-and-innovation.ec.europa.eu/en/statistics/performance-indicators/european-innovation-scoreboard/eis-2024#/ris/countries/BE?region=BE2</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Busch</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hoffmann</surname><given-names>L</given-names> </name><name name-style="western"><surname>Truhn</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Global cross-sectional student survey on AI in medical, dental, and veterinary education and practice at 192 faculties</article-title><source>BMC Med Educ</source><year>2024</year><month>09</month><day>28</day><volume>24</volume><issue>1</issue><fpage>1066</fpage><pub-id pub-id-type="doi">10.1186/s12909-024-06035-4</pub-id><pub-id pub-id-type="medline">39342231</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Civaner</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Uncu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Bulut</surname><given-names>F</given-names> </name><name name-style="western"><surname>Chalil</surname><given-names>EG</given-names> </name><name name-style="western"><surname>Tatli</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence in medical education: a cross-sectional needs assessment</article-title><source>BMC Med Educ</source><year>2022</year><month>11</month><day>9</day><volume>22</volume><issue>1</issue><fpage>772</fpage><pub-id pub-id-type="doi">10.1186/s12909-022-03852-3</pub-id><pub-id pub-id-type="medline">36352431</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weidener</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence in medicine: cross-sectional study among medical students on application, education, and ethical aspects</article-title><source>JMIR Med Educ</source><year>2024</year><month>01</month><day>5</day><volume>10</volume><fpage>e51247</fpage><pub-id pub-id-type="doi">10.2196/51247</pub-id><pub-id pub-id-type="medline">38180787</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weidener</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence teaching as part of medical education: qualitative analysis of expert interviews</article-title><source>JMIR Med Educ</source><year>2023</year><month>04</month><day>24</day><volume>9</volume><fpage>e46428</fpage><pub-id pub-id-type="doi">10.2196/46428</pub-id><pub-id pub-id-type="medline">36946094</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>&#x0130;&#x00E7;en</surname><given-names>M</given-names> </name></person-group><article-title>The future of education utilizing artificial in Turkey</article-title><source>Humanit Soc Sci Commun</source><year>2022</year><volume>9</volume><issue>1</issue><fpage>268</fpage><pub-id pub-id-type="doi">10.1057/s41599-022-01284-4</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="web"><article-title>Health workforce capacity</article-title><source>For a healthy Belgium</source><year>2024</year><month>05</month><access-date>2025-11-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.healthybelgium.be/en/health-system-performance-assessment/sustainability-of-the-health-system/health-workforce-capacity">https://www.healthybelgium.be/en/health-system-performance-assessment/sustainability-of-the-health-system/health-workforce-capacity</ext-link></comment></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Singla</surname><given-names>A</given-names> </name></person-group><article-title>The state of AI in 2025: agents, innovation, and transformation</article-title><source>McKinsey &#x0026; Company</source><year>2024</year><access-date>2025-11-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.mckinsey.com/capabilities/quantumblack/our-insights/the-state-of-ai">https://www.mckinsey.com/capabilities/quantumblack/our-insights/the-state-of-ai</ext-link></comment></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Standaert</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lecocq</surname><given-names>C</given-names> </name><name name-style="western"><surname>Andries</surname><given-names>P</given-names> </name><name name-style="western"><surname>Evens</surname><given-names>T</given-names> </name></person-group><article-title>AI Barometer. Adoption and use of artificial intelligence in Flemish companies. Situation 2023</article-title><year>2024</year><access-date>2025-11-19</access-date><publisher-name>Department of Economics, Science and Innovation</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.vlaanderen.be/publicaties/ai-barometer-adoptie-en-gebruik-van-artificiele-intelligentie-bij-vlaamse-bedrijven-situatie-2023">https://www.vlaanderen.be/publicaties/ai-barometer-adoptie-en-gebruik-van-artificiele-intelligentie-bij-vlaamse-bedrijven-situatie-2023</ext-link></comment></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Geraert</surname><given-names>F</given-names> </name><name name-style="western"><surname>Debroey</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hannon</surname><given-names>E</given-names> </name><name name-style="western"><surname>Vansteenkiste</surname><given-names>S</given-names> </name><name name-style="western"><surname>Boey</surname><given-names>R</given-names> </name></person-group><article-title>Monitoring report 2024: Individual training participation and training efforts of employers in Flanders</article-title><access-date>2025-11-19</access-date><publisher-name>Steunpunt Werk</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.vlaanderen.be/kennisplatform-werk-en-sociale-economie/bijdragen-van-het-kennisplatform/monitoringsrapport-2024-individuele-opleidingsdeelname-en-de-opleidingsinspanningen-van-werkgevers-in-vlaanderen">https://www.vlaanderen.be/kennisplatform-werk-en-sociale-economie/bijdragen-van-het-kennisplatform/monitoringsrapport-2024-individuele-opleidingsdeelname-en-de-opleidingsinspanningen-van-werkgevers-in-vlaanderen</ext-link></comment></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Pouliakas</surname><given-names>K</given-names> </name><name name-style="western"><surname>Becuwe</surname><given-names>N</given-names> </name></person-group><article-title>AI and the EU skilling challenge: first insights from cedefop&#x2019;s AI skills survey</article-title><access-date>2025-11-19</access-date><conf-name>16th Cedefop Brussels Seminar</conf-name><conf-date>Jun 24, 2024</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cedefop.europa.eu/files/ai_and_the_eu_skilling_challenge_first_insights_from_cedefops_ai_skills_survey_-_konstantinos_pouliakas_cedefop_and_nicolas_becuwe_verian.pdf">https://www.cedefop.europa.eu/files/ai_and_the_eu_skilling_challenge_first_insights_from_cedefops_ai_skills_survey_-_konstantinos_pouliakas_cedefop_and_nicolas_becuwe_verian.pdf</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><article-title>Document 32024R1689</article-title><source>EUR-Lex: European Union</source><year>2024</year><access-date>2025-12-03</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX%3A32024R1689">https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX%3A32024R1689</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><month>01</month><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Low</surname><given-names>J</given-names> </name></person-group><article-title>A pragmatic definition of the concept of theoretical saturation</article-title><source>Sociol Focus</source><year>2019</year><month>04</month><day>3</day><volume>52</volume><issue>2</issue><fpage>131</fpage><lpage>139</lpage><pub-id pub-id-type="doi">10.1080/00380237.2018.1544514</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hutchinson</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Sutherland</surname><given-names>MA</given-names> </name></person-group><article-title>Conducting surveys with multidisciplinary health care providers: current challenges and creative approaches to sampling, recruitment, and data collection</article-title><source>Res Nurs Health</source><year>2019</year><month>12</month><volume>42</volume><issue>6</issue><fpage>458</fpage><lpage>466</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/toc/1098240x/42/6">https://onlinelibrary.wiley.com/toc/1098240x/42/6</ext-link></comment><pub-id pub-id-type="doi">10.1002/nur.21976</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Georgios</surname><given-names>C</given-names> </name><name name-style="western"><surname>Barra&#x00ED;</surname><given-names>H</given-names> </name></person-group><article-title>Social innovation in rural governance: a comparative case study across the marginalised rural EU</article-title><source>J Rural Stud</source><year>2023</year><month>04</month><volume>99</volume><fpage>193</fpage><lpage>203</lpage><pub-id pub-id-type="doi">10.1016/j.jrurstud.2021.06.004</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chatzichristos</surname><given-names>G</given-names> </name></person-group><article-title>Qualitative research in the era of AI: a return to positivism or a new paradigm?</article-title><source>Int J Qual Methods</source><year>2025</year><month>04</month><volume>24</volume><pub-id pub-id-type="doi">10.1177/16094069251337583</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Daniyal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Qureshi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Marzo</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Aljuaid</surname><given-names>M</given-names> </name><name name-style="western"><surname>Shahid</surname><given-names>D</given-names> </name></person-group><article-title>Exploring clinical specialists&#x2019; perspectives on the future role of AI: evaluating replacement perceptions, benefits, and drawbacks</article-title><source>BMC Health Serv Res</source><year>2024</year><month>05</month><day>9</day><volume>24</volume><issue>1</issue><fpage>587</fpage><pub-id pub-id-type="doi">10.1186/s12913-024-10928-x</pub-id><pub-id pub-id-type="medline">38725039</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Webb</surname><given-names>M</given-names> </name></person-group><article-title>The impact of artificial intelligence on the labor market</article-title><source>SSRN Journal</source><year>2019</year><access-date>2025-11-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ssrn.com/abstract=3482150">https://ssrn.com/abstract=3482150</ext-link></comment><pub-id pub-id-type="doi">10.2139/ssrn.3482150</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Krueger</surname><given-names>R</given-names> </name><name name-style="western"><surname>Cassey</surname><given-names>MA</given-names> </name></person-group><source>Focus Groups A Practical Guide for Applied Research</source><year>2015</year><edition>5</edition><publisher-name>SAGE</publisher-name></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Lane</surname><given-names>M</given-names> </name><name name-style="western"><surname>Saint-Martin</surname><given-names>A</given-names> </name></person-group><article-title>The impact of artificial intelligence on the labour market</article-title><year>2021</year><publisher-name>OECD</publisher-name></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coetzer</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Goedhart</surname><given-names>NS</given-names> </name><name name-style="western"><surname>Schuitmaker-Warnaar</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Dedding</surname><given-names>C</given-names> </name><name name-style="western"><surname>Zuiderent-Jerak</surname><given-names>T</given-names> </name></person-group><article-title>Health equity in the digital age: exploring health policy and inclusive digital care</article-title><source>Health Policy Technol</source><year>2025</year><month>09</month><volume>14</volume><issue>5</issue><fpage>101039</fpage><pub-id pub-id-type="doi">10.1016/j.hlpt.2025.101039</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Tolley</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mc Ardle</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Recommendations to advance digital health equity: a systematic review of qualitative studies</article-title><source>NPJ Digit Med</source><year>2024</year><month>06</month><day>29</day><volume>7</volume><issue>1</issue><fpage>173</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01177-7</pub-id><pub-id pub-id-type="medline">38951666</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eriksen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ebbesen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Eriksen</surname><given-names>KT</given-names> </name><etal/></person-group><article-title>Equity in digital healthcare &#x2013; the case of Denmark</article-title><source>Front Public Health</source><year>2023</year><volume>11</volume><pub-id pub-id-type="doi">10.3389/fpubh.2023.1225222</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richardson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lawrence</surname><given-names>K</given-names> </name><name name-style="western"><surname>Schoenthaler</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Mann</surname><given-names>D</given-names> </name></person-group><article-title>A framework for digital health equity</article-title><source>NPJ Digit Med</source><year>2022</year><month>08</month><day>18</day><volume>5</volume><issue>1</issue><fpage>119</fpage><pub-id pub-id-type="doi">10.1038/s41746-022-00663-0</pub-id><pub-id pub-id-type="medline">35982146</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="web"><article-title>National institute for sickness and disability insurance</article-title><source>Riziv</source><access-date>2025-11-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.riziv.fgov.be">https://www.riziv.fgov.be</ext-link></comment></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="web"><article-title>Labour force survey: publication with detailed figures 2024</article-title><source>Statbel</source><year>2023</year><access-date>2025-11-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://statbel.fgov.be">https://statbel.fgov.be</ext-link></comment></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="web"><article-title>Statistics</article-title><source>Riziv</source><year>2023</year><access-date>2025-11-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.riziv.fgov.be/nl/statistieken/statistieken-geneeskundige-verzorging/statistieken-2023">https://www.riziv.fgov.be/nl/statistieken/statistieken-geneeskundige-verzorging/statistieken-2023</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Mapping of focus group themes to illustrative quotation.</p><media xlink:href="jmir_v27i1e76709_app1.docx" xlink:title="DOCX File, 15 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>COREQ checklist</p><media xlink:href="jmir_v27i1e76709_app2.pdf" xlink:title="PDF File, 358 KB"/></supplementary-material><supplementary-material id="app3"><label>Checklist 2</label><p>CHERRIES checklist</p><media xlink:href="jmir_v27i1e76709_app3.pdf" xlink:title="PDF File, 332 KB"/></supplementary-material></app-group></back></article>