<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e77501</article-id><article-id pub-id-type="doi">10.2196/77501</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Listening to Patients&#x2019; Voices on the Use of AI in Health Care: Cross-Sectional Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Chandrasekaran</surname><given-names>Ranganathan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Takale</surname><given-names>Lavanya</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Moustakas</surname><given-names>Evangelos</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Information &#x0026; Decision Sciences, University of Illinois 
Chicago</institution><addr-line>2428 Univ Hall, 601 S Morgan Street</addr-line><addr-line>Chicago</addr-line><addr-line>IL</addr-line><country>United States</country></aff><aff id="aff2"><institution>Department of Communication and Media, Canadian University of Dubai</institution><addr-line>Dubai</addr-line><country>United Arab Emirates</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Hungbo</surname><given-names>Akonasu</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Obianyo</surname><given-names>Chekwube</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Shang</surname><given-names>Di</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Singh</surname><given-names>Reenu</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Baig</surname><given-names>Saad Ilyas</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ranganathan Chandrasekaran, PhD, Department of Information &#x0026; Decision Sciences, University of Illinois Chicago, 2428 Univ Hall, 601 S Morgan Street, Chicago, IL, 60607, United States, 1 3129962847; <email>ranga@uic.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>5</day><month>12</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e77501</elocation-id><history><date date-type="received"><day>14</day><month>05</month><year>2025</year></date><date date-type="rev-recd"><day>11</day><month>11</month><year>2025</year></date><date 
date-type="accepted"><day>11</day><month>11</month><year>2025</year></date></history><copyright-statement>&#x00A9; Ranganathan Chandrasekaran, Lavanya Takale, Evangelos Moustakas. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 5.12.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e77501"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI) holds great promise in transforming health care delivery. However, successful implementation of AI projects in health care depends on patients&#x2019; acceptance and trust. 
There is limited empirical research examining public perceptions, particularly the use of personal health data in AI applications in health care.</p></sec><sec><title>Objective</title><p>This study examined public knowledge and comfort levels with AI use in health care, including use of personal health data with and without consent, and assessed how sociodemographic factors, digital literacy, and health conditions influence these perceptions.</p></sec><sec sec-type="methods"><title>Methods</title><p>We analyzed data from 6904 Canadian adults who participated in the 2023 Canadian Digital Health Survey. AI-related knowledge and comfort levels were measured using ordinal scales. Sociodemographic characteristics, digital health literacy, and self-reported chronic health conditions were included as predictors. Ordinal logistic regression models were used to assess associations between these factors and AI-related attitudes.</p></sec><sec sec-type="results"><title>Results</title><p>A majority of 2919 (42.3%) reported moderate knowledge of AI; only 7.8% (542) described themselves as very knowledgeable. Overall, 44.6% were comfortable with AI use in health care, increasing to 64.7% when personal health data were used with consent but decreasing when used without consent (52.6% uncomfortable). Respondents were most comfortable with AI use for epidemic tracking and workflow management and less for clinical tasks. Fully weighted ordinal logistic regression models indicated that men (odds ratio [OR]=1.57, <italic>P</italic>&#x003C;.001), noncitizens (OR=1.71, <italic>P</italic>&#x003C;.001), higher-income respondents (OR=1.29, <italic>P</italic>&#x003C;.001), those with graduate education (OR=1.43, <italic>P</italic>&#x003C;.001), higher digital health literacy (OR=1.08, <italic>P</italic>&#x003C;.001), and more chronic conditions (OR=1.08, <italic>P</italic>&#x003C;.001) exhibited greater odds of reporting higher AI knowledge. 
For comfort with AI use in health care, those aged 65+ years (OR=1.47, <italic>P</italic>&#x003C;.001), men (OR=1.50, <italic>P</italic>&#x003C;.001), noncitizens (OR=1.49, <italic>P</italic>&#x003C;.001), higher-income respondents (OR=1.21, <italic>P</italic>&#x003C;.001), and those with higher digital health literacy (OR=1.06, <italic>P</italic>&#x003C;.001) or more chronic conditions (OR=1.04, <italic>P</italic>=.04) exhibited greater comfort. Lower-income (OR=0.87, <italic>P</italic>=.03) and White respondents (OR=0.77, <italic>P</italic>&#x003C;.001) reported lower comfort levels. For comfort with using personal health data in AI with consent, adults aged 35&#x2010;54 years (OR=0.72, <italic>P</italic>&#x003C;.001) were less comfortable than those aged 16&#x2010;24 years. Men (OR=1.39, <italic>P</italic>&#x003C;.001), higher-income respondents (OR=1.16, <italic>P</italic>=.01), and those with higher digital health literacy (OR=1.05, <italic>P</italic>&#x003C;.001) or more chronic conditions (OR=1.07, <italic>P</italic>&#x003C;.001) showed greater comfort; White (OR=0.78, <italic>P</italic>&#x003C;.001), other racial groups (OR=0.77, <italic>P</italic>=.03), and lower-income respondents were less comfortable (OR=0.83, <italic>P</italic>=.01). For comfort with using personal health data in AI without consent, men (OR=1.56, <italic>P</italic>&#x003C;.001), noncitizens (OR=1.28, <italic>P</italic>=.03), and those with higher digital health literacy (OR=1.04, <italic>P</italic>&#x003C;.001) exhibited greater comfort. 
Lower-income respondents (OR=0.86, <italic>P</italic>=.02), adults aged 35&#x2010;54 years (OR=0.73, <italic>P</italic>&#x003C;.001) or 55&#x2010;64 years (OR=0.77, <italic>P</italic>=.01), and White (OR=0.69, <italic>P</italic>&#x003C;.001) and Black or African-origin (OR=0.71, <italic>P</italic>=.02) respondents reported lower comfort levels.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The findings point to enhancing transparent policies, digital literacy, and ethical data governance as key to increasing public trust in AI-driven health care.</p></sec></abstract><kwd-group><kwd>patient attitudes</kwd><kwd>artificial intelligence</kwd><kwd>technology acceptance</kwd><kwd>responsible AI</kwd><kwd>survey</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Artificial intelligence (AI) in health care broadly refers to the use of advanced computational techniques and algorithms, including machine learning, deep learning, natural language processing, large language and image models, and computer vision to extract insights from complex medical data and enhance clinical decision-making [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. By augmenting human expertise with data-driven insights, AI has the potential to revolutionize multiple aspects of health care delivery, from early disease detection [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>] and drug discovery [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>] to improving operational efficiency and resource allocation in health care systems [<xref ref-type="bibr" rid="ref7">7</xref>]. Advanced generative AI models can help analyze a variety of digital content, including clinical images, videos, text, and audio, as well as clinical data from electronic health records [<xref ref-type="bibr" rid="ref8">8</xref>]. 
The global AI in health care market is projected to grow at a compound annual growth rate of 36.83% from 2024 to 2034, increasing from US $26.69 billion to US $613.81 billion [<xref ref-type="bibr" rid="ref9">9</xref>], signaling a significant shift in how health care is delivered and managed. This rapid integration of AI into clinical practice has generated both excitement and concern among health care professionals, policymakers, and patients. As AI becomes more prevalent in clinical settings, there is a growing concern about its unintended consequences. AI could deepen the digital divide and increase disparities, especially among older adults, low-income groups, and rural communities [<xref ref-type="bibr" rid="ref10">10</xref>]. Further, the ethical and regulatory challenges surrounding the use of AI in health care continue to be widely debated [<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>While the transformative potential of AI in health care is well understood, its expanded adoption raises important questions about its impact on patient care, equity, and ethics. Patients, as the ultimate beneficiaries of AI-driven health care solutions, play a critical role in shaping how AI technologies are designed, implemented, and trusted. However, patient perspectives on AI remain underexplored. Understanding these perspectives is important for many reasons. First, it ensures patient trust and acceptance, which are essential for successful implementation of AI-based health care solutions [<xref ref-type="bibr" rid="ref12">12</xref>]. Second, it helps address ethical concerns and potential biases in AI algorithms, which could lead to unequal access to care or diagnostic errors [<xref ref-type="bibr" rid="ref13">13</xref>]. 
Third, a good understanding of patients&#x2019; views on AI allows for tailoring AI solutions that align with patient needs and preferences, ultimately improving health outcomes and satisfaction [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>Despite the importance of understanding patient perspectives on AI use in clinical practice, research in this area remains limited. While several studies have explored health care professionals&#x2019; attitudes toward AI [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref22">22</xref>], comparatively little attention has been given to patients&#x2019; attitudes and concerns. Some studies have reported a generally positive patient attitude toward the use of AI in health care [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>], though they also highlight concerns about privacy and control over personal health data. Other studies have documented patient resistance [<xref ref-type="bibr" rid="ref25">25</xref>], distrust in AI [<xref ref-type="bibr" rid="ref26">26</xref>], and a preference for restricting AI use to nonclinical tasks such as administrative or scheduling functions [<xref ref-type="bibr" rid="ref27">27</xref>]. In addition, studies have also identified patient apprehensions due to perceived safety risks, threats to patient autonomy, potential increases in health care costs, algorithmic biases, and data security issues [<xref ref-type="bibr" rid="ref28">28</xref>].</p><p>This study examines patients&#x2019; knowledge levels regarding AI in health care, their comfort with AI use across clinical applications, and their attitudes toward the use of personal health information for AI purposes. Our research framework is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>. We also explore how sociodemographic characteristics, digital health literacy, and health conditions are associated with patient attitudes and comfort levels. 
Specifically, we address two research questions: (1) What are the levels of public knowledge and comfort with AI in health care, including the use of personal health data with and without consent? and (2) How do sociodemographic factors, digital health literacy, and chronic health conditions influence these attitudes? By addressing these questions, this study aims to fill critical gaps in understanding patient perspectives and to inform ethical, equitable, and effective implementation of AI solutions in health care.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Research framework. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e77501_fig01.png"/></fig></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Dataset</title><p>Data used in this study were obtained from the 2023 Canadian Digital Health Survey (CDHS) that was commissioned by Canada Health Infoway to assess Canadians&#x2019; experiences and perceptions regarding digital health services, including the use of AI in health care [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. The web-based survey was administered by Leger, one of Canada&#x2019;s leading market research firms, between November 28 and December 28, 2023. Participants, aged 16 years and older, were recruited from Leger Opinion&#x2019;s nationally representative online panel using computer-assisted web interviewing technology. 
A total of 10,130 respondents participated in the survey, which was available in both English and French.</p></sec><sec id="s2-2"><title>Ethical Considerations</title><p>The 2023 Canadian Digital Health Survey obtained informed consent from its 10,130 participants and adhered to the public opinion research standards of the Canadian Research and Insights Council and the global ESOMAR (European Society for Opinion and Marketing Research) network to ensure methodological rigor and data quality [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. Information about respondents was deidentified and anonymized to protect privacy and confidentiality. Patients provided consent for data collection and evaluation.</p></sec><sec id="s2-3"><title>Variables</title><p>The survey assessed four key variables related to patients&#x2019; perceptions of AI in health care using 4-point ordinal scales. To evaluate participants&#x2019; understanding of AI, they were asked to rate their knowledge on a scale from 1 (not at all knowledgeable) to 4 (very knowledgeable). The question &#x201C;How comfortable are you with AI being used as a tool in health care?&#x201D; was used to assess participants&#x2019; comfort level on a scale ranging from 1 (very uncomfortable) to 4 (very comfortable). A similar approach was used to assess attitudes toward the use of personal health data in AI research. Participants were asked how comfortable they felt about scientists using their personal health data for AI research when informed consent was provided, using the same 4-point scale. To examine privacy concerns, the survey also asked about comfort levels regarding AI research using deidentified health data without explicit consent. 
Participants were also asked about their comfort levels in applying AI in 7 areas: monitoring and predicting health conditions, decision support for health care professionals, precision medicine, drug and vaccine development, disease monitoring at home, tracking epidemics, and optimizing health care workflows.</p><p>Participants were asked to self-report any serious or chronic health conditions diagnosed by a health professional. The survey defined chronic illness as a condition expected to last, or already lasting, 6 months or more. Respondents could choose from a predefined list of 15 chronic conditions: chronic pain, cancer, diabetes, cardiovascular disease, Alzheimer&#x2019;s disease, developmental disabilities, obesity, mental health conditions, and physical or sensory disabilities. Additionally, participants had the option to specify any other chronic illness not listed or indicate no chronic illness. We calculated a composite score representing the total number of chronic conditions reported by each respondent.</p><p>Digital health literacy was assessed using 8 items from eHealth Literacy Scale [<xref ref-type="bibr" rid="ref32">32</xref>], which measures the ability to find, evaluate, and use health information on the internet. Items were rated on a 5-point Likert scale (1=strongly disagree to 5=strongly agree). After confirming their convergence and reliability using exploratory principal component analysis with varimax rotation (which extracted one factor, confirming unidimensionality) and Cronbach &#x03B1; (0.934), responses were summed to create a digital health literacy score, reflecting a respondent&#x2019;s overall proficiency in navigating and utilizing online health resources. 
The following sociodemographic variables were also captured in the survey: age, sex, annual household income, citizenship, race, educational attainment, and employment status.</p></sec><sec id="s2-4"><title>Analytic Sample and Nonresponse Bias</title><p>CDHS collected data from 10,130 Canadian adults on a broad range of topics related to digital health. Given this study&#x2019;s focus on AI use in health care, only 6904 respondents who provided complete responses to AI-related questions were included in the analytic sample.</p><p>To assess potential selection or nonresponse bias, <italic>&#x03C7;</italic><sup>2</sup> tests were conducted to compare included and excluded respondents across all the sociodemographic variables: age, sex, income, education, race, employment, and citizenship. The &#x03C7;<sup>2</sup> tests revealed significant differences between the overall respondents and our analytic sample across five variables (age, sex, employment, education, and income; <italic>P</italic>&#x003C;.05), with our analytic sample overrepresenting males (53.2% vs 48.6%), individuals aged 25&#x2010;54 years (51.8% vs 48.6%), higher household incomes (35.7% above CAD 100,000 vs 33.6%), higher education levels (eg, graduate college or above: 39.7% vs 37.4%), and employed respondents (61.6% vs 58.6%).</p><p>To address this bias, we derived inverse probability weights (IPW) to adjust for the likelihood of inclusion in the analytic sample. The IPW values were estimated using a logistic regression model predicting inclusion based on sociodemographic variables. These weights were then combined with the original CDHS survey design weights (which account for sampling and nonresponse at the national level) to create a composite total weight. 
This combined weighting approach ensured that both survey design and sample selection bias were accounted for in all weighted analyses.</p></sec><sec id="s2-5"><title>Statistical Analysis</title><p>All statistical analyses were conducted using STATA 18 software. Descriptive statistics summarized respondent characteristics. To evaluate potential selection bias, the IPW was derived as described above.</p><p>Ordinal logistic regression models were estimated for four AI-related attitudinal outcomes: (1) knowledge of AI in health care, (2) comfort with use of AI in health care, (3) comfort with use of personal health data for AI with consent, and (4) comfort with use of personal health data for AI without consent. Each model was estimated three ways: unweighted, nonresponse-adjusted weighted (IPW), and fully weighted (combining IPW and CDHS-provided survey design weights) to evaluate robustness. All weighted models were estimated using survey (svy) commands in STATA to account for the complex survey design. Potential multicollinearity among predictors was assessed using Cramer&#x2019;s V for categorical variables and variance inflation factors from proxy linear regressions.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>The demographic profile of survey respondents is presented in <xref ref-type="table" rid="table1">Table 1</xref>. Our dataset had a plurality of respondents aged 35&#x2010;54 years (2412, 34.94%), followed by those aged 65+ years (1585, 22.96%) and 55&#x2010;64 years (1211, 17.54%). There was a slight majority of male respondents (53.2%, 3673), with female respondents comprising 46.8% (3231) of the sample. Regarding household income, 4699 (68.06%) reported earnings of CAD 60,000 or more, while 2205 (31.94%) earned less. The majority were Canadian citizens (6581, 95.32%), with noncitizens (323, 4.68%) forming a smaller proportion. 
Racially, the sample had predominantly White respondents (5104, 73.93%), followed by Asian-origin (972, 14.08%), other (575, 8.33%), and Black or African-origin (253, 3.66%) respondents. Education levels varied, with most having at least some college education (2831, 41.01%) or a graduate degree (2738, 39.66%). Fewer had a high school diploma (1174, 17%) or less than high school education (161, 2.33%). Employment status showed that 4253 (61.6%) were employed.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Sociodemographic characteristics of respondents (n=6904).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Demographic variable and category</td><td align="left" valign="bottom">n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Age group (y)</td><td align="left" valign="top"/></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>16&#x2010;24</td><td align="left" valign="top">527 (7.63)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>25&#x2010;34</td><td align="left" valign="top">1169 (16.93)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>35&#x2010;54</td><td align="left" valign="top">2412 (34.94)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>55&#x2010;64</td><td align="left" valign="top">1211 (17.54)</td></tr><tr><td align="char" char="." 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>65+</td><td align="left" valign="top">1585 (22.96)</td></tr><tr><td align="left" valign="top">Sex</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">3231 (46.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">3673 (53.2)</td></tr><tr><td align="left" valign="top">Household income (CAD)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;60,000</td><td align="left" valign="top">2205 (31.94)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>60,000&#x2010;100,000</td><td align="left" valign="top">2237 (32.4)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;100,000</td><td align="left" valign="top">2462 (35.66)</td></tr><tr><td align="left" valign="top">Citizenship</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Citizen</td><td align="left" valign="top">6581 (95.32)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Noncitizen</td><td align="left" valign="top">323 (4.68)</td></tr><tr><td align="left" valign="top">Race</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Asian 
origin</td><td align="left" valign="top">972 (14.08)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black/African origin</td><td align="left" valign="top">253 (3.66)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">575 (8.33)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="top">5104 (73.93)</td></tr><tr><td align="left" valign="top">Education</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Less than high school</td><td align="left" valign="top">161 (2.33)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High school</td><td align="left" valign="top">1174 (17)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>College level</td><td align="left" valign="top">2831 (41.01)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Graduate college or above</td><td align="left" valign="top">2738 (39.66)</td></tr><tr><td align="left" valign="top">Employment</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Employed</td><td align="left" valign="top">4253 (61.6)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unemployed</td><td align="left" valign="top">2651 (38.4)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>A currency 
exchange rate of CAD $1=approximately US $0.72 is applicable.</p></fn></table-wrap-foot></table-wrap><p>Our analysis found varying levels of knowledge and comfort regarding AI use in health care among respondents (<xref ref-type="fig" rid="figure2">Figure 2</xref>). While a plurality (2919, 42.3%) reported being moderately knowledgeable about AI, only 7.8% (542) considered themselves very knowledgeable. Conversely, nearly half of the respondents (49.9%) considered themselves less knowledgeable, with 38.7% (2669) reporting they were &#x201C;not very knowledgeable&#x201D; and 11.2% (774) reporting &#x201C;not at all knowledgeable.&#x201D;</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Distribution of self-reported AI knowledge and comfort levels among Canadian adults (n=6904) in the 2023 Canadian Digital Health Survey. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e77501_fig02.png"/></fig><p>When it comes to AI use in health care, 44.6% (3077) of respondents reported being moderately comfortable, while 42.4% (2927) expressed some level of discomfort. Comfort levels increased when AI involved use of personal health data under informed consent, with 64.7% (4466, moderately or very comfortable) supporting such AI use. However, comfort levels declined when AI research used deidentified data without consent, with only 47.4% (3272) reporting comfort and 52.6% (3632) expressing discomfort. When asked about comfort levels pertaining to AI use in various health care areas (<xref ref-type="fig" rid="figure3">Figure 3</xref>), moderate comfort levels (40%&#x2010;47%) were observed across all areas. 
However, respondents expressed relatively greater support for AI use in tracking epidemics and optimizing health care workflows, where a higher proportion of respondents felt &#x201C;very comfortable&#x201D; compared to other areas.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Comfort levels with AI applications in specific health care areas among Canadian adults (n=6904) in the 2023 Canadian Digital Health Survey. AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e77501_fig03.png"/></fig><p><xref ref-type="table" rid="table2">Table 2</xref> presents results from the fully weighted ordinal logistic regression models assessing associations between respondents&#x2019; self-reported levels of knowledge about AI and their sociodemographic characteristics, digital health literacy, and health conditions. These results were consistent with those from the unweighted and nonresponse-adjusted models, showing similar effect sizes and significance patterns (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Ordinal regression results (fully weighted): association between AI knowledge levels and sociodemographic factors, digital health literacy, and health conditions in the 2023 Canadian Digital Health Survey.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Predictors and category</td><td align="left" valign="bottom">OR (95% CI)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Age group (ref: 16&#x2010;24 y)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>25&#x2010;34 y</td><td align="left" 
valign="top">0.69 (0.54&#x2010;0.87)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>35&#x2010;54 y</td><td align="left" valign="top">0.59 (0.47&#x2010;0.73)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>55&#x2010;64 y</td><td align="left" valign="top">0.44 (0.35&#x2010;0.56)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>65+ y</td><td align="left" valign="top">0.39 (0.31&#x2010;0.49)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Sex (ref: female)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">1.57 (1.42&#x2010;1.73)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Household income (ref: CAD 60,000&#x2010;100,000)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>CAD &#x003E;100,000</td><td align="left" valign="top">1.29 (1.14&#x2010;1.45)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>CAD &#x003C;60,000</td><td align="left" valign="top">1.07 (0.94&#x2010;1.22)</td><td align="left" valign="top">.34</td></tr><tr><td align="left" valign="top">Citizenship (ref: citizen)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Noncitizen</td><td align="left" valign="top">1.71 (1.32&#x2010;2.21)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Race (ref: Asian)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black/African origin</td><td align="left" valign="top">1.00 (0.74&#x2010;1.36)</td><td align="left" valign="top">.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">0.93 (0.72&#x2010;1.20)</td><td align="left" valign="top">.56</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="top">0.79 (0.68&#x2010;0.93)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Education (ref: college level)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Graduate college and higher</td><td align="left" valign="top">1.43 (1.27&#x2010;1.60)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High school</td><td align="left" valign="top">1.03 (0.89&#x2010;1.20)</td><td align="left" valign="top">.70</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;High school</td><td align="left" valign="top">0.97 (0.66&#x2010;1.41)</td><td align="left" valign="top">.87</td></tr><tr><td align="left" valign="top">Employment (ref: 
employed)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unemployed</td><td align="left" valign="top">1.09 (0.95&#x2010;1.24)</td><td align="left" valign="top">.21</td></tr><tr><td align="left" valign="top">Digital health literacy</td><td align="left" valign="top">1.08 (1.07&#x2010;1.09)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Number of chronic conditions</td><td align="left" valign="top">1.08 (1.03&#x2010;1.12)</td><td align="left" valign="top">&#x003C;.001</td></tr></tbody></table></table-wrap><p>Age was a significant predictor, with respondents in older age groups exhibiting lower odds of having higher AI knowledge compared to those aged 16&#x2010;24 years: 25&#x2010;34 years (odds ratio [OR] 0.69, 95% CI 0.54&#x2010;0.87; <italic>P</italic>&#x003C;.001), 35&#x2010;54 years (OR 0.59, 95% CI 0.47&#x2010;0.73; <italic>P</italic>&#x003C;.001), 55&#x2010;64 years (OR 0.44, 95% CI 0.35&#x2010;0.56; <italic>P</italic>&#x003C;.001), and 65+ years (OR 0.39, 95% CI 0.31&#x2010;0.49; <italic>P</italic>&#x003C;.001). Men were significantly more likely to report higher AI knowledge than women (OR 1.57, 95% CI 1.42&#x2010;1.73; <italic>P</italic>&#x003C;.001).</p><p>Among socioeconomic factors, those with higher annual household incomes (CAD &#x003E;100,000) exhibited higher odds for greater AI knowledge (OR 1.29, 95% CI 1.14&#x2010;1.45; <italic>P</italic>&#x003C;.001), while those with lower incomes (CAD &#x003C;$60,000) showed no significant difference (OR 1.07, 95% CI 0.94&#x2010;1.22; <italic>P</italic>=.34). Noncitizens exhibited higher AI knowledge levels (OR 1.71, 95% CI 1.32&#x2010;2.21; <italic>P</italic>&#x003C;.001) compared to citizens. 
Race also was a significant factor, with White respondents exhibiting lower odds of AI knowledge (OR 0.79, 95% CI 0.68&#x2010;0.93; <italic>P</italic>&#x003C;.001) relative to Asian-origin respondents, while differences for Black or African-origin and Other groups were not statistically significant.</p><p>Education was another key predictor, with graduates showing significantly higher AI knowledge (OR 1.43, 95% CI 1.27&#x2010;1.60; <italic>P</italic>&#x003C;.001) compared to those with a college-level education, while respondents with only a high school education or less showed no significant difference. Employment status was not significantly associated with the odds of reporting higher AI knowledge.</p><p>Higher digital health literacy was strongly associated with increased AI knowledge (OR 1.08, 95% CI 1.07&#x2010;1.09; <italic>P</italic>&#x003C;.001). Additionally, respondents with more chronic health conditions had higher odds of reporting greater AI knowledge (OR 1.08, 95% CI 1.03&#x2010;1.12; <italic>P</italic>&#x003C;.001), suggesting that health experiences may influence awareness of AI applications.</p><p><xref ref-type="table" rid="table3">Table 3</xref> presents results from the fully weighted ordinal logistic regression models, each examining the association between respondents&#x2019; comfort levels with AI in health care, the use of personal health data for AI with and without consent, and key factors including sociodemographics, digital health literacy, and health conditions. 
Nonresponse weighted and unweighted models (Tables 4&#x2013;6 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) yielded results similar to the fully weighted analyses, supporting the sensitivity and robustness of the findings.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Ordinal regression results (fully weighted): associations between AI comfort levels, use of personal health data, and sociodemographic factors, digital health literacy, and health conditions in 2023 Canadian Digital Health Survey.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom"/><td align="left" valign="bottom" colspan="2">Model 1: Comfort level with the use of AI in health care</td><td align="left" valign="bottom" colspan="2">Model 2: Comfort level with the use of personal health data in AI with consent</td><td align="left" valign="bottom" colspan="2">Model 3: Comfort level with the use of personal health data in AI without consent</td></tr><tr><td align="left" valign="top">Predictor and category</td><td align="left" valign="top">OR (95% CI)</td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">OR (95% CI)</td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">OR (95% CI)</td><td align="left" valign="top"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Age group (years) (ref=16&#x2010;24)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>25&#x2010;34</td><td align="left" valign="top">1.00 (0.80&#x2010;1.25)</td><td align="left" valign="top">.99</td><td align="left" valign="top">0.83 (0.67&#x2010;1.04)</td><td 
align="left" valign="top">.10</td><td align="left" valign="top">0.83 (0.68&#x2010;1.03)</td><td align="left" valign="top">.09</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>35&#x2010;54</td><td align="left" valign="top">0.91 (0.74&#x2010;1.12)</td><td align="left" valign="top">.35</td><td align="left" valign="top">0.72 (0.59&#x2010;0.89)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">0.73 (0.60&#x2010;0.88)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>55&#x2010;64</td><td align="left" valign="top">1.02 (0.82&#x2010;1.28)</td><td align="left" valign="top">.84</td><td align="left" valign="top">0.93 (0.74&#x2010;1.15)</td><td align="left" valign="top">.49</td><td align="left" valign="top">0.77 (0.63&#x2010;0.95)</td><td align="left" valign="top">.01</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>65+</td><td align="left" valign="top">1.47 (1.17&#x2010;1.84)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.22 (0.97&#x2010;1.54)</td><td align="left" valign="top">.09</td><td align="left" valign="top">0.96 (0.78&#x2010;1.20)</td><td align="left" valign="top">.74</td></tr><tr><td align="left" valign="top">Sex (ref=female)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">1.50 (1.36&#x2010;1.65)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.39 (1.27&#x2010;1.53)</td><td align="left"
valign="top">&#x003C;.001</td><td align="left" valign="top">1.56 (1.42&#x2010;1.71)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Household income (ref=60,000&#x2010;100,000)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;100,000</td><td align="left" valign="top">1.21 (1.08&#x2010;1.37)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.16 (1.03&#x2010;1.30)</td><td align="left" valign="top">.01</td><td align="left" valign="top">1.05 (0.94&#x2010;1.18)</td><td align="left" valign="top">.37</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;60,000</td><td align="left" valign="top">0.87 (0.77&#x2010;0.99)</td><td align="left" valign="top">.03</td><td align="left" valign="top">0.83 (0.74&#x2010;0.95)</td><td align="left" valign="top">.01</td><td align="left" valign="top">0.86 (0.76&#x2010;0.97)</td><td align="left" valign="top">.02</td></tr><tr><td align="left" valign="top">Citizenship (ref=citizen)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Noncitizen</td><td align="left" valign="top">1.49 (1.18&#x2010;1.89)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.20 (0.96&#x2010;1.49)</td><td align="left" valign="top">.11</td><td align="left" valign="top">1.28 (1.02&#x2010;1.61)</td><td align="left" valign="top">.03</td></tr><tr><td align="left" 
valign="top">Race (ref=Asian)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black/African origin</td><td align="left" valign="top">0.96 (0.71&#x2010;1.28)</td><td align="left" valign="top">.76</td><td align="left" valign="top">0.78 (0.59&#x2010;1.02)</td><td align="left" valign="top">.07</td><td align="left" valign="top">0.71 (0.54&#x2010;0.94)</td><td align="left" valign="top">.02</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">0.78 (0.61&#x2010;1.00)</td><td align="left" valign="top">.05</td><td align="left" valign="top">0.77 (0.62&#x2010;0.97)</td><td align="left" valign="top">.03</td><td align="left" valign="top">0.83 (0.67&#x2010;1.04)</td><td align="left" valign="top">.11</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="top">0.77 (0.66&#x2010;0.89)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">0.78 (0.68&#x2010;0.90)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">0.69 (0.60&#x2010;0.80)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Education (ref=college level)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Graduate college and higher</td><td align="left" valign="top">1.29 
(1.15&#x2010;1.44)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.25 (1.12&#x2010;1.40)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.08 (0.97&#x2010;1.21)</td><td align="left" valign="top">.15</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High school</td><td align="left" valign="top">0.90 (0.77&#x2010;1.05)</td><td align="left" valign="top">.17</td><td align="left" valign="top">0.89 (0.76&#x2010;1.03)</td><td align="left" valign="top">.11</td><td align="left" valign="top">0.90 (0.78&#x2010;1.04)</td><td align="left" valign="top">.15</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003C;High school</td><td align="left" valign="top">0.88 (0.62&#x2010;1.23)</td><td align="left" valign="top">.44</td><td align="left" valign="top">1.08 (0.76&#x2010;1.53)</td><td align="left" valign="top">.66</td><td align="left" valign="top">0.75 (0.54&#x2010;1.05)</td><td align="left" valign="top">.09</td></tr><tr><td align="left" valign="top">Employment status (ref=employed)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unemployed</td><td align="left" valign="top">1.01 (0.88&#x2010;1.14)</td><td align="left" valign="top">.94</td><td align="left" valign="top">1.06 (0.93&#x2010;1.21)</td><td align="left" valign="top">.38</td><td align="left" valign="top">0.89 (0.78&#x2010;1.01)</td><td align="left" valign="top">.07</td></tr><tr><td align="left" valign="top">Digital health literacy</td><td align="left" valign="top">1.06 (1.05&#x2010;1.07)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.05 (1.04&#x2010;1.06)</td><td align="left"
valign="top">&#x003C;.001</td><td align="left" valign="top">1.04 (1.03&#x2010;1.05)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Number of chronic conditions</td><td align="left" valign="top">1.04 (1.00&#x2010;1.08)</td><td align="left" valign="top">.04</td><td align="left" valign="top">1.07 (1.03&#x2010;1.11)</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">1.03 (0.99&#x2010;1.07)</td><td align="left" valign="top">.12</td></tr></tbody></table></table-wrap><p>Age showed a significant association with comfort levels with AI use in health care. In Model 1, older adults aged 65+ years exhibited higher odds of greater comfort with AI in health care (OR 1.47, 95% CI 1.17&#x2010;1.84; <italic>P</italic>&#x003C;.001) compared to respondents aged 16&#x2010;24 years. In Model 2, respondents aged 35&#x2010;54 years (OR 0.72, 95% CI 0.59&#x2010;0.89; <italic>P</italic>&#x003C;.001) exhibited lower odds of comfort when personal health data were used in AI with consent, while other age groups showed no statistically significant difference. 
In Model 3, comfort declined further among respondents aged 35&#x2010;54 years (OR 0.73, 95% CI 0.60&#x2010;0.88; <italic>P</italic>&#x003C;.001) and 55&#x2010;64 years (OR 0.77, 95% CI 0.63&#x2010;0.95; <italic>P</italic>=.01) when personal health data were used in AI without consent, suggesting greater sensitivity to consent among middle-aged adults.</p><p>Sex was a consistent predictor across all three models, with men exhibiting higher odds of greater comfort of AI use in health care than women (Model 1: OR 1.50, 95% CI 1.36&#x2010;1.65; <italic>P</italic>&#x003C;.001; Model 2: OR 1.39, 95% CI 1.27&#x2010;1.53; <italic>P</italic>&#x003C;.001; Model 3: OR 1.56, 95% CI 1.42&#x2010;1.71; <italic>P</italic>&#x003C;.001), indicating that men consistently report greater comfort with AI use in health when personal health data were used irrespective of consent.</p><p>Respondents with higher annual household incomes (above CAD 100,000) were significantly more likely to be comfortable with AI use in health care (OR 1.21, 95% CI 1.08&#x2010;1.37; <italic>P</italic>&#x003C;.001) and with the use of personal data with consent (OR 1.16, 95% CI 1.03&#x2010;1.30; <italic>P</italic>=.01) when compared to those earning between CAD 60,000 and 100,000. Further, lower-income respondents (CAD &#x003C;60,000) consistently reported lower comfort levels across all three models (OR range 0.83&#x2010;0.87, <italic>P</italic>&#x003C;.05), suggesting that financial disparities may influence attitudes toward AI in health care.</p><p>We also found citizenship status in Canada to be a significant predictor in 2 out of 3 models. 
Noncitizens exhibited higher odds of comfort with AI use in health care (OR 1.49, 95% CI 1.18&#x2010;1.89; <italic>P</italic>&#x003C;.001) and when personal health data were used without consent (OR 1.28, 95% CI 1.02&#x2010;1.61; <italic>P</italic>=.03), though the association was not significant in the with-consent model (OR 1.20, 95% CI 0.96&#x2010;1.49; <italic>P</italic>=.11). Overall, these findings suggest that noncitizens may perceive AI applications in health care more positively than citizens and are likely to exhibit more comfort level in personal health data being used for AI applications in healthcare irrespective of the consent.</p><p>Compared with Asian-origin respondents, White respondents exhibited lower odds of comfort across all models (OR range=0.69&#x2010;0.78, <italic>P</italic>&#x003C;.001). Those identifying as &#x201C;Other&#x201D; racial groups also had lower odds in Models 1 and 2 (OR range=0.77&#x2010;0.78, <italic>P</italic>&#x003C;.05). For Black or African-origin respondents, the association was significant only in Model 3 (OR 0.71, 95% CI 0.54&#x2010;0.94; <italic>P</italic>=.02), indicating reduced comfort when personal health data were used without consent.</p><p>Our analysis also showed higher educational attainment to be positively associated with comfort with AI use in health care and when personal health data were used with consent. Respondents with graduate-level or more education were significantly more comfortable (Model 1: OR 1.29, 95% CI 1.15&#x2010;1.44; <italic>P</italic>&#x003C;.001; Model 2: OR 1.25, 95% CI 1.12&#x2010;1.40; <italic>P</italic>&#x003C;.001). Those with only a high school education or lower did not show significant differences compared to the reference group (college-level education).</p><p>Digital health literacy emerged as a strong and consistent predictor across all three models. 
Each one-unit increase in digital literacy was associated with a 4%&#x2010;6% increase in odds of greater comfort (Model 1: OR 1.06, 95% CI 1.05&#x2010;1.07; <italic>P</italic>&#x003C;.001; Model 2: OR 1.05, 95% CI 1.04&#x2010;1.06; <italic>P</italic>&#x003C;.001; Model 3: OR 1.04, 95% CI 1.03&#x2010;1.05; <italic>P</italic>&#x003C;.001), indicating that individuals with greater proficiency in using digital health tools were more comfortable with AI use in health care, both in general health care settings and when personal health data were involved.</p><p>The number of chronic health conditions was positively associated with comfort in Models 1 (OR 1.04, 95% CI 1.00&#x2010;1.08; <italic>P</italic>=.04) and 2 (OR 1.07, 95% CI 1.03&#x2010;1.11; <italic>P</italic>&#x003C;.001) but not in Model 3 (OR 1.03, 95% CI 0.99&#x2010;1.07; <italic>P</italic>=.12), suggesting that individuals with multiple chronic illnesses were more comfortable with AI use in health care, particularly when personal data were used with consent. Employment status was not significantly associated with AI comfort in any of the three models.</p></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>To our knowledge, this is one of the first few studies to examine public attitudes toward use of AI in health care, with specific focus on the influence of sociodemographic, digital health literacy, and health-related factors. Overall, study respondents reported mixed levels of knowledge about AI, with a considerable proportion (42.39%) expressing discomfort with AI use in health care. When personal health data were used for AI solutions with consent, the proportion of individuals comfortable with AI use increased (64.69%), and when AI applications used personal deidentified health data without consent, a higher proportion (52.61%) expressed discomfort. 
A relatively higher proportion of respondents expressed greater comfort when AI was used in nonclinical areas like tracking epidemics and for improving healthcare workflows.</p><p>We found significant variations in knowledge levels of AI and comfort levels pertaining to AI use in health care based on sociodemographic, digital literacy, and the number of health conditions. Our results indicate that men, noncitizens, higher-income respondents, and respondents with greater digital health literacy exhibited higher odds of reporting comfort with AI use in health care and with the use of personal health data for AI. Older adults (65+ y) demonstrated higher comfort with AI use in health care, while younger (25&#x2013;34 y) and middle-aged (35&#x2013;54 y) adults were less comfortable with AI using their personal data, especially without consent.</p><p>Compared to Asian-origin respondents, White and Other racial groups had significantly lower comfort levels across models, while Black or African-origin respondents were notably less comfortable, only when personal health data were used for AI applications without consent. This finding suggests that lower comfort among Black respondents is not a general discomfort with AI but rather a heightened sensitivity to nonconsensual data use, thus underscoring the critical importance of transparency and opt-in data policies to foster trust among minority groups. Our findings also indicated that noncitizens exhibited higher comfort levels with AI use in health care as compared to Canadian citizens. The observed higher comfort among Asian-origin and noncitizen respondents and lower comfort among Black respondents suggests that broader cultural or experiential factors may influence attitudes toward AI in health care. Future qualitative or mixed-methods studies are needed to explore these factors. 
Prior research has shown that historical experiences, prior exposure to technology [<xref ref-type="bibr" rid="ref33">33</xref>], and privacy concerns shape levels of trust in health technologies, particularly among minority populations [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Additionally, there is also some evidence that suggests that willingness to share personal health data for AI use depends strongly on the institution collecting the data and its intended purpose [<xref ref-type="bibr" rid="ref24">24</xref>].</p><p>While digital health literacy improved comfort levels with use of AI, we also found that individuals with multiple health conditions were more accepting of AI when personal health data use was consensual. Individuals with multiple chronic conditions may have greater familiarity with varied health technologies and tools like wearables due to their increased prevalence [<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref39">39</xref>], promoting appreciation for AI&#x2019;s potential in managing complex conditions.</p></sec><sec id="s4-2"><title>Implications</title><p>One of the critical challenges in deploying AI solutions in health care is ensuring fairness and reducing algorithmic bias, which often arises from unrepresentative training datasets [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]. To build AI models that produce accurate, equitable, and generalizable outcomes, they must be trained on large, diverse, and high-quality datasets that reflect the complete range of patient demographics and health conditions [<xref ref-type="bibr" rid="ref43">43</xref>]. 
Our findings point to the importance of placing explicit patient consent at the core of all efforts in developing AI solutions.</p><p>Health care institutions and policymakers must establish standardized protocols for obtaining patient consent for AI use, ensuring that data collection aligns with ethical and legal frameworks (eg, HIPAA [Health Insurance Portability and Accountability Act] in USA, PIPEDA [Personal Information Protection and Electronic Documents Act] in Canada, and GDPR [General Data Protection Regulation] in Europe) [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. These policies must define whether patient data is being used for research, commercial development, or clinical decision-making. They must also clarify how long the data will be stored, who can access it, and whether patients have the right to withdraw consent at any time [<xref ref-type="bibr" rid="ref46">46</xref>]. Without well-defined guidelines, the risk of unauthorized data usage and breaches increases, undermining public confidence in health care AI.</p><p>Even when patients provide consent, strong privacy protections must be in place, particularly when data are pooled across multiple health systems. There is a growing concern about deidentification and whether anonymized health data can still be reidentified using advanced AI techniques [<xref ref-type="bibr" rid="ref47">47</xref>]. To mitigate these risks, health care institutions must implement privacy-preserving solutions, such as federated learning, where AI models are trained across decentralized data sources without transferring raw patient data [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. 
Blockchain-based consent management can offer a secure way for patients to track and manage their data access, while strict data governance frameworks are essential to ensure AI developers use health data more responsibly.</p><p>This study shows that comfort with AI in health care is strongly influenced by sociodemographic factors and digital literacy. Individuals who trust AI and understand how it works are more likely to support AI-driven health care applications, while those with privacy concerns or lower digital literacy may resist AI use in health care, specifically when personal health data is used without consent. Addressing these concerns through educational initiatives, transparent policies, and patient engagement strategies can help build public confidence in AI solutions in health care.</p><p>Our findings also indicate that respondents were significantly less comfortable with AI use when personal health data were used without explicit consent. This highlights a crucial ethical dilemma&#x2014;even if deidentified, patient data still carries risks if used without oversight or patient involvement. Future research and policy discussions should explore how much control patients should have over their deidentified data, what level of transparency AI developers must provide to patients, and how AI models trained on patient data should be evaluated for fairness and accountability. Algorithmic bias, as seen in lower comfort among certain racial groups like African Americans, especially when consent is absent, could exacerbate health care disparities if AI models are trained on unrepresentative datasets [<xref ref-type="bibr" rid="ref41">41</xref>]. Mitigating algorithmic bias through diverse dataset inclusion and continuous performance monitoring can help reduce disparities [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. 
Accountability in clinical decision-making is critical to ensure that AI supports, rather than overrides, clinician judgment, while prioritizing patient autonomy in data use strengthens trust [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. These ethical challenges highlight the need for aligning AI deployment with patient expectations and equitable outcomes.</p><p>To build trustworthy AI-enabled health care solutions, policymakers and health administrators need to design targeted public awareness campaigns, co-developed with patient advocates, that clearly explain AI&#x2019;s clinical role and how it uses patient data [<xref ref-type="bibr" rid="ref54">54</xref>]. In addition, implementing opt-in consent policies [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>] can help alleviate patient fears about misuse of their data. Culturally tailored digital literacy programs can also help boost patient confidence about AI use in health care. Beyond patient engagement, standardized bias audits for all clinical AI tools [<xref ref-type="bibr" rid="ref52">52</xref>], along with establishing patient review boards or advisory committees to gather patient feedback and assess ethical implications before deployment, can be effective [<xref ref-type="bibr" rid="ref57">57</xref>].</p></sec><sec id="s4-3"><title>Limitations</title><p>This study has several important limitations. First, the cross-sectional design captures public attitudes at a single point in time. The survey was done at the end of 2023 in Canada, when new generative AI technologies were still emerging. As advances in AI technologies and public awareness evolve, attitudes may change, making longitudinal studies necessary. Second, the self-reported nature of the data may introduce bias. Respondents could have misestimated their AI knowledge and comfort levels due to social desirability. 
Additionally, self-reported digital literacy may not accurately reflect actual proficiency in digital health technologies. Third, the study explored respondent attitudes toward AI, without assessing whether they had any prior exposure to any AI health care tools, such as chatbots. Fourth, though we had a fairly large sample size, it may not be representative of the general population, restricting the generalizability of results. Specifically, our sample included relatively small proportions of noncitizens (4.68%) and Black or African-origin respondents (3.66%), which reduces statistical power for these subgroups. Consequently, subgroup findings should be interpreted with caution. Fifth, our operationalization of chronic health conditions as a composite count is a measurement limitation. Different health conditions, based on their nature and severity, could influence varied levels of technological use and engagement. Future research could explore how specific health conditions, and their nature and severity, influence attitudes toward AI. We also acknowledge possible nonresponse bias, as over 3226 cases were removed due to missing answers on AI-related questions. This reduction in analytic sample size may have excluded individuals with systematically different attitudes toward AI. Although weighting adjustments were used to correct for this, some bias may remain, as the weighting cannot account for unobserved factors. For instance, respondents who systematically avoided answering AI-related questions may hold strong, unmeasured attitudes such as anxiety or mistrust toward AI, which could bias our analytic sample. Finally, while the study examined broad sociodemographic and health factors, it did not delve into more specific determinants of AI trust, such as ethical concerns, data security apprehensions, or past experiences with health care technologies. 
Future research could explore these in greater detail to better understand the specific reasons behind patient acceptance of AI in health care. Additional investigation through qualitative or mixed-method studies could shed light on specific nuances that shape patient attitudes toward AI use in health care.</p></sec><sec id="s4-4"><title>Conclusions</title><p>In conclusion, this study documents moderate levels of knowledge and comfort among the public regarding AI use in health care. Further, it highlights how sociodemographic characteristics, digital literacy, and health conditions are associated with public knowledge and comfort levels regarding AI use in health care. Our findings suggest significant socioeconomic disparities in comfort levels with AI use in health care, while concerns persist around AI use without patient consent. These findings highlight the importance of transparent policies, patient education, and ethical data governance to improve public trust in AI-driven health care.</p></sec></sec></body><back><notes><sec><title>Funding</title><p>The authors received no funding for this study.</p></sec><sec><title>Data Availability</title><p>All data and survey materials associated with this study are publicly available at the following sites [<xref ref-type="bibr" rid="ref30">30</xref>].</p></sec></notes><fn-group><fn fn-type="con"><p>RC and EM designed and conceptualized the study. LT preprocessed the survey data, and LT and RC performed data analysis. RC and EM wrote the manuscript. 
All authors had full access to the data and had final responsibility for the manuscript submitted for publication.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CDHS</term><def><p>Canadian Digital Health Survey</p></def></def-item><def-item><term id="abb3">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb4">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb5">IPW</term><def><p>inverse probability weight</p></def></def-item><def-item><term id="abb6">OR</term><def><p>odds ratio</p></def></def-item><def-item><term id="abb7">PIPEDA</term><def><p>Personal Information Protection and Electronic Documents Act</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davenport</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kalakota</surname><given-names>R</given-names> </name></person-group><article-title>The potential for artificial intelligence in healthcare</article-title><source>Future Healthcare J</source><year>2019</year><month>06</month><volume>6</volume><issue>2</issue><fpage>94</fpage><lpage>98</lpage><pub-id pub-id-type="doi">10.7861/futurehosp.6-2-94</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bekbolatova</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mayer</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ong</surname><given-names>CW</given-names> </name><name 
name-style="western"><surname>Toma</surname><given-names>M</given-names> </name></person-group><article-title>Transformative potential of AI in healthcare: definitions, applications, and navigating the ethical landscape and public perspectives</article-title><source>Healthcare (Basel)</source><year>2024</year><month>01</month><day>5</day><volume>12</volume><issue>2</issue><fpage>125</fpage><pub-id pub-id-type="doi">10.3390/healthcare12020125</pub-id><pub-id pub-id-type="medline">38255014</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alizadehsani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Khosravi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Roshanzamir</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Coronary artery disease detection using artificial intelligence techniques: a survey of trends, geographical differences and diagnostic features 1991-2020</article-title><source>Comput Biol Med</source><year>2021</year><month>01</month><volume>128</volume><fpage>104095</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.104095</pub-id><pub-id pub-id-type="medline">33217660</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dentamaro</surname><given-names>V</given-names> </name><name name-style="western"><surname>Impedovo</surname><given-names>D</given-names> </name><name name-style="western"><surname>Musti</surname><given-names>L</given-names> </name><name name-style="western"><surname>Pirlo</surname><given-names>G</given-names> </name><name name-style="western"><surname>Taurisano</surname><given-names>P</given-names> </name></person-group><article-title>Enhancing early Parkinson&#x2019;s disease detection 
through multimodal deep learning and explainable AI: insights from the PPMI database</article-title><source>Sci Rep</source><year>2024</year><month>09</month><day>9</day><volume>14</volume><issue>1</issue><fpage>20941</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-70165-4</pub-id><pub-id pub-id-type="medline">39251639</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blanco-Gonz&#x00E1;lez</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cabez&#x00F3;n</surname><given-names>A</given-names> </name><name name-style="western"><surname>Seco-Gonz&#x00E1;lez</surname><given-names>A</given-names> </name><etal/></person-group><article-title>The role of AI in drug discovery: challenges, opportunities, and strategies</article-title><source>Pharmaceuticals (Basel)</source><year>2023</year><month>06</month><day>18</day><volume>16</volume><issue>6</issue><fpage>891</fpage><pub-id pub-id-type="doi">10.3390/ph16060891</pub-id><pub-id pub-id-type="medline">37375838</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Qureshi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Irfan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gondal</surname><given-names>TM</given-names> </name><etal/></person-group><article-title>AI in drug discovery and its clinical relevance</article-title><source>Heliyon</source><year>2023</year><month>07</month><volume>9</volume><issue>7</issue><fpage>e17575</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2023.e17575</pub-id><pub-id pub-id-type="medline">37396052</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Alowais</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Alghamdi</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Alsuhebany</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Revolutionizing healthcare: the role of artificial intelligence in clinical practice</article-title><source>BMC Med Educ</source><year>2023</year><month>09</month><day>22</day><volume>23</volume><issue>1</issue><fpage>689</fpage><pub-id pub-id-type="doi">10.1186/s12909-023-04698-z</pub-id><pub-id pub-id-type="medline">37740191</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bharel</surname><given-names>M</given-names> </name><name name-style="western"><surname>Auerbach</surname><given-names>J</given-names> </name><name name-style="western"><surname>Nguyen</surname><given-names>V</given-names> </name><name name-style="western"><surname>DeSalvo</surname><given-names>KB</given-names> </name></person-group><article-title>Transforming public health practice with generative artificial intelligence</article-title><source>Health Aff (Millwood)</source><year>2024</year><month>06</month><volume>43</volume><issue>6</issue><fpage>776</fpage><lpage>782</lpage><pub-id pub-id-type="doi">10.1377/hlthaff.2024.00050</pub-id><pub-id pub-id-type="medline">38830160</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="web"><article-title>Artificial intelligence (AI) in healthcare market size expected to reach USD 61381 bn by 2034</article-title><source>Precedence Research</source><year>2024</year><access-date>2025-02-20</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.globenewswire.com/news-release/2024/08/12/2928598/0/en/Artificial-Intelligence-AI-in-Healthcare-Market-Size-Expected-to-Reach-USD-613-81-Bn-by-2034.html">https://www.globenewswire.com/news-release/2024/08/12/2928598/0/en/Artificial-Intelligence-AI-in-Healthcare-Market-Size-Expected-to-Reach-USD-613-81-Bn-by-2034.html</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bentley</surname><given-names>SV</given-names> </name><name name-style="western"><surname>Naughtin</surname><given-names>CK</given-names> </name><name name-style="western"><surname>McGrath</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Irons</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Cooper</surname><given-names>PS</given-names> </name></person-group><article-title>The digital divide in action: how experiences of digital technology shape future relationships with artificial intelligence</article-title><source>AI Ethics</source><year>2024</year><month>11</month><volume>4</volume><issue>4</issue><fpage>901</fpage><lpage>915</lpage><pub-id pub-id-type="doi">10.1007/s43681-024-00452-3</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mennella</surname><given-names>C</given-names> </name><name name-style="western"><surname>Maniscalco</surname><given-names>U</given-names> </name><name name-style="western"><surname>De Pietro</surname><given-names>G</given-names> </name><name name-style="western"><surname>Esposito</surname><given-names>M</given-names> </name></person-group><article-title>Ethical and regulatory challenges of AI technologies in healthcare: A narrative 
review</article-title><source>Heliyon</source><year>2024</year><month>02</month><day>29</day><volume>10</volume><issue>4</issue><fpage>e26297</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e26297</pub-id><pub-id pub-id-type="medline">38384518</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Kee</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Schulz</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Sung</surname><given-names>JJY</given-names> </name></person-group><article-title>Will AI jeopardize the uniqueness of a patient? Challenges for patients&#x2019; acceptance of AI in medicine</article-title><source>Studies in Neuroscience, Psychology and Behavioral Economics</source><year>2025</year><publisher-name>Springer Nature</publisher-name><fpage>71</fpage><lpage>86</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-70355-3_6</pub-id><pub-id pub-id-type="other">9783031703546</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karimian</surname><given-names>G</given-names> </name><name name-style="western"><surname>Petelos</surname><given-names>E</given-names> </name><name name-style="western"><surname>Evers</surname><given-names>S</given-names> </name></person-group><article-title>The ethical issues of the application of artificial intelligence in healthcare: a systematic scoping review</article-title><source>AI Ethics</source><year>2022</year><month>11</month><volume>2</volume><issue>4</issue><fpage>539</fpage><lpage>551</lpage><pub-id pub-id-type="doi">10.1007/s43681-021-00131-7</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Wang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Asan</surname><given-names>O</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name></person-group><article-title>Shaping the future of chronic disease management: insights into patient needs for AI-based homecare systems</article-title><source>Int J Med Inform</source><year>2024</year><month>01</month><volume>181</volume><issue>105301</issue><fpage>105301</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2023.105301</pub-id><pub-id pub-id-type="medline">38029700</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamedani</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Moradi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kalroozi</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Evaluation of acceptance, attitude, and knowledge towards artificial intelligence and its application from the point of view of physicians and nurses: a provincial survey study in Iran: a cross-sectional descriptive-analytical study</article-title><source>Health Sci Rep</source><year>2023</year><month>09</month><volume>6</volume><issue>9</issue><fpage>e1543</fpage><pub-id pub-id-type="doi">10.1002/hsr2.1543</pub-id><pub-id pub-id-type="medline">37674620</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Al-Medfa</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Al-Ansari</surname><given-names>AMS</given-names> </name><name name-style="western"><surname>Darwish</surname><given-names>AH</given-names> </name><name 
name-style="western"><surname>Qreeballa</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Jahrami</surname><given-names>H</given-names> </name></person-group><article-title>Physicians&#x2019; attitudes and knowledge toward artificial intelligence in medicine: benefits and drawbacks</article-title><source>Heliyon</source><year>2023</year><month>04</month><volume>9</volume><issue>4</issue><fpage>e14744</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2023.e14744</pub-id><pub-id pub-id-type="medline">37035387</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Hong</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kwon</surname><given-names>SH</given-names> </name></person-group><article-title>Physician confidence in artificial intelligence: an online mobile survey</article-title><source>J Med Internet Res</source><year>2019</year><month>03</month><day>25</day><volume>21</volume><issue>3</issue><fpage>e12422</fpage><pub-id pub-id-type="doi">10.2196/12422</pub-id><pub-id pub-id-type="medline">30907742</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buck</surname><given-names>C</given-names> </name><name name-style="western"><surname>Doctor</surname><given-names>E</given-names> </name><name name-style="western"><surname>Hennrich</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>J&#x00F6;hnk</surname><given-names>J</given-names> </name><name name-style="western"><surname>Eymann</surname><given-names>T</given-names> </name></person-group><article-title>General practitioners&#x2019; attitudes toward artificial intelligence-enabled systems: interview study</article-title><source>J Med Internet Res</source><year>2022</year><month>01</month><day>27</day><volume>24</volume><issue>1</issue><fpage>e28916</fpage><pub-id pub-id-type="doi">10.2196/28916</pub-id><pub-id pub-id-type="medline">35084342</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cornelissen</surname><given-names>L</given-names> </name><name name-style="western"><surname>Egher</surname><given-names>C</given-names> </name><name name-style="western"><surname>van Beek</surname><given-names>V</given-names> </name><name name-style="western"><surname>Williamson</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hommes</surname><given-names>D</given-names> </name></person-group><article-title>The drivers of acceptance of artificial intelligence-powered care pathways among medical professionals: web-based survey study</article-title><source>JMIR Form Res</source><year>2022</year><month>06</month><day>21</day><volume>6</volume><issue>6</issue><fpage>e33368</fpage><pub-id pub-id-type="doi">10.2196/33368</pub-id><pub-id pub-id-type="medline">35727614</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huo</surname><given-names>W</given-names> </name><name name-style="western"><surname>Yuan</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Luo</surname><given-names>W</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>B</given-names> </name></person-group><article-title>Increasing acceptance of medical AI: the role of medical staff participation in AI development</article-title><source>Int J Med Inform</source><year>2023</year><month>07</month><volume>175</volume><issue>105073</issue><fpage>105073</fpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2023.105073</pub-id><pub-id pub-id-type="medline">37119693</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>AlZaabi</surname><given-names>A</given-names> </name><name name-style="western"><surname>AlMaskari</surname><given-names>S</given-names> </name><name name-style="western"><surname>AalAbdulsalam</surname><given-names>A</given-names> </name></person-group><article-title>Are physicians and medical students ready for artificial intelligence applications in healthcare?</article-title><source>Digit Health</source><year>2023</year><volume>9</volume><fpage>20552076231152167</fpage><pub-id pub-id-type="doi">10.1177/20552076231152167</pub-id><pub-id pub-id-type="medline">36762024</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ng</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Maduranayagam</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Suthakar</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Attitudes and perceptions of medical researchers towards the use of artificial intelligence chatbots in the scientific process: an international cross-sectional 
survey</article-title><source>Lancet Digit Health</source><year>2025</year><month>01</month><volume>7</volume><issue>1</issue><fpage>e94</fpage><lpage>e102</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(24)00202-4</pub-id><pub-id pub-id-type="medline">39550312</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fritsch</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Blankenheim</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wahl</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Attitudes and perception of artificial intelligence in healthcare: a cross-sectional survey among patients</article-title><source>Digit Health</source><year>2022</year><volume>8</volume><fpage>20552076221116772</fpage><pub-id pub-id-type="doi">10.1177/20552076221116772</pub-id><pub-id pub-id-type="medline">35983102</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beets</surname><given-names>B</given-names> </name><name name-style="western"><surname>Newman</surname><given-names>TP</given-names> </name><name name-style="western"><surname>Howell</surname><given-names>EL</given-names> </name><name name-style="western"><surname>Bao</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>S</given-names> </name></person-group><article-title>Surveying public perceptions of artificial intelligence in health care in the United States: systematic review</article-title><source>J Med Internet Res</source><year>2023</year><month>04</month><day>4</day><volume>25</volume><issue>1</issue><fpage>e40337</fpage><pub-id pub-id-type="doi">10.2196/40337</pub-id><pub-id 
pub-id-type="medline">37014676</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Robertson</surname><given-names>C</given-names> </name><name name-style="western"><surname>Woods</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bergstrand</surname><given-names>K</given-names> </name><name name-style="western"><surname>Findley</surname><given-names>J</given-names> </name><name name-style="western"><surname>Balser</surname><given-names>C</given-names> </name><name name-style="western"><surname>Slepian</surname><given-names>MJ</given-names> </name></person-group><article-title>Diverse patients&#x2019; attitudes towards artificial intelligence (AI) in diagnosis</article-title><source>PLOS Digit Health</source><year>2023</year><month>05</month><volume>2</volume><issue>5</issue><fpage>e0000237</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000237</pub-id><pub-id pub-id-type="medline">37205713</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yakar</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ongena</surname><given-names>YP</given-names> </name><name name-style="western"><surname>Kwee</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Haan</surname><given-names>M</given-names> </name></person-group><article-title>Do people favor artificial intelligence over physicians? 
A survey among the general population and their view on artificial intelligence in medicine</article-title><source>Value Health</source><year>2022</year><month>03</month><volume>25</volume><issue>3</issue><fpage>374</fpage><lpage>381</lpage><pub-id pub-id-type="doi">10.1016/j.jval.2021.09.004</pub-id><pub-id pub-id-type="medline">35227448</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Witkowski</surname><given-names>K</given-names> </name><name name-style="western"><surname>Dougherty</surname><given-names>RB</given-names> </name><name name-style="western"><surname>Neely</surname><given-names>SR</given-names> </name></person-group><article-title>Public perceptions of artificial intelligence in healthcare: ethical concerns and opportunities for patient-centered care</article-title><source>BMC Med Ethics</source><year>2024</year><month>06</month><day>22</day><volume>25</volume><issue>1</issue><fpage>74</fpage><pub-id pub-id-type="doi">10.1186/s12910-024-01066-4</pub-id><pub-id pub-id-type="medline">38909180</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richardson</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>C</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Patient apprehensions about the use of artificial intelligence in healthcare</article-title><source>NPJ Digit Med</source><year>2021</year><month>09</month><day>21</day><volume>4</volume><issue>1</issue><fpage>140</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00509-1</pub-id><pub-id pub-id-type="medline">34548621</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation 
citation-type="web"><article-title>2023 Canadian digital health survey: what Canadians think</article-title><source>Canadian Health Infoway: Insights</source><year>2024</year><access-date>2025-03-04</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://insights.infoway-inforoute.ca/docs/component/edocman/389-2023-canadian-digital-health-survey-results-what-canadians-think?Itemid=0">https://insights.infoway-inforoute.ca/docs/component/edocman/389-2023-canadian-digital-health-survey-results-what-canadians-think?Itemid=0</ext-link></comment></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="report"><person-group person-group-type="author"><collab>Canada Health Infoway</collab></person-group><article-title>Canadian digital health survey</article-title><year>2023</year><pub-id pub-id-type="doi">10.5683/SP3/5C7HSO</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="web"><article-title>Clients&#x2019; guide to data quality in online research</article-title><source>Canadian Research Insights Council</source><access-date>2025-03-04</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.canadianresearchinsightscouncil.ca/wp-content/uploads/2024/05/CRIC-Clients-Guide-to-Data-Quality-in-Online-Research.pdf">https://www.canadianresearchinsightscouncil.ca/wp-content/uploads/2024/05/CRIC-Clients-Guide-to-Data-Quality-in-Online-Research.pdf</ext-link></comment></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Norman</surname><given-names>CD</given-names> </name><name name-style="western"><surname>Skinner</surname><given-names>HA</given-names> </name></person-group><article-title>eHEALS: The eHealth Literacy Scale</article-title><source>J Med Internet 
Res</source><year>2006</year><month>11</month><day>14</day><volume>8</volume><issue>4</issue><fpage>e27</fpage><pub-id pub-id-type="doi">10.2196/jmir.8.4.e27</pub-id><pub-id pub-id-type="medline">17213046</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boyd</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tian</surname><given-names>S</given-names> </name></person-group><article-title>Educational and labor market attainments of the 1.5- and second-generation children of east Asian immigrants in Canada</article-title><source>Am Behav Sci</source><year>2016</year><month>05</month><volume>60</volume><issue>5-6</issue><fpage>705</fpage><lpage>729</lpage><pub-id pub-id-type="doi">10.1177/0002764216632830</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fairfax</surname><given-names>FG</given-names> </name></person-group><article-title>Sharing patient-generated health data: the role of healthcare quality, mistrust, and discrimination across race-gender identities</article-title><source>J Racial Ethn Health Disparities</source><year>2025</year><month>03</month><day>10</day><pub-id pub-id-type="doi">10.1007/s40615-025-02342-8</pub-id><pub-id pub-id-type="medline">40063296</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wells</surname><given-names>L</given-names> </name><name name-style="western"><surname>Gowda</surname><given-names>A</given-names> </name></person-group><article-title>A legacy of mistrust: African Americans and the US healthcare system</article-title><source>Proceedings of UCLA 
Health</source><year>2020</year><access-date>2025-11-26</access-date><volume>24</volume><comment><ext-link ext-link-type="uri" xlink:href="https://escholarship.org/content/qt0p85k34s/qt0p85k34s.pdf">https://escholarship.org/content/qt0p85k34s/qt0p85k34s.pdf</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mattison</surname><given-names>G</given-names> </name><name name-style="western"><surname>Canfell</surname><given-names>O</given-names> </name><name name-style="western"><surname>Forrester</surname><given-names>D</given-names> </name><etal/></person-group><article-title>The influence of wearables on health care outcomes in chronic disease: systematic review</article-title><source>J Med Internet Res</source><year>2022</year><month>07</month><day>1</day><volume>24</volume><issue>7</issue><fpage>e36690</fpage><pub-id pub-id-type="doi">10.2196/36690</pub-id><pub-id pub-id-type="medline">35776492</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gagnon</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Ouellet</surname><given-names>S</given-names> </name><name name-style="western"><surname>Attisso</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Wearable devices for supporting chronic disease self-management: scoping review</article-title><source>Interact J Med Res</source><year>2024</year><month>12</month><day>9</day><volume>13</volume><issue>1</issue><fpage>e55925</fpage><pub-id pub-id-type="doi">10.2196/55925</pub-id><pub-id pub-id-type="medline">39652850</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lauche</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sibbritt</surname><given-names>D</given-names> </name><name name-style="western"><surname>Olaniran</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cook</surname><given-names>R</given-names> </name><name name-style="western"><surname>Adams</surname><given-names>J</given-names> </name></person-group><article-title>Comparison of health information technology use between American adults with and without chronic health conditions: findings from The National Health Interview Survey 2012</article-title><source>J Med Internet Res</source><year>2017</year><month>10</month><day>5</day><volume>19</volume><issue>10</issue><fpage>e335</fpage><pub-id pub-id-type="doi">10.2196/jmir.6989</pub-id><pub-id pub-id-type="medline">28982644</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chandrasekaran</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sadiq T</surname><given-names>M</given-names> </name><name name-style="western"><surname>Moustakas</surname><given-names>E</given-names> </name></person-group><article-title>Usage trends and data sharing practices of healthcare wearable devices among US adults: cross-sectional study</article-title><source>J Med Internet Res</source><year>2025</year><month>02</month><day>21</day><volume>27</volume><issue>1</issue><fpage>e63879</fpage><pub-id pub-id-type="doi">10.2196/63879</pub-id><pub-id pub-id-type="medline">39982763</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>RJ</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Williamson</surname><given-names>DFK</given-names> </name><etal/></person-group><article-title>Algorithmic fairness in artificial intelligence for medicine and healthcare</article-title><source>Nat Biomed Eng</source><year>2023</year><month>06</month><volume>7</volume><issue>6</issue><fpage>719</fpage><lpage>742</lpage><pub-id pub-id-type="doi">10.1038/s41551-023-01056-8</pub-id><pub-id pub-id-type="medline">37380750</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Agarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Bjarnadottir</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rhue</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Addressing algorithmic bias and the perpetuation of health inequities: an AI bias aware framework</article-title><source>Health Policy Technol</source><year>2023</year><month>03</month><volume>12</volume><issue>1</issue><fpage>100702</fpage><pub-id pub-id-type="doi">10.1016/j.hlpt.2022.100702</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Norori</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Aellen</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Faraci</surname><given-names>FD</given-names> </name><name name-style="western"><surname>Tzovara</surname><given-names>A</given-names> </name></person-group><article-title>Addressing bias in big data and AI for health care: a call for open 
science</article-title><source>Patterns</source><year>2021</year><month>10</month><day>8</day><volume>2</volume><issue>10</issue><fpage>100347</fpage><pub-id pub-id-type="doi">10.1016/j.patter.2021.100347</pub-id><pub-id pub-id-type="medline">34693373</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reddy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Allan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Coghlan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cooper</surname><given-names>P</given-names> </name></person-group><article-title>A governance model for the application of AI in health care</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>03</month><day>1</day><volume>27</volume><issue>3</issue><fpage>491</fpage><lpage>497</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocz192</pub-id><pub-id pub-id-type="medline">31682262</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pruski</surname><given-names>M</given-names> </name></person-group><article-title>AI-enhanced healthcare: not a new paradigm for informed consent</article-title><source>J Bioeth Inq</source><year>2024</year><month>09</month><volume>21</volume><issue>3</issue><fpage>475</fpage><lpage>489</lpage><pub-id pub-id-type="doi">10.1007/s11673-023-10320-0</pub-id><pub-id pub-id-type="medline">38300443</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Astromsk&#x0117;</surname><given-names>K</given-names> </name><name name-style="western"><surname>Pei&#x010D;ius</surname><given-names>E</given-names> 
</name><name name-style="western"><surname>Astromskis</surname><given-names>P</given-names> </name></person-group><article-title>Ethical and legal challenges of informed consent applying artificial intelligence in medical diagnostic consultations</article-title><source>AI Soc</source><year>2021</year><month>06</month><volume>36</volume><issue>2</issue><fpage>509</fpage><lpage>520</lpage><pub-id pub-id-type="doi">10.1007/s00146-020-01008-9</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Murdoch</surname><given-names>B</given-names> </name></person-group><article-title>Privacy and artificial intelligence: challenges for protecting health information in a new era</article-title><source>BMC Med Ethics</source><year>2021</year><month>09</month><day>15</day><volume>22</volume><issue>1</issue><fpage>122</fpage><pub-id pub-id-type="doi">10.1186/s12910-021-00687-3</pub-id><pub-id pub-id-type="medline">34525993</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ford</surname><given-names>E</given-names> </name><name name-style="western"><surname>Pillinger</surname><given-names>S</given-names> </name><name name-style="western"><surname>Stewart</surname><given-names>R</given-names> </name><etal/></person-group><article-title>What is the patient re-identification risk from using de-identified clinical free text data for health research?</article-title><source>AI Ethics</source><year>2025</year><volume>5</volume><issue>5</issue><fpage>4441</fpage><lpage>4454</lpage><pub-id pub-id-type="doi">10.1007/s43681-025-00681-0</pub-id><pub-id pub-id-type="medline">40978336</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Loftus</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Ruppert</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Shickel</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Federated learning for preserving data privacy in collaborative healthcare research</article-title><source>Digit Health</source><year>2022</year><volume>8</volume><fpage>20552076221134455</fpage><pub-id pub-id-type="doi">10.1177/20552076221134455</pub-id><pub-id pub-id-type="medline">36325438</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Li</surname><given-names>C</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Review on security of federated learning and its application in healthcare</article-title><source>Future Gener Comput Syst</source><year>2023</year><month>07</month><volume>144</volume><fpage>271</fpage><lpage>290</lpage><pub-id pub-id-type="doi">10.1016/j.future.2023.02.021</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nazer</surname><given-names>LH</given-names> </name><name name-style="western"><surname>Zatarah</surname><given-names>R</given-names> </name><name name-style="western"><surname>Waldrip</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Bias in artificial intelligence algorithms and recommendations for mitigation</article-title><source>PLOS Digit Health</source><year>2023</year><month>06</month><volume>2</volume><issue>6</issue><fpage>e0000278</fpage><pub-id 
pub-id-type="doi">10.1371/journal.pdig.0000278</pub-id><pub-id pub-id-type="medline">37347721</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mittermaier</surname><given-names>M</given-names> </name><name name-style="western"><surname>Raza</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Kvedar</surname><given-names>JC</given-names> </name></person-group><article-title>Bias in AI-based models for medical applications: challenges and mitigation strategies</article-title><source>NPJ Digit Med</source><year>2023</year><month>06</month><day>14</day><volume>6</volume><issue>1</issue><fpage>113</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00858-z</pub-id><pub-id pub-id-type="medline">37311802</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nouis</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Uren</surname><given-names>V</given-names> </name><name name-style="western"><surname>Jariwala</surname><given-names>S</given-names> </name></person-group><article-title>Evaluating accountability, transparency, and bias in AI-assisted healthcare decision-making: a qualitative study of healthcare professionals&#x2019; perspectives in the UK</article-title><source>BMC Med Ethics</source><year>2025</year><month>07</month><day>8</day><volume>26</volume><issue>1</issue><fpage>89</fpage><pub-id pub-id-type="doi">10.1186/s12910-025-01243-z</pub-id><pub-id pub-id-type="medline">40629303</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>H</given-names> </name></person-group><article-title>Clinical AI: opacity, 
accountability, responsibility and liability</article-title><source>AI Soc</source><year>2021</year><month>06</month><volume>36</volume><issue>2</issue><fpage>535</fpage><lpage>545</lpage><pub-id pub-id-type="doi">10.1007/s00146-020-01019-6</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nawaz</surname><given-names>FA</given-names> </name><name name-style="western"><surname>Barr</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Desai</surname><given-names>MY</given-names> </name><etal/></person-group><article-title>Promoting research, awareness, and discussion on AI in medicine using #MedTwitterAI: a longitudinal Twitter hashtag analysis</article-title><source>Front Public Health</source><year>2022</year><volume>10</volume><fpage>856571</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2022.856571</pub-id><pub-id pub-id-type="medline">35844878</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lawrence</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kuram</surname><given-names>VS</given-names> </name><name name-style="western"><surname>Levine</surname><given-names>DL</given-names> </name><etal/></person-group><article-title>Informed consent for ambient documentation using generative AI in ambulatory care</article-title><source>JAMA Netw Open</source><year>2025</year><month>07</month><day>1</day><volume>8</volume><issue>7</issue><fpage>e2522400</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2025.22400</pub-id><pub-id pub-id-type="medline">40694347</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Andreotta</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Kirkham</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rizzi</surname><given-names>M</given-names> </name></person-group><article-title>AI, big data, and the future of consent</article-title><source>AI Soc</source><year>2022</year><month>12</month><volume>37</volume><issue>4</issue><fpage>1715</fpage><lpage>1728</lpage><pub-id pub-id-type="doi">10.1007/s00146-021-01262-5</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cavalier</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Goldstein</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Ravitsky</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Ethics in patient preferences for artificial intelligence-drafted responses to electronic messages</article-title><source>JAMA Netw Open</source><year>2025</year><month>03</month><day>3</day><volume>8</volume><issue>3</issue><fpage>e250449</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2025.0449</pub-id><pub-id pub-id-type="medline">40067301</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Sensitivity analysis: comparison of unweighted, IPW-weighted, and fully weighted ordinal logistic regression results.</p><media xlink:href="jmir_v27i1e77501_app1.docx" xlink:title="DOCX File, 26 KB"/></supplementary-material></app-group></back></article>