<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e60269</article-id>
      <article-id pub-id-type="pmid">39773888</article-id>
      <article-id pub-id-type="doi">10.2196/60269</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Bias Mitigation in Primary Health Care Artificial Intelligence Models: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Coristine</surname>
            <given-names>Andrew</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bensemann</surname>
            <given-names>Joshua</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>He</surname>
            <given-names>Lingxiao</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Sasseville</surname>
            <given-names>Maxime</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Faculté des sciences infirmières, Université Laval</institution>
            <addr-line>1050 Av. de la Médecine</addr-line>
            <addr-line>Québec, QC, G1V 0A6</addr-line>
            <country>Canada</country>
            <phone>1 418 656 3356</phone>
            <email>maxime.sasseville@fsi.ulaval.ca</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1694-1414</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Ouellet</surname>
            <given-names>Steven</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2158-0043</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Rhéaume</surname>
            <given-names>Caroline</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1863-4410</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Sahlia</surname>
            <given-names>Malek</given-names>
          </name>
          <degrees>MSc, MEng</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-4633-4698</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Couture</surname>
            <given-names>Vincent</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8811-0524</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Després</surname>
            <given-names>Philippe</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff6" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4163-7353</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Paquette</surname>
            <given-names>Jean-Sébastien</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9524-6761</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Darmon</surname>
            <given-names>David</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff7" ref-type="aff">7</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4425-4163</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Bergeron</surname>
            <given-names>Frédéric</given-names>
          </name>
          <degrees>MSI</degrees>
          <xref rid="aff8" ref-type="aff">8</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0978-7420</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author">
          <name name-style="western">
            <surname>Gagnon</surname>
            <given-names>Marie-Pierre</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0782-5457</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Faculté des sciences infirmières, Université Laval</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Vitam Research Center on Sustainable Health</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Département de médecine familiale et de médecine d'urgence de la Faculté de médecine, Université Laval</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Research Center of Quebec Heart and Lungs Institute</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>École Nationale des Sciences de l’Informatique, Université de La Manouba</institution>
        <addr-line>La Manouba</addr-line>
        <country>Tunisia</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Département de physique, de génie physique et d'optique de la Faculté des sciences et de génie, Université Laval</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff7">
        <label>7</label>
        <institution>Risques, Epidémiologie, Territoires, Informations, Education et Santé. Département d’enseignement et de recherche en médecine générale, Université Côte d'Azur</institution>
        <addr-line>Nice</addr-line>
        <country>France</country>
      </aff>
      <aff id="aff8">
        <label>8</label>
        <institution>Direction des services-conseils de la Bibliothèque, Université Laval</institution>
        <addr-line>Québec, QC</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Maxime Sasseville <email>maxime.sasseville@fsi.ulaval.ca</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>7</day>
        <month>1</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e60269</elocation-id>
      <history>
        <date date-type="received">
          <day>6</day>
          <month>5</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>14</day>
          <month>9</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>26</day>
          <month>9</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>7</day>
          <month>11</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Maxime Sasseville, Steven Ouellet, Caroline Rhéaume, Malek Sahlia, Vincent Couture, Philippe Després, Jean-Sébastien Paquette, David Darmon, Frédéric Bergeron, Marie-Pierre Gagnon. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 07.01.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e60269" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) predictive models in primary health care have the potential to enhance population health by rapidly and accurately identifying individuals who should receive care and health services. However, these models also carry the risk of perpetuating or amplifying existing biases toward diverse groups. We identified a gap in the current understanding of strategies used to assess and mitigate bias in primary health care algorithms related to individuals’ personal or protected attributes.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to describe the attempts, strategies, and methods used to mitigate bias in AI models within primary health care, to identify the diverse groups or protected attributes considered, and to evaluate the results of these approaches on both bias reduction and AI model performance.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We conducted a scoping review following Joanna Briggs Institute (JBI) guidelines, searching Medline (Ovid), CINAHL (EBSCO), PsycINFO (Ovid), and Web of Science databases for studies published between January 1, 2017, and November 15, 2022. Pairs of reviewers independently screened titles and abstracts, applied selection criteria, and performed full-text screening. Discrepancies regarding study inclusion were resolved by consensus. Following reporting standards for AI in health care, we extracted data on study objectives, model features, targeted diverse groups, mitigation strategies used, and results. Using the mixed methods appraisal tool, we appraised the quality of the studies.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>After removing 585 duplicates, we screened 1018 titles and abstracts. From the remaining 189 full-text articles, we included 17 studies. The most frequently investigated protected attributes were race (or ethnicity), examined in 12 of the 17 studies, and sex (often identified as gender), typically classified as “male versus female” in 10 of the studies. We categorized bias mitigation approaches into four clusters: (1) modifying existing AI models or datasets, (2) sourcing data from electronic health records, (3) developing tools with a “human-in-the-loop” approach, and (4) identifying ethical principles for informed decision-making. Algorithmic preprocessing methods, such as relabeling and reweighing data, along with natural language processing techniques that extract data from unstructured notes, showed the greatest potential for bias mitigation. Other methods aimed at enhancing model fairness included group recalibration and the application of the equalized odds metric. However, these approaches sometimes exacerbated prediction errors across groups or led to overall model miscalibrations.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The results suggest that biases toward diverse groups are more easily mitigated when data are open-sourced, multiple stakeholders are engaged, and during the algorithm’s preprocessing stage. Further empirical studies that include a broader range of groups, such as Indigenous peoples in Canada, are needed to validate and expand upon these findings.</p>
        </sec>
        <sec sec-type="trial registration">
          <title>Trial Registration</title>
          <p>OSF Registry osf.io/9ngz5/; https://osf.io/9ngz5/</p>
        </sec>
        <sec sec-type="registered-report">
          <title>International Registered Report Identifier (IRRID)</title>
          <p>RR2-10.2196/46684</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
        <kwd>algorithms</kwd>
        <kwd>expert system</kwd>
        <kwd>decision support</kwd>
        <kwd>bias</kwd>
        <kwd>community health services</kwd>
        <kwd>primary health care</kwd>
        <kwd>health disparities</kwd>
        <kwd>social equity</kwd>
        <kwd>scoping review</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Developments in computer science have led to artificial intelligence (AI) models that learn from large datasets and can perform independent analysis [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. Significant progress has been made in these tasks with the development of machine learning (ML). This branch of AI focuses on understanding, generating, and reasoning based on data without explicit human instructions [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. Such ML algorithms use datasets known as “training datasets” to capture the patterns required for clustering tasks or predictive modeling [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. These models are now used in multiple contexts and industries to predict the likelihood of an event or to support human decision-making [<xref ref-type="bibr" rid="ref4">4</xref>]. In health care, AI models applied in radiology can potentially detect and predict the progression of cancerous tumors accurately [<xref ref-type="bibr" rid="ref5">5</xref>]. Algorithms can also be useful in community-based primary health care (CBPHC) for identifying individuals, such as heart failure or diabetes outpatients, who require specific health care services [<xref ref-type="bibr" rid="ref6">6</xref>]. As defined by the Canadian Institutes of Health Research, CBPHC encompasses a comprehensive array of services aimed at community well-being, including primary prevention (such as public health), health promotion, disease prevention, diagnosis, treatment, and management of chronic and episodic illnesses, rehabilitation support, and end-of-life care [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      <p>Despite the potential benefits of AI, such as compensating for workforce shortage and maximizing access to CBPHC [<xref ref-type="bibr" rid="ref6">6</xref>], algorithm biases toward diverse groups can hinder their application in health care settings. These biases may be perpetuated when protected attributes [<xref ref-type="bibr" rid="ref1">1</xref>], as identified by the place of residence, race/ethnicity/culture/language, occupation, gender/sex, religion, education, socioeconomic status, and social capital (PROGRESS-Plus) framework [<xref ref-type="bibr" rid="ref8">8</xref>], are underrepresented or misrepresented in the training data of algorithms [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Strategies aimed at identifying and mitigating bias, defined as a persistent inclination either in favor of or against something [<xref ref-type="bibr" rid="ref9">9</xref>], in predictive models are in development and beginning to be empirically applied [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. In computer science, attempts to achieve algorithmic fairness can involve (1) preprocessing, (2) in-processing, or (3) postprocessing strategies, such as those used in “out-of-the-box” commercial AI models [<xref ref-type="bibr" rid="ref4">4</xref>]. Academic disciplines beyond computer science, such as medicine, management, and ethics, are also closely involved in addressing issues related to identifying potential bias toward diverse groups in AI models [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. However, there remains a knowledge gap regarding which strategies and methods have been empirically applied to mitigate bias toward diverse groups in CBPHC algorithms [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>].</p>
      <p>To address this gap, we conducted a scoping review aimed at identifying and describing (1) the attempts made to mitigate bias in primary health care AI models, (2) which diverse groups or protected attributes have been considered, and (3) the results regarding bias attenuation and the overall performance of the models.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Search Strategy</title>
        <p>We conducted a scoping review informed by the Joanna Briggs Institute (JBI) [<xref ref-type="bibr" rid="ref13">13</xref>] and used the Population (or Participant), Concept, and Context Framework [<xref ref-type="bibr" rid="ref14">14</xref>], as shown in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Population (or Participant), Concept, and Context framework used for the search strategy.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="240"/>
            <col width="430"/>
            <col width="330"/>
            <thead>
              <tr valign="bottom">
                <td>PCC<sup>a</sup> elements [<xref ref-type="bibr" rid="ref14">14</xref>]</td>
                <td>Definition (per JBI<sup>b</sup> Reviewer’s Manual)</td>
                <td>PCC elements applied in this review</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Population</td>
                <td>“Important characteristics of participants, including age and other qualifying criteria” (11.2.4)</td>
                <td>Any diverse groups [<xref ref-type="bibr" rid="ref8">8</xref>] based on their personal or protected attributes [<xref ref-type="bibr" rid="ref1">1</xref>].</td>
              </tr>
              <tr valign="top">
                <td>Concept</td>
                <td>“The core concept examined by the scoping review should be clearly articulated to guide the scope and breadth of the inquiry. This may include details that pertain to elements that would be detailed in a standard systematic review, such as the ‘interventions’ or ‘phenomena of interest’” (11.2.4)</td>
                <td>Strategies, attempts, or methods for assessing and mitigating bias in artificial intelligence.</td>
              </tr>
              <tr valign="top">
                <td>Context</td>
                <td>“May include...cultural factors such as geographic location or specific racial or gender-based interests. In some cases, context may also encompass details about the specific setting.”</td>
                <td>Community-based primary health care [<xref ref-type="bibr" rid="ref7">7</xref>].</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>PCC (Population [or Participant], Concept, and Context) framework [<xref ref-type="bibr" rid="ref14">14</xref>].</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>JBI: Joanna Briggs Institute.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Bias Mitigation in Primary Health Care Artificial Intelligence Models</title>
        <p>Primary review questions are (1) What attempts have been made to mitigate bias in primary health care AI models? (2) Which diverse groups or protected attributes have been considered? and (3) What are the results regarding bias attenuation and model performance?</p>
        <p>In November 2022, we developed a search strategy aligned with the main concepts of our primary review questions with an experienced librarian in 4 relevant databases (MEDLINE [Ovid], CINAHL [EBSCO], PsycINFO [Ovid], and Web of Science). The results of the search strategy in Web of Science were limited to the following 2 indexes: Science Citation Index Expanded and Emerging Sources Citation Index. We used 5 relevant articles to test the sensitivity of our search strategy, focusing on peer-reviewed publications from the past 5 years (between January 1, 2017, and November 15, 2022). The search strategies for each database can be found in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
      </sec>
      <sec>
        <title>Data Collection</title>
        <p>We imported all sources (n=1603) into the web-based collaborative tool Covidence (Veritas Health Innovation) [<xref ref-type="bibr" rid="ref15">15</xref>], which automatically identified and removed 581 duplicates, with an additional 4 removed manually. The inclusion and exclusion criteria are presented in <xref ref-type="table" rid="table2">Table 2</xref>. During the title and abstract screening phase, 7 reviewers independently assessed the abstracts based on the selection criteria. We piloted the screening process on 50 sources that all reviewers independently assessed. Reviewers included a source if it met our inclusion criteria, such as featuring an AI predictive model in health, targeting primary health care populations, and presenting a strategy or method for reducing bias. All titles and abstracts were screened independently by at least 2 reviewers, with any discrepancies resolved through consensus involving all reviewers, including at least 1 senior researcher.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Inclusion and exclusion criteria.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="190"/>
            <col width="410"/>
            <col width="400"/>
            <thead>
              <tr valign="top">
                <td>PCC (Population, Concept, and Context) elements [<xref ref-type="bibr" rid="ref14">14</xref>]</td>
                <td>Inclusion criteria</td>
                <td>Exclusion criteria</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Population</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Any populations targeted by CBPHC<sup>a</sup> interventions.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Any populations targeted by hospital or specialized care interventions.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Concept</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>All methods or strategies deployed to assess and mitigate bias toward diverse groups or protected attributes in AI models.</p>
                    </list-item>
                    <list-item>
                      <p>All mitigation methods or strategies deployed to promote and increase equity, diversity, and inclusion in CBPHC algorithms.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Methods or strategies deployed to assess and mitigate bias in the AI model itself (eg, biased prediction of treatment effects), rather than bias related to individuals’ characteristics or protected attributes.</p>
                    </list-item>
                    <list-item>
                      <p>Strategies, methods, or interventions that are not related to CBPHC.</p>
                    </list-item>
                    <list-item>
                      <p>CBPHC interventions that do not include any algorithm or AI system.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Context</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Include all CBPHC algorithms (AI) applications that can perpetuate or introduce potential biases toward diverse groups based on their characteristics or protected attributes.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Algorithms used by primary health care providers for support in administrative tasks and operational aspects, rather than for clinical decisions.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Study design, study type, and time frame</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>All empirical studies published in English or French between 2017 and 2022.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Reviews, opinions, commentaries, editorial content, conference papers, communications, protocols, magazine articles, and so on.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>CBPHC: Community-based primary health care.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>For the remaining articles assessed for eligibility at the full-text review stage, we searched for and obtained any missing full texts of selected references, then imported them into Covidence. Five reviewers independently applied the same selection criteria, and all reasons for exclusion were recorded in Covidence. All full texts underwent dual screening. As in the previous stage, any discrepancies regarding the included studies were resolved through consensus among all reviewers, including at least one senior researcher.</p>
      </sec>
      <sec>
        <title>Data Extraction</title>
        <p>One experienced reviewer performed the extraction of the included studies, and 2 senior researchers validated the data for all of them. We also hand-searched [<xref ref-type="bibr" rid="ref16">16</xref>] and identified 2 relevant articles [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>] related to 2 included studies [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], which were added to Covidence for extraction. Based on reporting standards for AI in health care [<xref ref-type="bibr" rid="ref21">21</xref>], we extracted the following information (title of the paper, year of publication, lead author, and country), study objective, discipline and study design, AI model features, study population and setting, AI model architecture and evaluation, bias assessment method, strategy for deployment, diverse groups concerned, bias mitigation results, and the impact on AI model performance and accuracy.</p>
      </sec>
      <sec>
        <title>Quality Assessment</title>
        <p>One senior reviewer appraised the quality of the included studies by applying the Mixed-Methods Appraisal Tool (MMAT) [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>] and at least one senior researcher validated each of them.</p>
      </sec>
      <sec>
        <title>Data Synthesis</title>
        <p>In accordance with the JBI recommendations [<xref ref-type="bibr" rid="ref24">24</xref>], we synthesized data using structured narrative summaries around our review concepts (eg, model data source, model input, model output, diverse groups, or protected attributes), mitigation strategies deployed, and the results on bias mitigation and overall model performance. We reported our findings based on the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) [<xref ref-type="bibr" rid="ref25">25</xref>].</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>We obtained approval from the ethics board of the “Comité d’éthique de la recherche sectoriel en Santé des Populations et Première Ligne du Centre Intégré Universitaire de Santé et de Services Sociaux de la Capitale-Nationale” for the Protecting and Engaging Vulnerable Populations in the Development of Predictive Models in Primary Health Care for Inclusive, Diverse and Equitable AI (PREMIA) project (#2023-2726).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>Out of a total of 1018 titles and abstracts, along with 189 full-text articles that underwent dual screening, 17 studies [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref40">40</xref>] met our eligibility criteria. The PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 flow diagram is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref> [<xref ref-type="bibr" rid="ref41">41</xref>].</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>PRISMA (Preferred Reporting Items for Systematic reviews and Meta-Analyses) flow diagram.</p>
        </caption>
        <graphic xlink:href="jmir_v27i1e60269_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>The relatively high number of exclusions at the full-text review stage (172/189, 91%) can be attributed to our inclusive approach in the previous stage. For example, some reviews (17/189, 9%) and incorrect study types (67/189, 35%), such as editorials, commentaries, or conference papers, were excluded at this stage. Other exclusion reasons (88/189, 47%) included models that lacked AI components, models focusing on health care operational processes (eg, workflow modeling), studies targeting populations receiving specialized care (eg, hospitalized or cancer patients), interventions such as imaging research that were outside the scope of CBPHC, and methods for mitigating bias that were applied to the AI model itself (eg, biased predictions of treatment effects) rather than addressing biases related to diverse groups or personal attributes.</p>
      <sec>
        <title>Overview of Included Studies</title>
        <p>Of the 17 included studies published between 2019 and 2022, we identified 7 studies in the discipline of data science or informatics, 7 in medical informatics, 1 in medical ethics and informatics, 1 in medical ethics using a Delphi method, and 1 in management care ethics using a user-centered design. Most studies have been conducted in the United States (15/17, 88%), 1 in the United Kingdom, and 1 in Italy. The main characteristics of the included studies can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
      </sec>
      <sec>
        <title>Quality Assessment of Included Studies</title>
        <p>Most studies had a quantitative descriptive study design (14/17, 82%), while 2 used a mixed methods design, and 1 used a qualitative design. All studies showed high quality, receiving scores of 3 or 4 stars (out of a possible 5). All MMAT scores can be found in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
      </sec>
      <sec>
        <title>Diverse Groups Considered</title>
        <p>The most frequently studied protected attributes were race (or ethnicity), examined in 71% (12/17) of studies, and sex (defined as binary male versus female), considered in 59% (10/17) of studies. None of the studies distinguished between biological sex and socially constructed gender, and 5 of them incorrectly identified sex as gender. Race or ethnicity was most often categorized as White or Black, Black or non-Black, or, in one study, as Asian, Black, White, and other.</p>
        <p>Other protected attributes considered by the studies included age (7/17, 41%), socioeconomic status or its proxies, such as income, work class, education, health care insurance (5/17, 29%), place of residence (2/17, 12%), marital status (1/17, 6%), and disability status (1/17, 6%).</p>
      </sec>
      <sec>
        <title>Categorization of Deployed Bias Mitigation Strategies</title>
        <p>We identified considerable heterogeneity across the studies, which used various strategies and methods to assess and mitigate bias in algorithms impacting diverse groups. We categorized these efforts into four groups: (1) addressing bias in existing AI models or datasets, (2) mitigating biases from data sources such as electronic health records (EHRs), (3) developing tools that incorporate a “human-in-the-loop” approach, and (4) identifying ethical principles to guide informed decision-making.</p>
      </sec>
      <sec>
        <title>Attempts in Existing AI Models or Datasets</title>
        <p>We identified 7 studies that attempted to mitigate biases in existing AI models or datasets [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>].</p>
        <p>A debiasing attempt was made on an insurance coverage algorithm designed to identify individuals who could benefit from health resources according to their health needs [<xref ref-type="bibr" rid="ref35">35</xref>]. Risk scores were initially calculated based on projected future costs rather than uncontrolled or unmanaged illnesses, disadvantaging Black patients. By changing the data labeling to focus on future illness rather than future costs, the percentage of Black patients who could benefit from health resources increased significantly [<xref ref-type="bibr" rid="ref35">35</xref>].</p>
        <p>Another cohort study [<xref ref-type="bibr" rid="ref37">37</xref>] using a Medicaid enrollees’ dataset showed that reweighing was more effective at reducing bias in postpartum depression risk scores between White and Black individuals compared with training without the race variable for comparison. Initially, it was found that the White individuals had higher rates of postpartum depression and mental health service use. However, after comparing postpartum depression rates between races based on population surveys, it became clear that the higher rates in White women might be due to disparities in the timely assessment, screening, and detection of symptoms in Black women [<xref ref-type="bibr" rid="ref37">37</xref>].</p>
        <p>Three other studies reported the following: (1) retraining models with data that incorporated health equity measures resulted in a slight decrease in performance for detecting abnormal electrocardiograms but significantly reduced gender, race, and age biases [<xref ref-type="bibr" rid="ref19">19</xref>]; (2) increasing diversity in the training data of a predictive pulmonary disease model improved its performance [<xref ref-type="bibr" rid="ref27">27</xref>]; and (3) although a mental health assessment model achieved high accuracy, its performance was statistically higher and more accurate for men than for women [<xref ref-type="bibr" rid="ref18">18</xref>]. The use of an algorithmic disparate remover, by adjusting the modeling data, significantly reduced this disparity while maintaining high accuracy [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
        <p>Another attempt to assess bias involved replicating models predicting liver disease [<xref ref-type="bibr" rid="ref39">39</xref>]. Importing an existing dataset reproduced predictive models with high accuracy but revealed a previously unobserved bias, with women experiencing a higher false negative rate.</p>
        <p>We identified only 1 in-processing debiasing attempt [<xref ref-type="bibr" rid="ref28">28</xref>]. Two algorithmic fairness strategies, group recalibration and equalized odds, were used to recalibrate a predictive model of cardiovascular diseases that was not initially adjusted for attributes such as sex or race. This resulted in an exacerbation of differences in false positive and false negative rates between groups, as well as overall model miscalibration.</p>
      </sec>
      <sec>
        <title>Attempts in Data Sourcing</title>
        <p>We identified 5 studies that attempted to mitigate biases in data sourcing [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>].</p>
        <p>In an analysis of published synthetic datasets, such as the American Time Use Survey dataset, the use of fairness metrics revealed potential discrepancies in representativeness between real and synthetic data across age, sex, and race [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        <p>Four other studies investigated EHRs datasets [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. A natural language processing model was developed to extract vital sign features from unstructured notes, comparing risk scores with 2 convenience samples. This method reduced the missingness of vital signs by 31%, thereby mitigating possible discrimination toward diverse groups, such as Black men or Black women [<xref ref-type="bibr" rid="ref32">32</xref>]. Based on data from a previous study, 2 ML models were trained to compare balanced error rates across different socioeconomic status levels and the incompleteness of EHRs data [<xref ref-type="bibr" rid="ref31">31</xref>]. Asthmatic children with lower socioeconomic status exhibited larger balanced error rates than those with higher socioeconomic status and had more missing information regarding asthma care, severity, or undiagnosed asthma, despite meeting asthma criteria [<xref ref-type="bibr" rid="ref31">31</xref>].</p>
        <p>Potential bias based on place of residence in EHRs was examined by 2 studies [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Rebalancing class labels by adding zip-code level information to 19,367 EHRs during the preprocessing step showed no significant deviation in performance, indicating that bias can be mitigated through preprocessing [<xref ref-type="bibr" rid="ref38">38</xref>]. Meanwhile, a simple 30-day readmission prediction model was developed, categorizing each patient as local (nearby) or not (far) [<xref ref-type="bibr" rid="ref40">40</xref>]. The performance with and without this variable was assessed, revealing no significant differences. Considering that living locally only affects the observability of the outcome (eg, a patient may be readmitted to a different hospital), differential bias assessment cannot rely solely on observed data [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
      </sec>
      <sec>
        <title>Attempts in Developing Tools With a “Human-in-the-Loop” Approach</title>
        <p>We identified 3 studies that attempted to mitigate biases by incorporating a “human-in-the-loop” approach [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref36">36</xref>].</p>
        <p>These studies led to the development of “human-in-the-loop” tools: (1) a visual tool for auditing and mitigating bias from tabular datasets, which was tested through experiments on 3 datasets with user participation and significantly reduced bias compared with another commercial debiasing toolkit [<xref ref-type="bibr" rid="ref29">29</xref>]; (2) pragmatic tools developed for better use of risk scores with a Medicare members’ dataset, allowing users to identify appropriate risk scores for each subgroup to achieve equality of opportunity [<xref ref-type="bibr" rid="ref30">30</xref>]; and (3) a tool called “FairLens” capable of identifying and explaining biases, which was tested using a fictitious black box model serving as a decision support system [<xref ref-type="bibr" rid="ref36">36</xref>]. Empirically validated by injecting biases into this fictitious decision support system, this tool outperformed other standard measures and enabled experts to identify problematic groups or affected patients, thereby allowing for the detection of potential misclassification [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
      </sec>
      <sec>
        <title>Attempts at Identifying Ethical Principles for Informed Decision-Making</title>
        <p>We identified 2 empirical studies that attempted to mitigate biases by identifying ethical principles for informed decision-making [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>].</p>
        <p>To assess the potential missingness of EHR data from phenotyping technology, a Delphi study was conducted to address ethical challenges and reach a consensus on the importance of privacy, transparency, consent, accountability, and fairness [<xref ref-type="bibr" rid="ref33">33</xref>]. In addition, a user-centered design study was conducted to identify user requirements, mainly intended for health managers and clinicians, to support informed decision-making and confidence in using a hepatitis C severity illness predictive model prototype [<xref ref-type="bibr" rid="ref34">34</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The reviewed studies illustrate a multifaceted approach to mitigating bias in primary care AI models. Strategies include retraining, reweighing, relabeling, adding more diversity, and attempting to replicate existing modeling data [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>], as well as algorithmic recalibration applied to an existing prediction model [<xref ref-type="bibr" rid="ref28">28</xref>]. Other strategies involve the development and application of fairness metrics to ensure equitable distributions in previously published databases [<xref ref-type="bibr" rid="ref26">26</xref>], and the identification of missingness in EHRs datasets by rebalancing class labels or adding information [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Another group of strategies includes the introduction of visual interactive tools for human-in-the-loop bias auditing [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. All these attempts cover a broad spectrum of interventions, ranging from data preprocessing and algorithmic modification to post hoc analysis, demonstrating the complexity and variety of approaches needed to address bias in AI models in primary health care.</p>
        <p>The studies collectively address a wide range of protected attributes [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref8">8</xref>], including race or ethnicity [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref37">37</xref>], sex [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>], age [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref36">36</xref>], socioeconomic status (SES) [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>], and other demographic variables such as place of residence [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. This underlines the recognition of the multifaceted nature of bias, which can intersect across various dimensions of identity and social determinants of health [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. However, we have identified disparities in the number of protected attributes studied. Race (White vs Black) and sex (male vs female) are most frequently investigated, whereas other attributes, such as disability and gender, are underresearched or not studied at all.</p>
        <p>Bias mitigation efforts reveal a nuanced landscape where attempts to reduce bias across protected attributes can result in complex trade-offs with model performance. For example, a decrease in overall model performance accompanied by significant reductions in bias was observed following the implementation of constrained optimization [<xref ref-type="bibr" rid="ref19">19</xref>]. Similarly, improvements in calibration for specific groups came at the cost of increased disparities in false positive and false negative rates between groups [<xref ref-type="bibr" rid="ref28">28</xref>]. Despite these trade-offs, the efforts have largely been successful in reducing bias, as evidenced by a study that achieved fairer distributions in synthetic data [<xref ref-type="bibr" rid="ref26">26</xref>], and in another study where human-in-the-loop interventions significantly reduced bias while maintaining utility [<xref ref-type="bibr" rid="ref29">29</xref>].</p>
        <p>These empirical findings reinforce theoretical insights that emphasize the importance of health equity between protected and unprotected attributes [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. To mitigate bias in AI health models, distributive justice options for ML have been proposed: (1) equal patient outcomes; (2) equal performance; and (3) equal allocation of resources [<xref ref-type="bibr" rid="ref1">1</xref>]. Since these different types of fairness options are often incompatible, optimizing all these parameters seems challenging, as demonstrated by an identified study [<xref ref-type="bibr" rid="ref28">28</xref>]. Trade-offs are essential, and a participatory process involving key stakeholders, including ethicists, clinicians, and marginalized populations, is strongly encouraged [<xref ref-type="bibr" rid="ref1">1</xref>]. While striving to create ethically robust AI models, selected studies often reveal tension, as efforts to reduce bias can sometimes lead to a decrease in the model’s overall performance. This presents a critical challenge: balancing the imperative of fairness with the need to maintain high accuracy and efficiency in algorithmic outputs.</p>
      </sec>
      <sec>
        <title>Comparison With Previous Work</title>
        <p>Initiatives focused on the fair use of AI in health care and the assessment of bias risk in AI predictive models have been published in recent years. Notable initiatives include Consolidated Standards of Reporting Trials-Artificial Intelligence (CONSORT-AI) and Standard Protocol Items Recommendations for Interventional Trials-Artificial Intelligence (SPIRIT-AI) [<xref ref-type="bibr" rid="ref43">43</xref>], which provide guidelines for the ethical presentation of the results of trials conducted with AI in the health care field. To assess the risk of bias in diagnostic and prognostic prediction model studies, the “Prediction Model Risk of Bias Assessment Tool” (PROBAST) [<xref ref-type="bibr" rid="ref44">44</xref>] can be used. PROBAST consists of a list of signaling questions grouped into 4 categories: participants, predictors, outcomes, and analysis. This tool was used in a systematic scoping review to assess the quality of primary studies reporting applications of AI in CBPHC [<xref ref-type="bibr" rid="ref45">45</xref>].</p>
        <p>However, the objective of our scoping review differs; it is not to identify biases in the AI prediction models themselves, but rather to examine biases toward groups that are underrepresented or misrepresented in these AI models. An identified review has used and adapted PROBAST to assess related protected attributes, but the AI predictive models studied were hospital-based and not relevant to primary care [<xref ref-type="bibr" rid="ref11">11</xref>]. We also identified a scoping review protocol that focused on bias toward diverse groups in AI systems in primary care; however, unless we are mistaken, the results of this protocol have never been published [<xref ref-type="bibr" rid="ref10">10</xref>]. Another identified review aimed to assess age-related bias in AI but did not focus on primary health care [<xref ref-type="bibr" rid="ref46">46</xref>]. Finally, we identified another systematic review investigating health inequities in primary care, but it adopted a system-wide perspective, focusing on aspects such as patient consultation and effects on health systems [<xref ref-type="bibr" rid="ref47">47</xref>].</p>
        <p>To our knowledge, no other published review has the objectives of identifying (1) the bias mitigation strategies or methods in primary health care, (2) the diverse groups that are underrepresented or misrepresented, and (3) the results of bias mitigation and AI model performance.</p>
      </sec>
      <sec>
        <title>Strengths and Limitations</title>
        <p>The strengths of this review include results that can be translated into recommendations for various stakeholders, such as AI developers, researchers, and decision makers. However, we acknowledge some limitations. First, we limited our search strategy to the last 5 years before November 2022 and focused on 4 databases, which may have excluded some relevant studies. Second, the extraction of studies and quality assessment were conducted only once, although all of them were validated by at least one senior researcher. Third, due to the heterogeneity of the studies, we were unable to combine results through a quantitative synthesis and remained at a narrative level of reporting. Finally, our review primarily identified research from a North American setting, which reduces its transferability to other continents.</p>
      </sec>
      <sec>
        <title>Future Directions and Dissemination Plan</title>
        <p>This scoping review serves as the initial phase of the iterative project “Protecting and Engaging Vulnerable Populations in the Development of Predictive Models in Primary Health Care for Inclusive, Diverse, and Equitable AI” (PREMIA).</p>
        <p>Following the results of this review, we have developed a framework currently validated by a diverse group of experts, including clinicians, public health managers, primary care researchers, data scientists, and patient and citizen partners. This group is concentrating on existing AI predictive models and the bias mitigation strategies identified in our scoping review. Diverse populations, such as older adults, individuals with disabilities, and people from various racial and ethnic backgrounds, are actively involved in this second phase of PREMIA. We plan to prepare and submit a manuscript based on the findings of this Delphi study.</p>
        <p>In addition, in recognition of the rapid advancements in this field, we plan to update this literature review in 2027 using a similar search strategy. This iterative approach will allow us to refine our framework and track the progress of bias mitigation in AI models within primary health care. Indigenous peoples in Canada represent a group historically underrepresented in health research, leading to inequities [<xref ref-type="bibr" rid="ref3">3</xref>]. Since no other study has addressed bias related to Indigenous status, we are collaborating with Indigenous representatives to develop methods for mitigating this bias in CBPHC algorithms.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>This review identifies strategies and methods for mitigating bias in primary health care algorithms, considers diverse groups based on their personal or protected attributes, and examines the results of bias attenuation and model performance. The findings suggest that biases toward diverse groups can be more effectively mitigated when data are open-sourced, multiple stakeholders are involved, and during the preprocessing stage of algorithm development. More empirical studies are needed, with a focus on including participants who embrace greater diversity, such as nonbinary gender identities or Indigenous peoples in Canada.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Database Search Strategies.</p>
        <media xlink:href="jmir_v27i1e60269_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 84 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Characteristics of Included Studies.</p>
        <media xlink:href="jmir_v27i1e60269_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 328 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Quality assessment: MMAT (Mixed-Methods Appraisal Tool) scores.</p>
        <media xlink:href="jmir_v27i1e60269_app3.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 10 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>PRISMA-ScR checklist.</p>
        <media xlink:href="jmir_v27i1e60269_app4.docx" xlink:title="DOCX File , 109 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CBPHC</term>
          <def>
            <p>community-based primary health care</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CONSORT-AI</term>
          <def>
            <p>Consolidated Standards of Reporting Trials-Artificial Intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EHR</term>
          <def>
            <p>electronic health record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">JBI</term>
          <def>
            <p>Joanna Briggs Institute</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">MMAT</term>
          <def>
            <p>Mixed-Methods Appraisal Tool</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PREMIA</term>
          <def>
            <p>Protecting and Engaging Vulnerable Populations in the Development of Predictive Models in Primary Health Care for Inclusive, Diverse and Equitable AI</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PRISMA</term>
          <def>
            <p>Preferred Reporting Items for Systematic reviews and Meta-Analyses</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">PRISMA-ScR</term>
          <def>
            <p>Preferred Reporting Items for Systematic reviews and Meta-Analyses extension for Scoping Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">PROBAST</term>
          <def>
            <p>Prediction model Risk Of Bias Assessment Tool</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">PROGRESS</term>
          <def>
            <p>Place of residence, race/ethnicity/culture/language, occupation, gender/sex, religion, education, socioeconomic status, and social capital</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">SES</term>
          <def>
            <p>socioeconomic status</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">SPIRIT-AI</term>
          <def>
            <p>Standard Protocol Items Recommendations for Interventional Trials-Artificial Intelligence</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The Protecting and Engaging Vulnerable Populations in the Development of Predictive Models in Primary Health Care for Inclusive, Diverse, and Equitable AI project is funded by the International Observatory on the Societal Impacts of AI and Digital Technology. The authors would like to thank Karine Gentelet for her contribution to the study’s design and for obtaining the funding.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>All data generated or analyzed during this study are included in this published article and <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref>-<xref ref-type="supplementary-material" rid="app3">3</xref>.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>MS, MPG, CR, VC, PD, JSP, and DD designed the study and obtained the funding. MS, MPG, SO, and FB developed the search strategy. MS, SO, MS, CR, MPG, VC, and FB participated in the screening of sources. MS, SO, and MPG conducted the data extraction. SO, MS, and MPG completed the first draft of the manuscript, and all authors participated in the revision and editing of the manuscript versions. All authors reviewed and approved the final manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rajkomar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hardt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Howell</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Chin</surname>
              <given-names>MH</given-names>
            </name>
          </person-group>
          <article-title>Ensuring fairness in machine learning to advance health equity</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <volume>169</volume>
          <issue>12</issue>
          <fpage>866</fpage>
          <lpage>872</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30508424"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-1990</pub-id>
          <pub-id pub-id-type="medline">30508424</pub-id>
          <pub-id pub-id-type="pii">2717119</pub-id>
          <pub-id pub-id-type="pmcid">PMC6594166</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Qu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Che</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ouyang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Bian</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Integration of cognitive tasks into artificial general intelligence test for large models</article-title>
          <source>iScience</source>
          <year>2024</year>
          <volume>27</volume>
          <issue>4</issue>
          <fpage>109550</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2589-0042(24)00772-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.isci.2024.109550</pub-id>
          <pub-id pub-id-type="medline">38595796</pub-id>
          <pub-id pub-id-type="pii">S2589-0042(24)00772-7</pub-id>
          <pub-id pub-id-type="pmcid">PMC11001637</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gurevich</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>El Hassan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>El Morr</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Equity within AI systems: what can health leaders expect?</article-title>
          <source>Healthc Manage Forum</source>
          <year>2023</year>
          <volume>36</volume>
          <issue>2</issue>
          <fpage>119</fpage>
          <lpage>124</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/abs/10.1177/08404704221125368?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/08404704221125368</pub-id>
          <pub-id pub-id-type="medline">36226507</pub-id>
          <pub-id pub-id-type="pmcid">PMC9976641</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alabdulmohsin</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Lucic</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A near-optimal algorithm for debiasing trained machine learning models</article-title>
          <source>ArXiv. Preprint posted online on August 23, 2022</source>
          <year>2022</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2106.12887"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Leeuwen</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>de Rooij</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schalekamp</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>van Ginneken</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Rutten</surname>
              <given-names>MJCM</given-names>
            </name>
          </person-group>
          <article-title>How does artificial intelligence in radiology improve efficiency and health outcomes?</article-title>
          <source>Pediatr Radiol</source>
          <year>2022</year>
          <volume>52</volume>
          <issue>11</issue>
          <fpage>2087</fpage>
          <lpage>2093</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34117522"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00247-021-05114-8</pub-id>
          <pub-id pub-id-type="medline">34117522</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00247-021-05114-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC9537124</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hanif</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mirza</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Malik</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Machine learning in primary care: potential to improve public health</article-title>
          <source>J Med Eng Technol</source>
          <year>2021</year>
          <volume>45</volume>
          <issue>1</issue>
          <fpage>75</fpage>
          <lpage>80</lpage>
          <pub-id pub-id-type="doi">10.1080/03091902.2020.1853839</pub-id>
          <pub-id pub-id-type="medline">33283565</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="web">
          <article-title>Community-based primary health care</article-title>
          <source>Canadian Institutes of Health Research</source>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cihr-irsc.gc.ca/e/43626.html">https://cihr-irsc.gc.ca/e/43626.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <article-title>PROGRESS-Plus</article-title>
          <source>Cochrane Methods</source>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://methods.cochrane.org/equity/projects/evidence-equity/progress-plus">https://methods.cochrane.org/equity/projects/evidence-equity/progress-plus</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Delgado</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>de Manuel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Parra</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Moyano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rueda</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Guersenzvaig</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ausin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cruz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Casacuberta</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Puyol</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Bias in algorithms of AI systems developed for COVID-19: a scoping review</article-title>
          <source>J Bioeth Inq</source>
          <year>2022</year>
          <volume>19</volume>
          <issue>3</issue>
          <fpage>407</fpage>
          <lpage>419</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://hdl.handle.net/10668/20864"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11673-022-10200-z</pub-id>
          <pub-id pub-id-type="medline">35857214</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11673-022-10200-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC9463236</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>JX</given-names>
            </name>
            <name name-style="western">
              <surname>Somani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Murray</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sarkar</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <article-title>Health equity in artificial intelligence and primary care research: protocol for a scoping review</article-title>
          <source>JMIR Res Protoc</source>
          <year>2021</year>
          <volume>10</volume>
          <issue>9</issue>
          <fpage>e27799</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchprotocols.org/2021/9/e27799/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/27799</pub-id>
          <pub-id pub-id-type="medline">34533458</pub-id>
          <pub-id pub-id-type="pii">v10i9e27799</pub-id>
          <pub-id pub-id-type="pmcid">PMC8486995</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>HE</given-names>
            </name>
            <name name-style="western">
              <surname>Landers</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Subbaswamy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kharrazi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gaskin</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Saria</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A bias evaluation checklist for predictive models and its pilot application for 30-day hospital readmission models</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>8</issue>
          <fpage>1323</fpage>
          <lpage>1333</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35579328"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocac065</pub-id>
          <pub-id pub-id-type="medline">35579328</pub-id>
          <pub-id pub-id-type="pii">6586579</pub-id>
          <pub-id pub-id-type="pmcid">PMC9277650</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sasseville</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ouellet</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rhéaume</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Couture</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Després</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Paquette</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Gentelet</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Darmon</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bergeron</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Gagnon</surname>
              <given-names>MP</given-names>
            </name>
          </person-group>
          <article-title>Risk of bias mitigation for vulnerable and diverse groups in community-based primary health care artificial intelligence models: protocol for a rapid review</article-title>
          <source>JMIR Res Protoc</source>
          <year>2023</year>
          <volume>12</volume>
          <fpage>e46684</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchprotocols.org/2023/1/e46684/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46684</pub-id>
          <pub-id pub-id-type="medline">37358896</pub-id>
          <pub-id pub-id-type="pii">v12i1e46684</pub-id>
          <pub-id pub-id-type="pmcid">PMC10337340</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MDJ</given-names>
            </name>
            <name name-style="western">
              <surname>Marnie</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Pollock</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Munn</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Alexander</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>McInerney</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Khalil</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Updated methodological guidance for the conduct of scoping reviews</article-title>
          <source>JBI Evid Synth</source>
          <year>2020</year>
          <volume>18</volume>
          <issue>10</issue>
          <fpage>2119</fpage>
          <lpage>2126</lpage>
          <pub-id pub-id-type="doi">10.11124/JBIES-20-00167</pub-id>
          <pub-id pub-id-type="medline">33038124</pub-id>
          <pub-id pub-id-type="pii">02174543-202010000-00004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>Apply PCC</article-title>
          <source>University of South Australia</source>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://guides.library.unisa.edu.au/ScopingReviews/ApplyPCC">https://guides.library.unisa.edu.au/ScopingReviews/ApplyPCC</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <article-title>Covidence</article-title>
          <source>Veritas Health Innovation</source>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.covidence.org/">https://www.covidence.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <article-title>Handsearching</article-title>
          <source>Cochrane</source>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://training.cochrane.org/resource/tsc-induction-mentoring-training-guide/5-handsearching">https://training.cochrane.org/resource/tsc-induction-mentoring-training-guide/5-handsearching</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reyna</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sadr</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Perez Alday</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Will two do? Varying dimensions in electrocardiography: the PhysioNet/computing in cardiology challenge 2021 v1.0.3</article-title>
          <source>physionet.org</source>
          <access-date>2024-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://physionet.org/content/challenge-2021/1.0.3/">https://physionet.org/content/challenge-2021/1.0.3/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>VK</given-names>
            </name>
            <name name-style="western">
              <surname>Long</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Automatic assessment of mental health using phone metadata</article-title>
          <source>Proc. Assoc. Info. Sci. Tech</source>
          <year>2019</year>
          <volume>55</volume>
          <issue>1</issue>
          <fpage>450</fpage>
          <lpage>459</lpage>
          <pub-id pub-id-type="doi">10.1002/pra2.2018.14505501049</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Perez Alday</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Rad</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Reyna</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Sadr</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Dumitru</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Albert</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sameni</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>GD</given-names>
            </name>
          </person-group>
          <article-title>Age, sex and race bias in automated arrhythmia detectors</article-title>
          <source>J Electrocardiol</source>
          <year>2022</year>
          <volume>74</volume>
          <fpage>5</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jelectrocard.2022.07.007</pub-id>
          <pub-id pub-id-type="medline">35878534</pub-id>
          <pub-id pub-id-type="pii">S0022-0736(22)00094-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC11486543</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Arunachalam</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Silenzio</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>VK</given-names>
            </name>
          </person-group>
          <article-title>Fairness in mobile phone-based mental health assessment algorithms: exploratory study</article-title>
          <source>JMIR Form Res</source>
          <year>2022</year>
          <volume>6</volume>
          <issue>6</issue>
          <fpage>e34366</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2022/6/e34366/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/34366</pub-id>
          <pub-id pub-id-type="medline">35699997</pub-id>
          <pub-id pub-id-type="pii">v6i6e34366</pub-id>
          <pub-id pub-id-type="pmcid">PMC9240929</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hernandez-Boussard</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bozkurt</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ioannidis</surname>
              <given-names>JPA</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>NH</given-names>
            </name>
          </person-group>
          <article-title>MINIMAR (MINimum Information for Medical AI Reporting): developing reporting standards for artificial intelligence in health care</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2020</year>
          <volume>27</volume>
          <issue>12</issue>
          <fpage>2011</fpage>
          <lpage>2015</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32594179"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocaa088</pub-id>
          <pub-id pub-id-type="medline">32594179</pub-id>
          <pub-id pub-id-type="pii">5864179</pub-id>
          <pub-id pub-id-type="pmcid">PMC7727333</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>QN</given-names>
            </name>
            <name name-style="western">
              <surname>Fàbregues</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bartlett</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Boardman</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Cargo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dagenais</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gagnon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Griffiths</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Nicolau</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>O’Cathain</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rousseau</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vedel</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pluye</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The mixed methods appraisal tool (MMAT) version 2018 for information professionals and researchers</article-title>
          <source>EFI</source>
          <year>2018</year>
          <volume>34</volume>
          <issue>4</issue>
          <fpage>285</fpage>
          <lpage>291</lpage>
          <pub-id pub-id-type="doi">10.3233/efi-180221</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>QN</given-names>
            </name>
            <name name-style="western">
              <surname>Fàbregues</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bartlett</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Boardman</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Cargo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dagenais</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gagnon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Griffiths</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Nicolau</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>O'Cathain</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rousseau</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Vedel</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pluye</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Mixed methods appraisal tool (MMAT) version 2018: user guide</article-title>
          <source>Mixed Methods Appraisal Tool</source>
          <year>2018</year>
          <access-date>2024-04-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://content.iospress.com/articles/education-for-information/efi180221">https://content.iospress.com/articles/education-for-information/efi180221</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pollock</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MDJ</given-names>
            </name>
            <name name-style="western">
              <surname>Khalil</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>McInerney</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Alexander</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>de Moraes</surname>
              <given-names>ÉB</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Pieper</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Saran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stern</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Munn</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Recommendations for the extraction, analysis, and presentation of results in scoping reviews</article-title>
          <source>JBI Evid Synth</source>
          <year>2023</year>
          <volume>21</volume>
          <issue>3</issue>
          <fpage>520</fpage>
          <lpage>532</lpage>
          <pub-id pub-id-type="doi">10.11124/JBIES-22-00123</pub-id>
          <pub-id pub-id-type="medline">36081365</pub-id>
          <pub-id pub-id-type="pii">02174543-990000000-00076</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lillie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zarin</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Colquhoun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Levac</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Horsley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hempel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McGowan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hartling</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aldcroft</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Garritty</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lewin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Macdonald</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Langlois</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Soares-Weiser</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Moriarty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tunçalp</surname>
              <given-names>Özge</given-names>
            </name>
            <name name-style="western">
              <surname>Straus</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>PRISMA extension for scoping reviews (PRISMA-ScR): checklist and explanation</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <volume>169</volume>
          <issue>7</issue>
          <fpage>467</fpage>
          <lpage>473</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/abs/10.7326/M18-0850?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-0850</pub-id>
          <pub-id pub-id-type="medline">30178033</pub-id>
          <pub-id pub-id-type="pii">2700389</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bhanot</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Qi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Erickson</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Guyon</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bennett</surname>
              <given-names>KP</given-names>
            </name>
          </person-group>
          <article-title>The problem of fairness in synthetic healthcare data</article-title>
          <source>Entropy (Basel)</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>9</issue>
          <fpage>1165</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=e23091165"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/e23091165</pub-id>
          <pub-id pub-id-type="medline">34573790</pub-id>
          <pub-id pub-id-type="pii">e23091165</pub-id>
          <pub-id pub-id-type="pmcid">PMC8468495</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fletcher</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Nakeshimana</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Olubeko</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Addressing fairness, bias, and appropriate use of artificial intelligence and machine learning in global health</article-title>
          <source>Front Artif Intell</source>
          <year>2020</year>
          <volume>3</volume>
          <fpage>561802</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33981989"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frai.2020.561802</pub-id>
          <pub-id pub-id-type="medline">33981989</pub-id>
          <pub-id pub-id-type="pii">561802</pub-id>
          <pub-id pub-id-type="pmcid">PMC8107824</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Foryciarz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pfohl</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Evaluating algorithmic fairness in the presence of clinical guidelines: the case of atherosclerotic cardiovascular disease risk estimation</article-title>
          <source>BMJ Health Care Inform</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>e100460</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://informatics.bmj.com/lookup/pmidlookup?view=long&#38;pmid=35396247"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjhci-2021-100460</pub-id>
          <pub-id pub-id-type="medline">35396247</pub-id>
          <pub-id pub-id-type="pii">bmjhci-2021-100460</pub-id>
          <pub-id pub-id-type="pmcid">PMC8996004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="web">
          <article-title>D-BIAS: a causality-based human-in-the-loop system for tackling algorithmic bias</article-title>
          <source>IEEE Journals &#38; Magazine &#124; IEEE Xplore</source>
          <access-date>2024-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/9903601/authors#authors">https://ieeexplore.ieee.org/document/9903601/authors#authors</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hane</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Wasserman</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Designing equitable health care outreach programs from machine learning patient risk scores</article-title>
          <source>Med Care Res Rev</source>
          <year>2023</year>
          <volume>80</volume>
          <issue>2</issue>
          <fpage>216</fpage>
          <lpage>227</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/abs/10.1177/10775587221098831?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/10775587221098831</pub-id>
          <pub-id pub-id-type="medline">35685000</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Juhn</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ryu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Malik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Romero-Brufau</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Weng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Sohn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sharp</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Halamka</surname>
              <given-names>JD</given-names>
            </name>
          </person-group>
          <article-title>Assessing socioeconomic bias in machine learning algorithms in health care: a case study of the HOUSES index</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>7</issue>
          <fpage>1142</fpage>
          <lpage>1151</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35396996"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocac052</pub-id>
          <pub-id pub-id-type="medline">35396996</pub-id>
          <pub-id pub-id-type="pii">6565895</pub-id>
          <pub-id pub-id-type="pmcid">PMC9196683</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khurshid</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reeder</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Harrington</surname>
              <given-names>LX</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sarma</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>SF</given-names>
            </name>
            <name name-style="western">
              <surname>Di Achille</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Diamant</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Turner</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>ES</given-names>
            </name>
            <name name-style="western">
              <surname>Haimovich</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Alusi</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Klarqvist</surname>
              <given-names>MDR</given-names>
            </name>
            <name name-style="western">
              <surname>Ashburner</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Diedrich</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ghadessi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mielke</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Eilken</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>McElhinney</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Derix</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Atlas</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ellinor</surname>
              <given-names>PT</given-names>
            </name>
            <name name-style="western">
              <surname>Philippakis</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Batra</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lubitz</surname>
              <given-names>SA</given-names>
            </name>
          </person-group>
          <article-title>Cohort design and natural language processing to reduce bias in electronic health records research</article-title>
          <source>NPJ Digit Med</source>
          <year>2022</year>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>47</fpage>
          <pub-id pub-id-type="doi">10.1038/s41746-022-00590-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Martinez-Martin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Greely</surname>
              <given-names>HT</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <article-title>Ethical development of digital phenotyping tools for mental health applications: delphi study</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>7</issue>
          <fpage>e27343</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2021/7/e27343/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/27343</pub-id>
          <pub-id pub-id-type="medline">34319252</pub-id>
          <pub-id pub-id-type="pii">v9i7e27343</pub-id>
          <pub-id pub-id-type="pmcid">PMC8367187</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nong</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Raj</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Platt</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Integrating predictive models into care: facilitating informed decision-making and communicating equity issues</article-title>
          <source>Am J Manag Care</source>
          <year>2022</year>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>18</fpage>
          <lpage>24</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ajmc.com/pubMed.php?pii=88812"/>
          </comment>
          <pub-id pub-id-type="doi">10.37765/ajmc.2022.88812</pub-id>
          <pub-id pub-id-type="medline">35049257</pub-id>
          <pub-id pub-id-type="pii">88812</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Obermeyer</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Powers</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Vogeli</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mullainathan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>
          <source>Science</source>
          <year>2019</year>
          <volume>366</volume>
          <issue>6464</issue>
          <fpage>447</fpage>
          <lpage>453</lpage>
          <pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id>
          <pub-id pub-id-type="medline">31649194</pub-id>
          <pub-id pub-id-type="pii">366/6464/447</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Panigutti</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Perotti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Panisson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bajardi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Pedreschi</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>FairLens: auditing black-box clinical decision support systems</article-title>
          <source>Inf Process Manag</source>
          <year>2021</year>
          <volume>58</volume>
          <issue>5</issue>
          <fpage>102657</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ipm.2021.102657</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sylla</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Dankwa-Mullan</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Koski</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>Comparison of methods to reduce bias from clinical prediction models of postpartum depression</article-title>
          <source>JAMA Netw Open</source>
          <year>2021</year>
          <volume>4</volume>
          <issue>4</issue>
          <fpage>e213909</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33856478"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.3909</pub-id>
          <pub-id pub-id-type="medline">33856478</pub-id>
          <pub-id pub-id-type="pii">2778568</pub-id>
          <pub-id pub-id-type="pmcid">PMC8050742</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seker</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Talburt</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Greer</surname>
              <given-names>ML</given-names>
            </name>
          </person-group>
          <article-title>Preprocessing to address bias in healthcare data</article-title>
          <source>Stud Health Technol Inform</source>
          <year>2022</year>
          <volume>294</volume>
          <fpage>327</fpage>
          <lpage>331</lpage>
          <pub-id pub-id-type="doi">10.3233/SHTI220468</pub-id>
          <pub-id pub-id-type="medline">35612086</pub-id>
          <pub-id pub-id-type="pii">SHTI220468</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Straw</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Investigating for bias in healthcare algorithms: a sex-stratified analysis of supervised machine learning models in liver disease prediction</article-title>
          <source>BMJ Health Care Inform</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>e100457</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://informatics.bmj.com/lookup/pmidlookup?view=long&#38;pmid=35470133"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjhci-2021-100457</pub-id>
          <pub-id pub-id-type="medline">35470133</pub-id>
          <pub-id pub-id-type="pii">bmjhci-2021-100457</pub-id>
          <pub-id pub-id-type="pmcid">PMC9039354</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pencina</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Boulware</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Goldstein</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Observability and its impact on differential bias for clinical prediction models</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>5</issue>
          <fpage>937</fpage>
          <lpage>943</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35211742"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocac019</pub-id>
          <pub-id pub-id-type="medline">35211742</pub-id>
          <pub-id pub-id-type="pii">6535920</pub-id>
          <pub-id pub-id-type="pmcid">PMC9006687</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <source>PRISMA Flow diagram</source>
          <access-date>2024-04-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.prisma-statement.org/prisma-2020-flow-diagram">https://www.prisma-statement.org/prisma-2020-flow-diagram</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nazer</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Zatarah</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Waldrip</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ke</surname>
              <given-names>JXC</given-names>
            </name>
            <name name-style="western">
              <surname>Moukheiber</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Khanna</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Hicklen</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Moukheiber</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Moukheiber</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mathur</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Bias in artificial intelligence algorithms and recommendations for mitigation</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>6</issue>
          <fpage>e0000278</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37347721"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000278</pub-id>
          <pub-id pub-id-type="medline">37347721</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-22-00357</pub-id>
          <pub-id pub-id-type="pmcid">PMC10287014</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Rivera</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Calvert</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Denniston</surname>
              <given-names>AK</given-names>
            </name>
            <collab>SPIRIT-AI and CONSORT-AI Working Group</collab>
          </person-group>
          <article-title>Reporting guidelines for clinical trial reports for interventions involving artificial intelligence: the CONSORT-AI Extension</article-title>
          <source>BMJ</source>
          <year>2020</year>
          <volume>370</volume>
          <fpage>m3164</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bmj.com/lookup/pmidlookup?view=long&#38;pmid=32909959"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.m3164</pub-id>
          <pub-id pub-id-type="medline">32909959</pub-id>
          <pub-id pub-id-type="pmcid">PMC7490784</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wolff</surname>
              <given-names>RF</given-names>
            </name>
            <name name-style="western">
              <surname>Moons</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>Riley</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Whiting</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Westwood</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Reitsma</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Kleijnen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mallett</surname>
              <given-names>S</given-names>
            </name>
            <collab>PROBAST Group</collab>
          </person-group>
          <article-title>PROBAST: a tool to assess the risk of bias and applicability of prediction model studies</article-title>
          <source>Ann Intern Med</source>
          <year>2019</year>
          <volume>170</volume>
          <issue>1</issue>
          <fpage>51</fpage>
          <lpage>58</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/abs/10.7326/M18-1376?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub++0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-1376</pub-id>
          <pub-id pub-id-type="medline">30596875</pub-id>
          <pub-id pub-id-type="pii">2719961</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abbasgholizadeh Rahimi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Légaré</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Archambault</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zomahoun</surname>
              <given-names>HTV</given-names>
            </name>
            <name name-style="western">
              <surname>Chandavong</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rheault</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>T Wong</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Langlois</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Couturier</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Salmeron</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Gagnon</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Légaré</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Application of artificial intelligence in community-based primary health care: systematic scoping review and critical appraisal</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>9</issue>
          <fpage>e29839</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/9/e29839/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29839</pub-id>
          <pub-id pub-id-type="medline">34477556</pub-id>
          <pub-id pub-id-type="pii">v23i9e29839</pub-id>
          <pub-id pub-id-type="pmcid">PMC8449300</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Donato-Woodger</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Nyrup</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Leslie</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lyn</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bianchi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rahimi</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Grenier</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Age-related bias and artificial intelligence: a scoping review</article-title>
          <source>Humanit Soc Sci Commun</source>
          <year>2023</year>
          <volume>10</volume>
          <issue>1</issue>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1057/s41599-023-01999-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1057/s41599-023-01999-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>d'Elia</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gabbay</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rodgers</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kierans</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Durrani</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Frith</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and health inequities in primary care: a systematic scoping review and framework</article-title>
          <source>Fam Med Community Health</source>
          <year>2022</year>
          <volume>10</volume>
          <issue>Suppl 1</issue>
          <fpage>e001670</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://fmch.bmj.com/lookup/pmidlookup?view=long&#38;pmid=36450391"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/fmch-2022-001670</pub-id>
          <pub-id pub-id-type="medline">36450391</pub-id>
          <pub-id pub-id-type="pii">fmch-2022-001670</pub-id>
          <pub-id pub-id-type="pmcid">PMC9716837</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
