<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e78500</article-id><article-id pub-id-type="doi">10.2196/78500</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>AI-Supported Digital Microscopy Diagnostics in Primary Health Care Laboratories: Scoping Review</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>von Bahr</surname><given-names>Joar</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Suutala</surname><given-names>Antti</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Diwan</surname><given-names>Vinod</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>M&#x00E5;rtensson</surname><given-names>Andreas</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Lundin</surname><given-names>Johan</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Linder</surname><given-names>Nina</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Global Public Health, Karolinska Institutet</institution><addr-line>Tomtebodav&#x00E4;gen 18 A</addr-line><addr-line>Solna</addr-line><country>Sweden</country></aff><aff id="aff2"><institution>Global Health and Migration Unit, Department of Women&#x2019;s and Children&#x2019;s Health, Uppsala University</institution><addr-line>Uppsala</addr-line><country>Sweden</country></aff><aff id="aff3"><institution>Institute for Molecular Medicine Finland (FIMM), HiLIFE, University of Helsinki</institution><addr-line>Helsinki</addr-line><country>Finland</country></aff><aff id="aff4"><institution>Department of Infectious Diseases, Uppsala University Hospital</institution><addr-line>Uppsala</addr-line><country>Sweden</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Kim</surname><given-names>Ho Heon</given-names></name></contrib><contrib contrib-type="reviewer"><name 
name-style="western"><surname>Shaffi</surname><given-names>Shamnad Mohamed</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Joar von Bahr, MD, Department of Global Public Health, Karolinska Institutet, Tomtebodav&#x00E4;gen 18 A, Solna, 17165, Sweden, 46 708561007; <email>joar.von.bahr@ki.se</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>5</day><month>1</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e78500</elocation-id><history><date date-type="received"><day>05</day><month>06</month><year>2025</year></date><date date-type="rev-recd"><day>16</day><month>11</month><year>2025</year></date><date date-type="accepted"><day>17</day><month>11</month><year>2025</year></date></history><copyright-statement>&#x00A9; Joar von Bahr, Antti Suutala, Vinod Diwan, Andreas M&#x00E5;rtensson, Johan Lundin, Nina Linder. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 5.1.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e78500"/><abstract><sec><title>Background</title><p>Digital microscopy combined with artificial intelligence (AI) is increasingly being implemented in health care, predominantly in advanced laboratory settings. However, AI-supported digital microscopy could be especially advantageous in primary health care settings, since such methods could improve access to diagnostics via automation and a decreased need for experts on-site. To our knowledge, no scoping or systematic review has previously examined the use of AI-supported digital microscopy in primary health care laboratories, and a scoping review could guide future research by providing insights into the challenges of implementing these novel methods.</p></sec><sec><title>Objective</title><p>This scoping review aimed to map published peer-reviewed studies on AI-supported digital microscopy in primary health care laboratories to generate an overview of the subject.</p></sec><sec sec-type="methods"><title>Methods</title><p>A systematic search of the databases PubMed, Web of Science, Embase, and IEEE was conducted on October 2, 2024. The inclusion criteria in the scoping review were based on 3 concepts: using digital microscopy, AI, and comparison of the results with a standard diagnostic system, and 1 context, being performed in primary health care laboratories. Additional inclusion criteria were peer-reviewed diagnostic accuracy studies published in English, performed on humans and achieving a sample-level diagnosis. 
The study selection and data extraction were performed by 2 independent researchers (JvB and AS), and cases of disagreement were resolved through discussion with a third researcher (NL). The methodology is in accordance with the Joanna Briggs Institute methodology for scoping reviews.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 3403 papers were screened during the paper identification process, of which 22 (0.6%) were included in the scoping review. The samples analyzed were as follows: blood (n=12) for blood cell and malaria detection, urine (n=4) for urinalysis and parasite detection, cytology of atypical oral (n=1) and cervical cells (n=2), stool (n=2) for parasite detection, and sputum (n=1) for ferning patterns indicating inflammation. Both conventional (n=15) and specifically developed methods (n=7) were used in sample preparation. The AI-supported digital microscopy achieved comparable diagnostic accuracy to the reference standard for complete blood counts, malaria detection, identification of stool and genitourinary parasites, screening for oral and cervical cellular atypia, detection of pulmonary inflammation, and urinalysis. Furthermore, AI-supported digital microscopy achieved higher sensitivity than manual microscopy in 6/7 (85.7%) studies that used a reference standard that allowed for this comparison.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>AI-supported digital microscopy achieved comparable diagnostic accuracy to the reference standard for diagnosing multiple targets in primary health care laboratories and may be particularly advantageous for improving diagnostic sensitivity. 
With further research addressing challenges such as scalability and cost-effectiveness, AI-supported digital microscopy could improve access to diagnostics, especially in expert-scarce and resource-limited settings.</p></sec><sec sec-type="registered-report"><title>International Registered Report Identifier (IRRID)</title><p>RR2-10.2196/58149</p></sec></abstract><kwd-group><kwd>AI</kwd><kwd>artificial intelligence</kwd><kwd>convolutional neural network</kwd><kwd>deep learning</kwd><kwd>diagnosis</kwd><kwd>digital diagnostics</kwd><kwd>machine learning</kwd><kwd>pathology</kwd><kwd>primary health care</kwd><kwd>whole slide images</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Artificial intelligence (AI) in the form of machine learning has successfully been applied to image-based diagnostics within several medical fields [<xref ref-type="bibr" rid="ref1">1</xref>]. In parallel, manual microscopy remains a cornerstone of diagnostic practice in resource-limited settings and at the primary health care (PHC) level due to its low cost, versatility, and ability to provide direct visualization of pathogens and cellular changes. It is widely used for the diagnosis of infectious diseases such as malaria and intestinal parasitic infections, as well as for full blood counts and analysis of cervical and oral cytological samples and fine needle aspirates [<xref ref-type="bibr" rid="ref2">2</xref>]. Despite its usefulness and broad applicability, microscopy is highly dependent on the availability of trained personnel and adequate infrastructure, which are often limited in such settings, leading to variability in diagnostic quality and coverage [<xref ref-type="bibr" rid="ref3">3</xref>]. These limitations have motivated the development of AI-driven approaches, where deep learning methods can assist or automate microscopy-based diagnostics to improve accuracy and accessibility. 
Deep learning approaches, particularly convolutional neural networks (CNNs) and vision transformers, have become the dominant architectures for image classification and interpretation in medical imaging [<xref ref-type="bibr" rid="ref4">4</xref>]. CNNs extract visual features, enabling recognition of complex structures such as cells, pathogens, and tissue patterns, while vision transformers can capture contextual relationships between distant structures [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>Leveraging these methods for AI-based microscopy within laboratory workflows has the potential to automate processes, increase productivity, and improve diagnostic accuracy [<xref ref-type="bibr" rid="ref6">6</xref>]. Multiple AI-based diagnostic systems have been approved for clinical use, for example, for cervical cancer screening and prostate cancer diagnostics [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref8">8</xref>]. Most of these AI-based diagnostic systems depend on expensive, high-end digital imaging instruments and require advanced laboratory infrastructure and are therefore not feasible for use in PHC laboratories [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. However, the development of less expensive, portable digital microscope scanners has enabled research on the use of AI-supported diagnostic systems suitable for PHC laboratories [<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>A PHC laboratory, also known as a tier 1 laboratory, can be defined as a laboratory primarily serving outpatients by providing point-of-care (POC) tests and manual microscopy of specimens with simple preparations. An additional responsibility is preparing fine needle aspirations and other simple tissue specimens that are later dispatched to a tier 2 laboratory in a first-level hospital for analysis. 
The PHC laboratories work with a small budget compared with more advanced laboratories and are generally managed by a laboratory technician supervised by a pathologist from a distance [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>The World Health Organization has emphasized the importance of providing diagnostics near the patient to enhance the accuracy and timeliness of diagnoses, improve clinical decision-making, and reduce the risk of diagnostic errors [<xref ref-type="bibr" rid="ref12">12</xref>]. The implementation of AI-supported digital microscopy could help address these challenges at PHC laboratories. To begin with, since PHC laboratories lack access to pathology expertise, application of AI could enable more analyses on-site, consequently increasing both the availability and speed of diagnostics [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Increased speed and access to diagnostics through AI and telemedicine could reduce health inequities by strengthening diagnostic capacity, particularly in low- and middle-income countries (LMICs) and also in sparsely populated regions of high-income countries [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. In addition, a systematic review showed that the implementation of AI-supported diagnostics for microscopy increased the effectiveness of laboratory personnel [<xref ref-type="bibr" rid="ref6">6</xref>]. 
Although there is a global shortage of microscopy experts, the shortage of these specialists is more severe in LMICs; therefore, AI-supported digital microscopy may be especially advantageous in strengthening health systems and reducing the diagnostic gaps in these settings [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>There are several diseases where AI-supported digital microscopy diagnostics in PHC laboratories could be advantageous, and studies have been performed on, for example, screening of oral and cervical cancer as well as targeting parasitic infections, such as schistosomiasis and infections caused by soil-transmitted helminths [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. Although the targeted diseases differed in these studies, researchers often encountered similar challenges due to commonalities in the methodologies applied, and a review mapping these challenges could provide valuable insights.</p><p>A preliminary search of the databases PubMed and Cochrane was performed to investigate whether any scoping or systematic review had been performed on AI-supported digital microscopy in PHC laboratories. A few related reviews were found. One systematic review of AI diagnostics for oral cancer [<xref ref-type="bibr" rid="ref20">20</xref>] overlaps to some extent with our review; however, since it focuses on a single disease, it does not provide an overview of the development of AI-supported digital microscopy in PHC laboratories. Another systematic review evaluating the application of AI to whole slide images of tissue samples stained with hematoxylin and eosin was also identified [<xref ref-type="bibr" rid="ref21">21</xref>]. 
This paper presents the current state of knowledge on AI implementation in pathology within high-end laboratories.</p><p>While these reviews are similar to this scoping review, they do not provide an overview of which diseases have been investigated in AI-supported digital microscopy and the disease-agnostic challenges faced in PHC laboratories. Furthermore, the development of more affordable scanners and improved AI, along with persistent workforce and resource constraints, makes a scoping review timely. A scoping review performed on AI-supported digital microscopy in PHC laboratories would, therefore, provide a valuable overview of the subject and collate knowledge that could guide future implementation.</p><p>This scoping review aimed to systematically review published peer-reviewed studies that have been performed related to AI-supported digital microscopy in PHC laboratories and specifically address the following questions: (1) In which diseases and for which conditions and targets has AI-based microscopy been applied for diagnostics within PHC laboratories? (2) What methods have been used in acquiring microscopy images to train and analyze AI models for diagnostics? (3) What AI models and training approaches have been applied? (4) How has the AI-supported diagnostic system performed compared with expert microscopists with regard to diagnostic accuracy?</p></sec><sec id="s1-2"><title>Review Question</title><p>What peer-reviewed studies have been published on implementing AI-supported digital microscopy in PHC laboratories? What methods have been used, what issues have been faced, and what results have been achieved?</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>The scoping review was conducted in accordance with the Joanna Briggs Institute methodology for scoping reviews updated in 2020 [<xref ref-type="bibr" rid="ref22">22</xref>]. 
A PRISMA-ScR (Preferred Reporting Items for Systematic reviews and Meta-Analyses extension for Scoping Reviews) checklist is included [<xref ref-type="bibr" rid="ref23">23</xref>]. A protocol was initially published in the Open Science Framework and later in the peer-reviewed journal JMIR Research Protocols [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. The inclusion and exclusion criteria are shown in <xref ref-type="table" rid="table1">Table 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Inclusion and exclusion criteria for identified studies.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study characteristic</td><td align="left" valign="bottom">Inclusion criteria</td><td align="left" valign="bottom">Exclusion criteria</td></tr></thead><tbody><tr><td align="left" valign="top">Language</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>English</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Non-English</p></list-item></list></td></tr><tr><td align="left" valign="top">Study design</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Published peer-reviewed studies</p></list-item><list-item><p>Diagnostic test accuracy studies</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Non&#x2013;peer reviewed studies</p></list-item><list-item><p>Not diagnostic test accuracy studies</p></list-item></list></td></tr><tr><td align="left" valign="top">Population</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Humans</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Studies performed on animals</p></list-item></list></td></tr><tr><td align="left" valign="top">Concept</td><td align="left" valign="top"><list 
list-type="bullet"><list-item><p>AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> techniques applied as a diagnostic tool on microscopy</p></list-item><list-item><p>Final slide-level diagnosis was performed and compared with a standard microscopist</p></list-item><list-item><p>Outcome valuable for clinicians</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Studies that applied AI models on images not conventionally analyzed in microscopy</p></list-item><list-item><p>No final slide diagnosis</p></list-item></list></td></tr><tr><td align="left" valign="top">Context</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Performed at primary health care laboratory (tier 1 laboratory)</p></list-item><list-item><p>No pathologist needed on site</p></list-item><list-item><p>Samples such as stool, urine, blood, cytology smears, and fine needle aspirations of superficial tissue (eg, from breast lumps) prepared with simple methods</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Studies performed in an advanced laboratory setting</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-2"><title>Eligibility Criteria</title><sec id="s2-2-1"><title>Participants</title><p>This scoping review considered studies on human participants. No exclusion was made based on age, sex, economic status, or nationality.</p></sec><sec id="s2-2-2"><title>Concept</title><p>The studies included in this scoping review fulfilled 3 concept criteria. First, the studies needed to be performed on images obtained with an imaging instrument built to automatically capture microscopy sample areas large enough for diagnostic purposes. 
Furthermore, the imaging instrument used must be operated in a way that does not require human expertise to determine what areas of the slide should be captured. Microscopy was defined as deploying a light source, optical lenses, and a digital camera to acquire a magnified image of a biological sample, generating an image conventionally interpreted by a microscopist.</p><p>Second, the studies needed to use AI when analyzing the microscopy images. AI was defined as a computer system that is trained to perform a task that typically requires human intelligence. No exclusion was made based on the architecture of the AI model or the dataset used for training. This analysis of the microscopy images could be performed on-site or in a remote cloud environment.</p><p>Third, the studies needed to compare the AI-supported diagnostic system with a standard diagnostic system. A diagnostic system was defined as all the steps included in the diagnostic process, from sample collection to the acquisition of results. The result needed to be sufficient to reach a diagnosis at the subject level.</p></sec><sec id="s2-2-3"><title>Context</title><p>The included studies needed to be performed in a PHC laboratory setting. To be defined as a PHC laboratory, also known as a tier 1 laboratory, the laboratory needed to fulfill 2 criteria. First, regarding staffing, the laboratory must be run by a laboratory technician, not requiring a pathologist on-site. Second, the sample preparations could not exceed the capabilities of a PHC laboratory. Acceptable samples collected included stool, urine, blood, cytology smears, and fine needle aspirations of superficial and easily accessible tissues (eg, from breast lumps and superficial lymph nodes). The sample staining procedure must be possible to perform manually without advanced laboratory equipment such as a microtome or tissue processor [<xref ref-type="bibr" rid="ref2">2</xref>]. 
Sample procedures that fulfill these criteria include Kato-Katz thick stool smears, blood smears, centrifuged urine samples, Papanicolaou-stained cervical or oral smears, and hematoxylin and eosin&#x2013;stained fine needle cytology smears [<xref ref-type="bibr" rid="ref2">2</xref>]. Since the context of PHC laboratories in this scoping review is based on human medicine, the exclusion criteria and initial search strategy were changed to exclude veterinary medicine, which was included in the initial protocol published on Open Science Framework [<xref ref-type="bibr" rid="ref24">24</xref>]. This adjustment was made before submitting the protocol to JMIR Research Protocols to focus the scoping review specifically on challenges in implementing AI-supported microscopy in human health care [<xref ref-type="bibr" rid="ref25">25</xref>].</p></sec></sec><sec id="s2-3"><title>Types of Sources</title><p>All types of diagnostic test accuracy studies were included. Because data collection in diagnostic test accuracy studies can be both retrospective and prospective, studies using either approach were included. In addition, studies using both paired and random designs for reference standards were included [<xref ref-type="bibr" rid="ref26">26</xref>]. The included studies had to be published in English.</p></sec><sec id="s2-4"><title>Search Strategy</title><p>The search strategy was designed to identify peer-reviewed published papers. An initial limited search of PubMed and Cochrane was undertaken to identify papers on the topic. Search blocks were created for the final search based on terms used in the identified papers. The search blocks were developed to find papers containing the 2 concepts, microscopy and AI, as well as the context specification of being in a PHC setting, with 1 block created for each. 
The databases searched were PubMed, Web of Science, Embase, and IEEE, and a detailed description of the search strategy is given in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The search was performed on October 2, 2024. The reference lists and all the papers citing the included papers were gathered through the SpiderCite tool on December 3, 2024, and included in the review process [<xref ref-type="bibr" rid="ref27">27</xref>].</p></sec><sec id="s2-5"><title>Study Selection</title><p>Following the search, all identified papers were compiled in a reference management software system, Zotero (version 6.0.20, Digital Scholar; January 13, 2023, open source), and duplicates were removed. Following the pilot test, titles and abstracts were screened by 2 independent reviewers (JvB and AS) for assessment against the inclusion and exclusion criteria using Covidence systematic review software (Veritas Health Innovation, 2024) [<xref ref-type="bibr" rid="ref28">28</xref>]. During this step, the Cohen &#x03BA; agreement was 0.59. All disagreements between JvB and AS were resolved by NL, who provided the deciding vote and could consult the other screeners for their rationale. Thereafter, the full texts of the remaining papers were assessed in detail against the inclusion criteria by 2 independent reviewers (JvB and AS). During full text screening, the Cohen &#x03BA; agreement was 0.75 for the database search and 0.36 for the citation search. Two issues accounted for 17 out of 21 disagreements in the citation search; these disagreements were resolved through discussions between JvB, AS, JL, and NL. The first issue concerned whether urine analyzers such as Iris iQ200 or Sysmex UF-100 fulfilled the PHC criteria: it was concluded that they did not, as these devices perform advanced sample preprocessing within the machine [<xref ref-type="bibr" rid="ref29">29</xref>]. 
The second issue concerned whether handcrafted feature classification qualified as AI: it was concluded that it did not, as it does not involve AI training. With these 2 issues resolved, the citation search had a Cohen &#x03BA; agreement of 0.79.</p></sec><sec id="s2-6"><title>Data Charting and Synthesis</title><p>Data were extracted from the studies included in the scoping review by 2 reviewers (JvB and AS) using a data extraction tool developed with Covidence systematic review software. The predeveloped extraction tool can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. Initially, the extraction was performed by JvB. Afterward, the extracted information was checked by AS. All disagreements were resolved through discussion between JvB and AS. When questions arose regarding an original paper, the corresponding author of that manuscript was contacted. The findings are presented narratively and additionally in a table format based on the extraction tool. The information from the extraction tool was split into 3 tables and 1 figure to increase readability. The figure contains a simplified overview of the studies, the first table summarizes the process from sample collection to scanning, the second table shows information on the AI analysis pipeline and training data, and the third table reports the study outcomes. Based on the information extracted to the tables, a narrative description was written to provide an overview of the mapped information. The studies were grouped based on the sample type investigated and the disease targeted as per the first objective of the study: to map in which diseases and for which conditions and targets has AI-based microscopy been applied for diagnostics within PHC laboratories.</p></sec><sec id="s2-7"><title>Critical Appraisal of Results</title><p>The QUADAS-2 tool was applied to investigate the bias of the included studies. 
This tool was developed to assess the risk of bias for diagnostic accuracy studies in 4 areas: patient selection, index test, reference standard, and flow and timing [<xref ref-type="bibr" rid="ref30">30</xref>]. The results are shown in the &#x201C;Results&#x201D; section, and the form used can be found in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>In total, 3403 papers were screened during the paper identification process, of which 22 (0.6%) were included in the scoping review. The results of the search and the study inclusion process are reported in full in a PRISMA (Preferred Reporting Items for Systematic reviews and Meta-Analyses) flow diagram (<xref ref-type="fig" rid="figure1">Figure 1</xref>) [<xref ref-type="bibr" rid="ref31">31</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Flowchart for study inclusion.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78500_fig01.png"/></fig><p>The oldest included study was published in 2014, while the remaining studies were published in 2018 or later, with 9 out of the 22 (40.9%) studies published in 2024. The papers were published in 15 different journals with the most common being <italic>Malaria Journal</italic> (n=4) and <italic>PLOS One</italic> (n=4). The most analyzed samples were blood (n=12), followed by urine (n=4), cytology (n=3), stool (n=2), and sputum (n=1). Different parasites (malaria, intestinal, or genitourinary parasites) were the most common targets (n=13), followed by blood cells (n=4), atypical cervical cells (n=2), atypical oral cells (n=1), and urine particles, such as cells (n=1) and crystalline ferning patterns in sputum (n=1). 
Detection of these targets was used for multiple diseases and conditions; complete blood counts (CBCs) and urinalysis were used for both organ-specific and systemic diseases, parasite detection for corresponding infections, atypical cells for screening and detection of cancer, and ferning patterns for identifying pulmonary inflammation in patients with COVID-19. An overview of all studies is shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Overview of the included studies. AI better than microscopy: White = No comparison, Yes = Higher/same sensitivity and specificity, Mix = Higher sensitivity and lower specificity, and No = Lower sensitivity and specificity. Number of samples in the test set: k=1000. Conditional formatting was applied to all numerical values, with high values shaded green and low values shaded yellow. AI: artificial intelligence; CBC: complete blood count; D: Downey cells; F: Ferning patterns indicative of inflammation; S: sputum; U: urinalysis.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78500_fig02.png"/></fig></sec><sec id="s3-2"><title>Sample Preparation and Scanning</title><p>Out of the 22 included studies, 12 relied solely on manual preparation methods and 3 used centrifuges. The remaining studies (n=7) used cartridges that simplified and eliminated manual steps. Both in-house&#x2013;built and commercially available scanners such as Grundium, MiLab, and Motic EasyScan GO were used. The lowest numerical aperture used was 0.1 and the highest was 1.4. Several scanners used both autofocus algorithms and z-stacking to avoid out-of-focus areas. 
In the 5 studies reporting the time from sample collection to diagnosis using AI-supported digital microscopy, it was 20&#x2010;40 minutes, but there was also a study reporting that it took more than 50 minutes for the scanning and AI analysis (<xref ref-type="table" rid="table2">Table 2</xref>); a more detailed table is provided in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Time for analysis and sample processing for the included studies.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Sample</td><td align="left" valign="bottom">Target</td><td align="left" valign="bottom">Sample preparation</td><td align="left" valign="bottom">Sample scanning</td><td align="left" valign="bottom">Time for analysis</td></tr></thead><tbody><tr><td align="left" valign="top">Bachar et al (2021) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">CBC<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="left" valign="top">Cartridge with 2 stains</td><td align="left" valign="top">No retrievable magnification and resolution</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Gasparin et al (2023) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">CBC</td><td align="left" valign="top">Dual-chamber cartridge with 2 stains</td><td align="left" valign="top">No retrievable magnification and resolution</td><td align="left" valign="top">Total: 30&#x2010;40 minutes</td></tr><tr><td align="left" valign="top">Gasparin et al (2022) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">CBC</td><td align="left" 
valign="top">Dual-chamber cartridge with 2 stains</td><td align="left" valign="top">No retrievable magnification and resolution</td><td align="left" valign="top">Total: 30&#x2010;40 minutes</td></tr><tr><td align="left" valign="top">Akisin et al (2023) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Downey cells</td><td align="left" valign="top">Manual blood smears stained with May-Gr&#x00FC;nwald and Giemsa</td><td align="left" valign="top">100&#x00D7; with oil immersion</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Hamid et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Cartridge with Giemsa staining</td><td align="left" valign="top">A resolution similar to 50&#x00D7; microscopy [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Total: &#x003C;30 minutes</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2020) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Manual blood smears stained with DAPI<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></td><td align="left" valign="top">A resolution of 0.9 &#x00B5;m</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Bae et al (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Cartridge with Giemsa staining</td><td align="left" valign="top">A resolution similar to 50&#x00D7;</td><td align="left" valign="top">Total: &#x003C;30 minutes [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" 
rid="ref39">39</xref>]; Scanning: 7&#x2010;10 minutes</td></tr><tr><td align="left" valign="top">Ewnetu et al (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Cartridge with Giemsa staining</td><td align="left" valign="top">A resolution similar to 50&#x00D7; [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Total: circa 20 minutes</td></tr><tr><td align="left" valign="top">Das et al (2022) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Manual blood smears stained with Giemsa</td><td align="left" valign="top">40&#x00D7; (NA<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup> 0.75)</td><td align="left" valign="top">Scanning and AI<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup> analysis: 20&#x2010;30 minutes</td></tr><tr><td align="left" valign="top">Torres et al (2018) [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Manual blood smears stained with Giemsa</td><td align="left" valign="top">100&#x00D7; with oil immersion (NA 1.25)</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Linder et al (2014) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Blood</td><td align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Thin blood smears stained with Giemsa</td><td align="left" valign="top">63&#x00D7; with oil immersion (NA 1.4)</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Horning et al (2021) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Blood</td><td 
align="left" valign="top">Malaria parasites</td><td align="left" valign="top">Manual blood smears stained with Giemsa</td><td align="left" valign="top">40&#x00D7; (NA 0.75)</td><td align="left" valign="top">Scanning and AI analysis 54 minutes</td></tr><tr><td align="left" valign="top">Stegm&#x00FC;ller et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Cervical cytology</td><td align="left" valign="top">Cellular atypia</td><td align="left" valign="top">SurePath procedure with Papanicolaou stain</td><td align="left" valign="top">40&#x00D7; (NA 0.75)</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2021) [<xref ref-type="bibr" rid="ref9">9</xref>]</td><td align="left" valign="top">Cervical cytology</td><td align="left" valign="top">Cellular atypia</td><td align="left" valign="top">Conventional Papanicolaou smears</td><td align="left" valign="top">20&#x00D7; (NA 0.4)</td><td align="left" valign="top">Scanning: 5&#x2010;10 minutes; uploading 10&#x2010;40 minutes</td></tr><tr><td align="left" valign="top">Sunny et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Oral cytology</td><td align="left" valign="top">Cellular atypia</td><td align="left" valign="top">Manual liquid-based cytology, stained with H&#x0026;E<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup> [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">20&#x00D7; (NA 0.4) [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">AI analysis: 10 minutes</td></tr><tr><td align="left" valign="top">Ghaderinia et al (2024) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Sputum</td><td align="left" valign="top">Ferning patterns</td><td align="left" valign="top">Sedimented unstained sputum samples</td><td align="left" valign="top">40&#x00D7; magnification</td><td 
align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Soares et al (2024) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Stool</td><td align="left" valign="top">Intestinal parasites</td><td align="left" valign="top">Fecal samples with centrifugation, flotation, and sedimentation [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">No retrievable magnification and resolution</td><td align="left" valign="top">AI analysis: circa 3 minutes</td></tr><tr><td align="left" valign="top">Lundin et al (2024) [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Stool</td><td align="left" valign="top">Soil-transmitted helminths</td><td align="left" valign="top">Kato-Katz thick smears</td><td align="left" valign="top">20&#x00D7; (NA 0.4)</td><td align="left" valign="top">Scanning 5&#x2010;10 minutes; uploading 10&#x2010;20 minutes; AI analysis 5 minutes</td></tr><tr><td align="left" valign="top">Sahu et al (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Urine</td><td align="left" valign="top">Urinalysis</td><td align="left" valign="top">Cartridge that concentrates the urine through 5 minutes of sedimentation</td><td align="left" valign="top">40&#x00D7; (NA 0.65)</td><td align="left" valign="top">No retrievable information</td></tr><tr><td align="left" valign="top">Meulah et al (2022) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Urine</td><td align="left" valign="top">Schistosoma</td><td align="left" valign="top">A membrane capturing filtered urine particles</td><td align="left" valign="top">4&#x00D7; (NA 0.1)</td><td align="left" valign="top">Scanning: 12 minutes; AI analysis: 5 minutes</td></tr><tr><td align="left" valign="top">Oyibo et al (2022) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Urine</td><td align="left" valign="top">Schistosoma</td><td 
align="left" valign="top">A membrane capturing filtered urine particles</td><td align="left" valign="top">4&#x00D7; (NA 0.1)</td><td align="left" valign="top">Scanning: 12 minutes; AI analysis: 10&#x2010;12 minutes</td></tr><tr><td align="left" valign="top">Meulah et al (2024) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Urine</td><td align="left" valign="top">Schistosoma</td><td align="left" valign="top">A membrane capturing filtered urine particles</td><td align="left" valign="top">4&#x00D7; (NA 0.1)</td><td align="left" valign="top">Scanning and AI analysis: 25 minutes</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>CBC: complete blood count.</p></fn><fn id="table2fn2"><p><sup>b</sup>DAPI: 4',6-diamidino-2-phenylindole.</p></fn><fn id="table2fn3"><p><sup>c</sup>NA: numerical aperture.</p></fn><fn id="table2fn4"><p><sup>d</sup>AI: artificial intelligence.</p></fn><fn id="table2fn5"><p><sup>e</sup>H&#x0026;E: hematoxylin and eosin.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Training Data and AI Analysis Pipeline</title><p>For training AI models, most studies used in-house collected and annotated datasets of varying sizes; some had hundreds of target objects in their dataset, whereas others had hundreds of thousands. Many studies reported using pretrained neural networks with different datasets such as COCOtrain2017 and ImageNet for training [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. 
One study used unlabeled data from their collection for unsupervised pretraining and incorporated publicly available datasets [<xref ref-type="bibr" rid="ref44">44</xref>].</p><p>The AI analysis pipeline for all included studies can be summarized as follows: a digitized microscopy sample was provided as input, fields-of-view (FOVs) were analyzed, FOV results were aggregated to produce a slide-level diagnosis, and this diagnosis served as the output (<xref ref-type="fig" rid="figure3">Figure 3</xref>). The digitized sample used as input could consist of either whole-slide images or multiple FOVs captured from the physical slide. The FOV analysis involved both the identification and the classification of specific targets; however, not all studies used this first identification of suspicious FOVs (regions of interest).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Visualization of the artificial intelligence analysis pipeline. As an illustrative case, the pipeline is applied to a digitized fecal smear with <italic>Ascaris lumbricoides</italic> parasite eggs.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78500_fig03.png"/></fig><p>An initial detection of suspicious FOVs was described in half of the studies; when this was applied, algorithmic approaches, shallow CNNs, or support vector machines (SVMs) were used. The purpose of performing this initial identification of FOVs of interest was to reduce the number of FOVs that needed to be analyzed by more computationally expensive AI algorithms. An additional advantage of performing an initial detection of suspicious FOVs was that the FOVs were more homogeneous in content and quality, which can improve the accuracy of the AI classifier. All studies used AI for the FOV classification step, predominantly CNNs, except for the oldest study that used an SVM [<xref ref-type="bibr" rid="ref42">42</xref>]. 
One approach was to use multiple classification steps, for example, by using SVM or shallow CNNs to classify targets and then reclassifying those with higher uncertainty with deeper CNNs [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref47">47</xref>].</p><p>To achieve the slide-level diagnosis from the FOV analysis results, multiple methods were used; for example, classifying slides with any number of positive targets as positive, using different cutoffs based on confidence or number of findings, or using AI-based methods such as SVMs and multiple instance learning (<xref ref-type="table" rid="table3">Table 3</xref>).</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Artificial intelligence model training and architecture for the included studies.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Sample and target</td><td align="left" valign="bottom">Samples in training set</td><td align="left" valign="bottom">AI<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> model architecture and training</td></tr></thead><tbody><tr><td align="left" valign="top">Bachar et al (2021) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Blood and CBC<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">No retrievable information</td><td align="left" valign="top">AI model with separate pipelines for platelets, RBCs<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup>, and WBCs<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup> (1) algorithmically identifies candidates, and (2) candidates categorized by specialized CNNs<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup> and machine learning algorithms</td></tr><tr><td align="left" valign="top">Gasparin et al (2023) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td 
align="left" valign="top">Blood and CBC</td><td align="left" valign="top">Expert-verified training data gathered throughout development; no further retrievable information</td><td align="left" valign="top">AI model with CNN architecture using the YOLO<sup><xref ref-type="table-fn" rid="table3fn6">f</xref></sup> framework</td></tr><tr><td align="left" valign="top">Gasparin et al (2022) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Blood and CBC</td><td align="left" valign="top">Expert-verified training data gathered throughout development; no further retrievable information [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">AI model with CNN architecture using the YOLO framework</td></tr><tr><td align="left" valign="top">Akisin et al (2023) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Blood and Downey cells</td><td align="left" valign="top">15,885 expert-annotated WBCs containing 172 Downey cells</td><td align="left" valign="top">AI model with YOLOv4-tiny-based framework with spatial attention using average and maximum pooling along the channel axis</td></tr><tr><td align="left" valign="top">Hamid et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">No retrievable information</td><td align="left" valign="top">AI model with (1) U-Net segmenting RBCs, (2) a 3-layer CNN removing normal RBCs, (3) a 23-layer CNN for detecting parasites, and (4) 1 positive object sufficient for slide positivity [<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2020) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">25 thin blood smears with annotated trophozoites (n=5059) and other fluorescence signals (n=856)</td><td align="left" 
valign="top">AI model with (1) Circle Hough Transform identifying RBCs, (2) fluorescence signals from within the detected RBCs are used, and (3) RBCs with fluorescence signals were analyzed with a CNN (GoogLeNet)</td></tr><tr><td align="left" valign="top">Bae et al (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">No retrievable information</td><td align="left" valign="top">AI model with (1) U-Net segmenting RBCs, (2) a 3-layer CNN removing normal RBCs, (3) a 23-layer CNN for detecting parasites, and (4) 1 positive object sufficient for slide positivity</td></tr><tr><td align="left" valign="top">Ewnetu et al (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">No retrievable information</td><td align="left" valign="top">AI model with (1) U-Net segmenting RBCs, (2) a 3-layer CNN removing normal RBCs, (3) a 23-layer CNN for detecting parasites, and (4) 1 positive object sufficient for slide positivity [<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">Das et al (2022) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">Subset of 1452 blood samples and 956,531 annotated parasite objects [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">AI model analyzes only thick region with (1) potential parasites identified through dynamic thresholding and SVM<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup>, (2) CNN (VGG<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup> architecture) classifies parasites, and (3) a predetermined threshold decides slide positivity [<xref ref-type="bibr" rid="ref54">54</xref>]</td></tr><tr><td align="left" valign="top">Torres et al (2018) [<xref ref-type="bibr" 
rid="ref41">41</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">Approximately 150 high-quality thick films with 75,000 parasites</td><td align="left" valign="top">AI model for thick region with (1) local thresholding and low-cost methods to identify potential parasites, (2) CNNs (VGG architecture) classifies parasites and stage, and (3) number and confidence of parasites determine slide-level diagnosis</td></tr><tr><td align="left" valign="top">Linder et al (2014) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">A training set (n=10) with parasites (n=8329) and a validation set (n=6) with parasites (n=569)</td><td align="left" valign="top">AI model with (1) thresholding algorithm segments potential parasites, and (2) mathematical feature extraction and classification with SVM</td></tr><tr><td align="left" valign="top">Horning et al (2021) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Blood and malaria parasites</td><td align="left" valign="top">Thick model: Subset of 1452 blood samples with 956,531 parasite objects [<xref ref-type="bibr" rid="ref54">54</xref>].<break/>Thin model: 798 blood samples with more than 92,000 parasites [<xref ref-type="bibr" rid="ref55">55</xref>].<break/>Tuning slides: 48 slides</td><td align="left" valign="top">Separate AI models for thin and thick regions:<break/>Thick-AI model: (1) potential parasites identified through dynamic thresholding and SVM. (2) CNNs (VGG architecture) classify [<xref ref-type="bibr" rid="ref54">54</xref>].<break/>Thin AI model: (1) potential parasites detected with a gradient-boosted tree classifier. 
(2) CNNs for classifying parasite stages [<xref ref-type="bibr" rid="ref55">55</xref>]</td></tr><tr><td align="left" valign="top">Stegm&#x00FC;ller et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Cervical cytology and cellular atypia</td><td align="left" valign="top">A stratified 4-fold split approach to partition the 307 slides with 1228 tile-level annotations into training, validation, and test sets; 2 public datasets also used</td><td align="left" valign="top">AI model with (1) CNN (ResNet-50) with self-supervised training (DINO) and then supervised training with cell pasting, and (2) 8 most suspicious tiles used for slide classification with multiple instance learning (CLAM)</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2021) [<xref ref-type="bibr" rid="ref9">9</xref>]</td><td align="left" valign="top">Cervical cytology and cellular atypia</td><td align="left" valign="top">350 WSIs<sup><xref ref-type="table-fn" rid="table3fn9">i</xref></sup> were used for training with 16,133 annotations made by a pathologist</td><td align="left" valign="top">AI model with (1) a CNN that segments slide into high- and low-grade atypia, and (2) a threshold that decides slide positivity</td></tr><tr><td align="left" valign="top">Sunny et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Oral cytology and cellular atypia</td><td align="left" valign="top">252 atypical and 280 normal cell images annotated (90% for training and 10% for validation)</td><td align="left" valign="top">AI model with (1) cells segmented to single cells, (2) a CNN (Inception V3) used for classification, and (3) cut-offs and SVMs based on percentage and mean score of atypical cells and mean cell score for slide diagnosis</td></tr><tr><td align="left" valign="top">Ghaderinia et al (2024) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Sputum and ferning patterns 
(inflammation)</td><td align="left" valign="top">650 images (520 training and 130 validation) derived from 70 participants</td><td align="left" valign="top">AI model with (1) a CNN (EfficientNet-B0); and (2) CNN output used to classify sample</td></tr><tr><td align="left" valign="top">Soares et al (2024) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Stool and intestinal parasites (both helminths and protozoans)</td><td align="left" valign="top">51,919 images containing 12,225 annotations of 15 parasite species (ranging from 83 to 3297 per species) [<xref ref-type="bibr" rid="ref56">56</xref>]</td><td align="left" valign="top">AI model with (1) classification with extracted features and probabilistic SVM, and (2) uncertain objects analyzed with a CNN (Vgg-16) [<xref ref-type="bibr" rid="ref56">56</xref>]</td></tr><tr><td align="left" valign="top">Lundin et al (2024) [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Stool and soil-transmitted helminths</td><td align="left" valign="top">388 samples with 15,058 annotations: <italic>Ascaris lumbricoides</italic> (n=2299), <italic>Trichuris trichiura</italic> (n=2727), hookworm (n=552), and artifacts (n=9480)</td><td align="left" valign="top">AI model with (1) YOLOv2 used to detect potential parasites, (2) a CNN (ResNet50) used for classification, and (3) 1 parasite sufficient for slide positivity</td></tr><tr><td align="left" valign="top">Sahu et al (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Urine and urinalysis</td><td align="left" valign="top">A dataset annotated by a pathologist</td><td align="left" valign="top">AI model with (1) a single CNN (YOLOX) to detect objects, and (2) object counts used to grade slide in tiers of positivity</td></tr><tr><td align="left" valign="top">Meulah et al (2022) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td 
align="left" valign="top">Both spiked laboratory samples and 33 field samples [<xref ref-type="bibr" rid="ref17">17</xref>]</td><td align="left" valign="top">AI model with (1) a CNN segmentation model (U-Net architecture) [<xref ref-type="bibr" rid="ref17">17</xref>]</td></tr><tr><td align="left" valign="top">Oyibo et al (2022) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td align="left" valign="top">17,799 annotated <italic>Schistosoma haematobium</italic> eggs in 2997 FOV images; dataset split into 80% training and 20% validation set</td><td align="left" valign="top">AI model with (1) a CNN (DeepLabv3-MobileNetV3), (2) egg-shaped ellipses fitted to segmented regions for counting, and (3) 1 parasite fulfilling criteria sufficient for slide positivity</td></tr><tr><td align="left" valign="top">Meulah et al (2024) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td align="left" valign="top">17,799 annotated <italic>S. 
haematobium</italic> eggs in 2997 FOV<sup><xref ref-type="table-fn" rid="table3fn10">j</xref></sup> images; dataset split into 80% training and 20% validation set [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">AI model with (1) a CNN (DeepLabv3-MobileNetV3), (2) egg-shaped ellipses fitted to segmented regions for counting, and (3) 1 parasite fulfilling criteria sufficient for slide positivity [<xref ref-type="bibr" rid="ref52">52</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table3fn2"><p><sup>b</sup>CBC: complete blood count.</p></fn><fn id="table3fn3"><p><sup>c</sup>RBCs: red blood cells.</p></fn><fn id="table3fn4"><p><sup>d</sup>WBCs: white blood cells.</p></fn><fn id="table3fn5"><p><sup>e</sup>CNNs: convolutional neural networks.</p></fn><fn id="table3fn6"><p><sup>f</sup>YOLO: You Only Look Once.</p></fn><fn id="table3fn7"><p><sup>g</sup>SVM: support vector machine.</p></fn><fn id="table3fn8"><p><sup>h</sup>VGG: Visual Geometry Group.</p></fn><fn id="table3fn9"><p><sup>i</sup>WSIs: whole slide images.</p></fn><fn id="table3fn10"><p><sup>j</sup>FOV: field of view. </p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4"><title>Study Outcomes</title><p>When the study outcomes were mapped, differences were observed in the reference standards, study sizes, and performance metrics used. For the 3 studies investigating CBCs, the Pearson correlation coefficient was compared with high-end analyzers and was above 0.9 for all cells except basophils, where the value ranged from 0.6 to 0.8 in all studies [<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref34">34</xref>]. Nine studies used manual microscopy of the same samples as the reference standard and reported results with sensitivity and specificity (for malaria, soil-transmitted helminths, Schistosoma, and cervical cell atypia). 
Across these 9 studies, all reported a sensitivity and specificity of at least 80% except for 1 with a lower sensitivity of 57% [<xref ref-type="bibr" rid="ref35">35</xref>] and 3 with lower specificity (75.6%, 78.4%, and 48.9%) [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. One study included results with and without human expert verification: human verification of AI model findings increased specificity by 29.5% but conversely led to a sensitivity decrease of 0.9% for malaria detection [<xref ref-type="bibr" rid="ref36">36</xref>]. Seven studies used reference standards such as polymerase chain reaction (PCR) or histology and included comparisons between AI-supported digital microscopy and manual microscopy; in 4 of these 7 studies, a higher sensitivity but lower specificity was reported for AI-supported digital microscopy. Of the remaining 3 studies, 1 evaluated urinalysis, in which manual analysis had higher sensitivity, specificity, or both across all targets [<xref ref-type="bibr" rid="ref50">50</xref>], 1 for intestinal parasites where the AI had higher sensitivity and the same specificity [<xref ref-type="bibr" rid="ref47">47</xref>], and 1 for oral atypia where the AI had both higher sensitivity and specificity [<xref ref-type="bibr" rid="ref19">19</xref>]. The number of samples included in the diagnostic evaluations ranged from 27 to 2250. 
Most studies (n=15) achieved a low risk for bias according to QUADAS-2; however, some studies either lacked the information needed to properly evaluate bias or had methodological issues (n=7) (<xref ref-type="table" rid="table4">Table 4</xref>).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Results for the included studies.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Sample and target</td><td align="left" valign="bottom">Human verification</td><td align="left" valign="bottom">Outcome</td><td align="left" valign="bottom">Manual microscopy</td><td align="left" valign="bottom">Number of samples</td><td align="left" valign="bottom">Reference standard</td><td align="left" valign="bottom">QUADAS-2<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup></td></tr></thead><tbody><tr><td align="left" valign="top">Bachar et al (2021) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Blood and CBC<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">No</td><td align="left" valign="top"><italic>r</italic><sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup>&#x2265;0.94 (except basophils=0.6)</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup></td><td align="left" valign="top">679</td><td align="left" valign="top">Hematology analyzer</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Gasparin et al (2023) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Blood and CBC</td><td align="left" valign="top">Yes</td><td align="left" valign="top"><italic>r</italic>&#x2265;0.94 (except eosinophils/basophils=0.81)</td><td align="left" valign="top">NR</td><td align="left" valign="top">550</td><td align="left" valign="top">Hematology analyzer</td><td align="left" 
valign="top">Low</td></tr><tr><td align="left" valign="top">Gasparin et al (2022) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Blood and CBC</td><td align="left" valign="top">Yes</td><td align="left" valign="top"><italic>r</italic>&#x2265;0.91 (except eosinophils/basophils=0.80)</td><td align="left" valign="top">NR</td><td align="left" valign="top">450</td><td align="left" valign="top">Hematology analyzer</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Akisin et al (2023) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Blood and Downey cells</td><td align="left" valign="top">No</td><td align="left" valign="top">Se<sup><xref ref-type="table-fn" rid="table4fn5">e</xref></sup> 57%, Sp<sup><xref ref-type="table-fn" rid="table4fn6">f</xref></sup> 100%</td><td align="left" valign="top">NR</td><td align="left" valign="top">31</td><td align="left" valign="top">Manual microscopy</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Hamid et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Se 90.2%, Sp 96.2%</td><td align="left" valign="top">Se 89.3%, Sp 100%</td><td align="left" valign="top">190</td><td align="left" valign="top">PCR<sup><xref ref-type="table-fn" rid="table4fn7">g</xref></sup></td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2020) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">No</td><td align="left" valign="top"><italic>r</italic>=0.90 for parasite counts</td><td align="left" valign="top">NR</td><td align="left" valign="top">27</td><td align="left" valign="top">PCR</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Bae 
et al (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 95.1%, Sp 91.4%</td><td align="left" valign="top">NR</td><td align="left" valign="top">488</td><td align="left" valign="top">Microscopy and RDTs<sup><xref ref-type="table-fn" rid="table4fn8">h</xref></sup></td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Ewnetu et al (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Se 83%&#x2010;93.9%, Sp 94%&#x2010;97.6%</td><td align="left" valign="top">Se 67%&#x2010;69.9%, Sp 97%&#x2010;98.7%</td><td align="left" valign="top">1165</td><td align="left" valign="top">PCR</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Das et al (2022) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 91.1%, Sp 75.6%</td><td align="left" valign="top">NR</td><td align="left" valign="top">2250</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Torres et al (2018) [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">No</td><td align="left" valign="top">Site 1: Se 72%, Sp 85%<break/>Site 2: Se 52%, Sp 70%</td><td align="left" valign="top">Site 1: Se 68%, Sp 100%<break/>Site 2: Se 42%, Sp 97%</td><td align="left" valign="top">Site 1: 400 Site 2: 300</td><td align="left" valign="top">PCR</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Linder et al (2014) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" 
valign="top">Yes</td><td align="left" valign="top">Se 95%, Sp 100%</td><td align="left" valign="top">NR</td><td align="left" valign="top">31</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Horning et al (2021) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Blood and malaria</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 86.7%, Sp 100%</td><td align="left" valign="top">NR</td><td align="left" valign="top">35</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Stegm&#x00FC;ller et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Cervical cytology and cellular atypia</td><td align="left" valign="top">No</td><td align="left" valign="top">Mean area under curve 77.5</td><td align="left" valign="top">NR</td><td align="left" valign="top">307 (4-fold split)</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Holmstr&#x00F6;m et al (2021) [<xref ref-type="bibr" rid="ref9">9</xref>]</td><td align="left" valign="top">Cervical cytology and cellular atypia</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 100%, Sp 78.4%</td><td align="left" valign="top">NR</td><td align="left" valign="top">361</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Sunny et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Oral cytology and cellular atypia</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 89%, Sp 100%</td><td align="left" valign="top">Se 59%, Sp 67%</td><td align="left" valign="top">30</td><td align="left" valign="top">Histology</td><td align="left" valign="top">Low</td></tr><tr><td align="left" 
valign="top">Ghaderinia et al (2024) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Sputum and ferning patterns (inflammation)</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 94.3%, Sp 95.9%</td><td align="left" valign="top">NR</td><td align="left" valign="top">160</td><td align="left" valign="top">CT<sup><xref ref-type="table-fn" rid="table4fn9">i</xref></sup></td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Soares et al (2024) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Stool and intestinal parasites (both helminths and protozoans)</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 86%, Sp 100%</td><td align="left" valign="top">Se 81%, Sp 100%</td><td align="left" valign="top">73</td><td align="left" valign="top">Manual and AI<sup><xref ref-type="table-fn" rid="table4fn10">j</xref></sup> microscopy</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Lundin et al (2024) [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Stool and soil-transmitted helminths</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 76.4%&#x2010;91.9% Sp 89.7%&#x2010;98.2%</td><td align="left" valign="top">NR</td><td align="left" valign="top">792</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Sahu et al (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Urine and urinalysis</td><td align="left" valign="top">No</td><td align="left" valign="top">Se &#x2265;81% except for bacteria (76%) and casts (71%), Sp &#x2265;88%</td><td align="left" valign="top">Se &#x2265;94% and Sp &#x2265;93%</td><td align="left" valign="top">240</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" 
valign="top">Meulah et al (2022) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 87.3%, Sp 48.9%</td><td align="left" valign="top">NR</td><td align="left" valign="top">487</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Low</td></tr><tr><td align="left" valign="top">Oyibo et al (2022) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 93.8%, Sp 93.9%</td><td align="left" valign="top">NR</td><td align="left" valign="top">65</td><td align="left" valign="top">Microscopy</td><td align="left" valign="top">Mostly low</td></tr><tr><td align="left" valign="top">Meulah et al (2024) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Urine and Schistosoma</td><td align="left" valign="top">No</td><td align="left" valign="top">Se 62.9%, Sp 78.8%</td><td align="left" valign="top">Se 61.9%, Sp 96.4%</td><td align="left" valign="top">339</td><td align="left" valign="top">PCR and particle lateral flow test</td><td align="left" valign="top">Low</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>QUADAS-2: Quality Assessment of Diagnostic Accuracy Studies 2.</p></fn><fn id="table4fn2"><p><sup>b</sup>CBC: complete blood count.</p></fn><fn id="table4fn3"><p><sup>c</sup><italic>r</italic>: Pearson correlation coefficient.</p></fn><fn id="table4fn4"><p><sup>d</sup>NR: not reported. 
</p></fn><fn id="table4fn5"><p><sup>e</sup>Se: sensitivity.</p></fn><fn id="table4fn6"><p><sup>f</sup>Sp: specificity.</p></fn><fn id="table4fn7"><p><sup>g</sup>PCR: polymerase chain reaction.</p></fn><fn id="table4fn8"><p><sup>h</sup>RDT: rapid diagnostic test.</p></fn><fn id="table4fn9"><p><sup>i</sup>CT: computed tomographic scan.</p></fn><fn id="table4fn10"><p><sup>j</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Summary</title><p>This scoping review included 22 publications deploying AI-supported digital microscopy in PHC laboratories for multiple targets, published in 15 different journals. These studies fulfilled the concepts of using AI and digital microscopy to achieve a slide-level diagnosis in PHC laboratories. The number of included studies was low, given the extensive research on AI in medical imaging. The exclusion of 58 papers due to the absence of sample-level diagnoses and of 71 papers due to not being conducted in PHC laboratories suggests that most research has focused on target detection or advanced laboratory settings, rather than evaluating end-to-end diagnostic systems for PHC use. This is notable, given the potential benefits of such technologies in PHC laboratories. However, 9 of the 22 included studies were published in 2024, indicating an upward trend in studies focused on AI-supported digital microscopy at the PHC level.</p><p>The studies targeting specific diseases primarily focused on conditions that disproportionately affect vulnerable populations. 
The results from the included studies in this scoping review indicate that AI-supported digital microscopy can achieve accuracy comparable to that of standard microscopy for malaria, intestinal parasites, cell atypia, and urinalysis; to that of computed tomography for detecting pulmonary inflammation in patients with COVID-19; and to that of conventional hematology analyzers for CBC. Diagnostic accuracy comparable to the reference standard was defined as sensitivity and specificity of &#x003E;80% or a Pearson correlation of &#x003E;0.90. The reported results also indicate that AI-supported digital microscopy could be particularly advantageous for increasing sensitivity, as 6 out of 7 (85.7%) studies comparing it with manual microscopy reported higher sensitivity for AI-supported digital microscopy. Furthermore, the objective of the scoping review was to map target-agnostic challenges and solutions regarding sample preparation, scanning, AI methods, and human integration and discuss future implications for AI-supported digital microscopy in PHC laboratories.</p></sec><sec id="s4-2"><title>Sample Preparation</title><p>Variability in target morphology and artifacts may reduce AI performance and can be introduced in all steps from sample collection to scanning. Manual steps in sample preparation are prone to introducing variability, and all included studies involved such steps, with 12 relying on entirely manual preparation. Decreased specificity due to sample variability was observed in one study, where poorly prepared samples led to the introduction of artifacts [<xref ref-type="bibr" rid="ref41">41</xref>], and in another study where synthetically prepared samples in the training dataset lacked artifacts present in real-world samples [<xref ref-type="bibr" rid="ref51">51</xref>]. Sensitivity can also be affected by variability in preparation, as demonstrated in one study on soil-transmitted helminths [<xref ref-type="bibr" rid="ref49">49</xref>]. 
This indicates that variability introduced during sample preparation may be a major hurdle when developing AI-supported digital microscopy for PHC laboratories as more steps are performed manually.</p><p>There are possible solutions to sample variability. For example, improving consistency through good laboratory practices and standard operating procedures is one way to minimize sample variability; however, this requires system-specific training for personnel, good laboratory infrastructure, and quality controls, which might reduce the feasibility of implementation in PHC laboratories. The use of equipment such as cartridges to limit manual steps is another approach to minimize variability [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. This may lower the demands on personnel; however, using disease-specific consumables may introduce issues, for example, increased costs. Another potential approach to minimize variability is to simplify sample preparation, for example, by removing staining or smearing steps [<xref ref-type="bibr" rid="ref57">57</xref>]. Although this could reduce variability, it may also lead to a loss of valuable diagnostic information, in turn decreasing the AI model performance.</p></sec><sec id="s4-3"><title>Scanning</title><p>The scanner needs to capture sufficient information to allow AI model classification of targets, but scanning large sample areas at high magnification is time-consuming. The scanning time can be decreased by analyzing a smaller sample area. However, this can lead to a reduced ability to detect low-density targets, highlighting a trade-off between faster diagnostics and high sensitivity for these cases. This is exemplified by one solution for malaria, where clinicians are able to increase the area analyzed to detect low-density infections [<xref ref-type="bibr" rid="ref39">39</xref>]. 
Another solution to decrease scanning time is to use a lower magnification than what is conventionally used by microscopists: this approach achieved diagnostic accuracy comparable to manual microscopy for cytology [<xref ref-type="bibr" rid="ref19">19</xref>], malaria [<xref ref-type="bibr" rid="ref36">36</xref>], and parasitic infections [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Nonetheless, the use of lower magnification could result in information loss that reduces the AI model performance.</p></sec><sec id="s4-4"><title>Training Data</title><p>All studies that specified the AI-training methods used variants of supervised learning which require annotated data. Annotating data is time-consuming and requires digitized samples that are rarely produced in PHC laboratories due to limited access to scanners. Therefore, many studies had to collect and annotate their own datasets rather than access existing data. One study also used laboratory-enriched samples to increase the number of targets [<xref ref-type="bibr" rid="ref51">51</xref>]. In some cases, certain targets were underrepresented in the dataset, which caused the AI models to perform poorly on those [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref43">43</xref>], emphasizing the challenges of limited training data. To overcome limited datasets, approaches, such as using data augmentation, publicly available datasets, CNNs with pretrained weights, and unsupervised learning, were deployed [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. 
Although there are many ways to limit the effect of small datasets, the improved diagnostic performance in studies, iteratively collecting larger datasets, highlights that insufficient training data remain a limiting factor when developing AI-supported digital microscopy for PHC laboratories [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. Larger studies and collaborations that allow data sharing could provide solutions to the issue of limited training data.</p></sec><sec id="s4-5"><title>AI Analysis Pipeline</title><p>The AI analysis pipelines used can be broadly divided into 3 main steps: FOV identification, FOV classification, and aggregation for sample-level diagnosis (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Given that the analysis of a single sample took more than 30 minutes in some studies and that access to graphics processing units may be limited in PHC laboratories, efficiency becomes important. One strategy to minimize computational demands is to combine identification and classification, as implemented in the You Only Look Once framework, which uses a single CNN [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref58">58</xref>]. Another strategy is to first identify targets using fast and computationally efficient methods and subsequently feed the suspicious FOVs into more computationally intensive algorithms for classification. 
Using an initial object identification step may also enhance the uniformity of the data entering the classification stage, which may be particularly beneficial due to the variability in manually prepared samples that are commonly used in PHC laboratories [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>For the third step, slide-level classification, different approaches were used: slides were classified as positive if a single positive target was detected; others applied cutoffs to reduce noise and false positives. In addition, certain studies used methods such as SVMs [<xref ref-type="bibr" rid="ref19">19</xref>] or multiple instance learning [<xref ref-type="bibr" rid="ref44">44</xref>] to aggregate slide-level results. While these methods may improve classification, they carry a risk of overfitting, especially since the number of training samples at the slide level is much smaller than at the object level.</p></sec><sec id="s4-6"><title>Manual Verification</title><p>One study investigated AI-supported digital microscopy with and without human verification. In the study, human verification was performed on targets initially classified as positive by the AI models, which led to a 0.9% drop in sensitivity but a 29.5% increase in specificity [<xref ref-type="bibr" rid="ref36">36</xref>]. This demonstrates that, with human intelligence, AI errors can be identified and removed without a substantial loss of sensitivity. This is in line with the high specificity presented in studies using human verification, which all showed specificity of &#x003E;90% [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. 
Expanding human verification to include borderline cases classified as negative may also be used to reduce false negatives and increase sensitivity.</p></sec><sec id="s4-7"><title>Reported Diagnostic Performance</title><p>The reported diagnostic performance of the studies included in the scoping review indicates that AI-supported digital microscopy may achieve comparable diagnostic accuracy in PHC laboratories; however, it is important to account for methodological choices when interpreting the results, and, due to the heterogeneity in study designs, comparisons between studies and diseases become challenging. One example of methodological choices is that most studies used manual microscopy as the reference standard. Since microscopy itself is an imperfect diagnostic test, it can affect the performance of the index test and may result in over- or underestimation of the diagnostic accuracy of AI-supported microscopy. Two studies argued that this limitation may have reduced the apparent diagnostic accuracy of AI-supported digital microscopy [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Another aspect to consider is the number of samples on which the method was evaluated. For example, the study in which the AI-supported digital microscopy had higher sensitivity and specificity than manual microscopy analyzed 30 samples [<xref ref-type="bibr" rid="ref19">19</xref>]. Generally, the QUADAS-2 tool indicated a low risk of bias. However, it did not capture the issue of AI models being trained on samples from the same collection, which is a potential source of poor generalizability for AI. This can occur even when the detection algorithms are trained on different datasets, for example, when thresholds or rules for deriving slide-level diagnoses are developed using the same slides on which diagnostic performance is later evaluated, leading to inflated estimates of diagnostic accuracy. 
However, some studies avoided training on the data from the same collection, included more than 100 test samples, had low risk of QUADAS-2 bias, and used a more advanced reference standard and still achieved comparable or better results than manual microscopy [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref53">53</xref>].</p></sec><sec id="s4-8"><title>Limitations</title><p>A limitation of the extraction process was the lack of consistent terminology used in the field. This was exemplified in the search block aimed at identifying PHC. Terms such as &#x201C;low-cost&#x201D; and &#x201C;PHC&#x201D; were included but not &#x201C;remote,&#x201D; which was used to describe one study that fulfilled the inclusion criteria [<xref ref-type="bibr" rid="ref59">59</xref>]. Another limitation was the broad definition of PHC laboratories adopted from Fleming et al, which led to the inclusion of studies using relatively advanced methods, such as oil immersion scanning at 100&#x00D7; magnification and specially designed cartridges for sample preparation [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. These methods may be difficult to implement in some PHC laboratories, but to achieve a more comprehensive overview of the field, the inclusion of these studies was deemed advantageous [<xref ref-type="bibr" rid="ref25">25</xref>]. A third limitation stems from the lack of standardized methodological descriptions in the included studies. 
In some cases, key information, such as scanner magnification, was missing or reported inconsistently across studies, which complicated comparisons in the scoping review.</p></sec><sec id="s4-9"><title>Steps Needed to Achieve Clinical Implementation</title><p>In this scoping review, we identified hurdles that were shared across several studies and that must be overcome before implementing AI-supported digital microscopy. Many developers have recognized a need to iteratively improve their AI-supported digital microscopy; thus, a framework that enables continuous improvements might be advantageous for supporting the development of more accurate AI models. This requires health policy guidelines and frameworks that give details on how these processes should be conducted [<xref ref-type="bibr" rid="ref60">60</xref>]. Another hurdle in the implementation of AI-supported digital microscopy is cost. The development of lower-cost scanners has reduced expenses; however, most commercially available scanners remain more expensive than traditional microscopes [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref61">61</xref>]. Microscopes typically function reliably over long periods, and scanners may need comparable longevity for AI-supported digital microscopy to be cost-effective. One potential solution to this is modular scanner construction, which may improve its lifespan through component updates and thereby its sustainability. Some systems in this review were developed for specific diseases, which increases the cost of implementing them in PHC laboratories, as multiple systems would have to be acquired to replace microscopes. To make it economically feasible to implement AI-supported digital microscopy, it may, therefore, be necessary to adopt a multipurpose approach where systems are developed for multiple diseases. 
Some studies show that scanners can digitize different samples and similar approaches can be applied to different diseases [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. Systems developed for specific diseases may instead be useful in large screening programs or epidemiological surveys, for example, for soil-transmitted helminths, malaria, or cancer screening. Cost-effectiveness trials could provide guidance for the feasibility of AI-supported digital microscopy and evaluate single-disease systems against multipurpose platforms.</p></sec><sec id="s4-10"><title>Potential Implications for PHC Diagnostics</title><p>AI-supported digital microscopy has potential advantages compared with manual microscopy in PHC laboratories. First, it could improve diagnostic accuracy, especially sensitivity, and this may be further enhanced by incorporating human verification [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Second, it might increase the access and timeliness of diagnostics by allowing diagnostic procedures to be performed at the POC in PHC laboratories and eliminate the need to send samples elsewhere for analysis [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. Third, it could alleviate the workload of personnel through task shifting. This could increase the productivity of experts and thereby access to image-based diagnosis [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>].</p><p>Other POC diagnostic technologies, including rapid diagnostic tests (RDTs) and PCR-based methods, provide alternative means of diagnosing some conditions discussed in this scoping review [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref63">63</xref>]. 
One included study compared AI-supported digital microscopy with another POC-diagnostic method: a malaria study comparing it with RDTs [<xref ref-type="bibr" rid="ref39">39</xref>]. The AI-supported digital microscopy had higher sensitivity and specificity than RDTs for some malaria species and settings but lower in others. One review proposed that AI-supported digital microscopy holds more promise than RDTs for malaria diagnosis [<xref ref-type="bibr" rid="ref64">64</xref>]. However, other studies highlight the possibilities of other methods, such as RDTs and PCR, for POC diagnostics [<xref ref-type="bibr" rid="ref62">62</xref>].</p><p>Implementation of AI-supported digital microscopy could strengthen health systems and increase health equity, particularly where resources are limited, such as in sparsely populated areas and LMICs. This may be possible since, in addition to comparable diagnostic accuracy to microscopy, slides can be scanned and analyzed within approximately 30 minutes for multiple diseases [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. As the process eliminates the need for microscopy expertise on-site, it could enable timely and accurate diagnostics in PHC laboratories that currently lack this capacity: even if manual verification is required, it can be performed remotely. Moreover, by decentralizing diagnostics, it may reduce referrals to higher-tier health care facilities, alleviating their work and minimizing the risk of referral-related dropouts [<xref ref-type="bibr" rid="ref12">12</xref>].</p></sec><sec id="s4-11"><title>Knowledge Gaps and Research Priorities</title><p>This scoping review identified evidence of the feasibility of AI-supported digital microscopy for multiple targets in PHC laboratories. 
Drawing on the evidence mapped here, future research should prioritize studying scalable and robust systems that can be transferred and implemented in new laboratories and settings. Achieving scalability requires research into AI-supported digital microscopy with an end-to-end perspective, where everything from sample preparation, scanning, and AI analysis until the final diagnosis is accounted for and easily reproducible. Adding to this, research that examines how predeveloped AI-based systems are transferred and implemented in new clinical settings would provide valuable insights into real-world robustness, which was done by some of the included studies. To enable this kind of research, large multisite collaborations are important, which could be facilitated by improved health policy guidelines and frameworks, as well as initiatives led by key stakeholders including governments and nongovernmental organizations. Furthermore, important research priorities include assessing cost-effectiveness and exploring perceived barriers to implementation among patients and health care professionals. Finally, the scoping review&#x2019;s screening process identified additional potential applications for AI-supported digital microscopy, including tuberculosis, other parasitic diseases, respiratory cytology, sperm motility, and sickle cell anemia, which warrant further investigation in PHC settings [<xref ref-type="bibr" rid="ref65">65</xref>-<xref ref-type="bibr" rid="ref69">69</xref>].</p></sec><sec id="s4-12"><title>Conclusions</title><p>This scoping review identified 22 studies deploying AI-supported digital microscopy in PHC laboratories. For multiple diagnostic purposes, AI-supported digital microscopy achieved comparable results to the reference standard and could be particularly advantageous for increasing sensitivity in diagnosis. Further research is needed on challenges such as generalizability, scalability, and cost-effectiveness. 
Such evidence is critical to stimulate product development, enable regulatory approval, and support reimbursement and adoption by health care authorities. If the methods can be demonstrated to be feasible in real-life clinical PHC settings, translated into medical device products, and carefully integrated into health care systems, they are likely to improve access to diagnostics, particularly in LMICs and sparsely populated regions.</p></sec></sec></body><back><ack><p>The authors acknowledge the support of the Karolinska Institute&#x2019;s Library for guidance in developing a satisfactory search strategy. The authors declare the use of generative artificial intelligence in the research and writing process. According to the GAIDeT taxonomy (2025) [<xref ref-type="bibr" rid="ref70">70</xref>], the following tasks were delegated to GAI tools under full human supervision: (1) proofreading and editing, and (2) adapting and adjusting the tone and style. Paperpal Preflight and ChatGPT 4, 4.5, and 5 (GPT, OpenAI&#x2019;s large-scale language generation model) were used for these purposes.</p></ack><notes><sec><title>Funding</title><p>This study was funded by the Erling-Persson Foundation and the Wallenberg AI, Autonomous Systems and Software Program (WASP) funded by the Knut and Alice Wallenberg Foundation. In addition, it was supported by the Swedish Research Council, Finska L&#x00E4;kares&#x00E4;llskapet r.f., Wilhelm och Else Stockmanns stiftelse r.f. and Medicinska Underst&#x00F6;dsf&#x00F6;reningen Liv och H&#x00E4;lsa r.f. No funder had any role in the study design or decision to submit the paper for publication.</p></sec><sec><title>Data Availability</title><p>All data generated within this scoping review are available in the study, such as search details.</p></sec></notes><fn-group><fn fn-type="con"><p>JVB conceived and designed the study, wrote the manuscript, developed the search strategy, screened the studies for eligibility, and performed the data extraction. 
AS screened the studies for eligibility, performed the data extraction, and assisted in writing the manuscript. JL conceived and designed the study, assisted in writing the manuscript, and developing the search strategy. NL conceived and designed the study, assisted in writing the manuscript, developing the search strategy, and screened the studies for eligibility. VD conceived and designed the study and assisted in writing the manuscript. AM assisted in designing the study and in writing the manuscript.</p></fn><fn fn-type="conflict"><p>JL reported receiving personal fees from Aiforia Technologies Oy and serving as cofounder and co-owner of Aiforia Technologies Oy outside the submitted work. JL and AS reported having a patent for Mobile Microscope pending (no. WO2017037334A1, the invention is related to the use of fluorescence imaging filters combined with inexpensive plastic lenses, and all rights are with the University of Helsinki) and JL having a patent for a slide holder for an optical microscope pending (no. WO2015185805A1; related to motorization of regular microscopes). 
All other authors have no conflicts of interest to declare.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CBC</term><def><p>complete blood count</p></def></def-item><def-item><term id="abb3">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb4">FDA</term><def><p>US Food and Drug Administration</p></def></def-item><def-item><term id="abb5">FOV</term><def><p>fields-of-view</p></def></def-item><def-item><term id="abb6">LMICs</term><def><p>low- and middle-income countries</p></def></def-item><def-item><term id="abb7">PCR</term><def><p>polymerase chain reaction</p></def></def-item><def-item><term id="abb8">PHC</term><def><p>primary health care</p></def></def-item><def-item><term id="abb9">POC</term><def><p>point of care</p></def></def-item><def-item><term id="abb10">PRISMA</term><def><p>Preferred Reporting Items for Systematic reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb11">PRISMA-ScR</term><def><p>Preferred Reporting Items for Systematic reviews and Meta-Analyses extension for Scoping Reviews</p></def></def-item><def-item><term id="abb12">QUADAS-2</term><def><p>Quality Assessment of Diagnostic Accuracy Studies 2</p></def></def-item><def-item><term id="abb13">RDT</term><def><p>rapid diagnostic test</p></def></def-item><def-item><term id="abb14">SVM</term><def><p>support vector machine</p></def></def-item><def-item><term id="abb15">WHO</term><def><p>World Health Organization</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinto-Coelho</surname><given-names>L</given-names> </name></person-group><article-title>How artificial intelligence is shaping medical imaging technology: a survey of innovations and 
applications</article-title><source>Bioengineering (Basel)</source><year>2023</year><month>12</month><day>18</day><volume>10</volume><issue>12</issue><fpage>1435</fpage><pub-id pub-id-type="doi">10.3390/bioengineering10121435</pub-id><pub-id pub-id-type="medline">38136026</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Fleming</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Naidoo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wilson</surname><given-names>M</given-names> </name><etal/></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Jamison</surname><given-names>DT</given-names> </name><name name-style="western"><surname>Gelband</surname><given-names>H</given-names> </name><name name-style="western"><surname>Horton</surname><given-names>S</given-names> </name></person-group><article-title>High-quality diagnosis: an essential pathology package</article-title><source>Disease Control Priorities: Improving Health and Reducing Poverty</source><year>2017</year><edition>3</edition><publisher-name>International Bank for Reconstruction and Development/World Bank</publisher-name><pub-id pub-id-type="doi">10.1596/978-1-4648-0527-1_ch11</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilson</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Fleming</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Kuti</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Looi</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Lago</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Ru</surname><given-names>K</given-names> </name></person-group><article-title>Access to pathology and laboratory medicine services: a crucial gap</article-title><source>Lancet</source><year>2018</year><month>05</month><day>12</day><volume>391</volume><issue>10133</issue><fpage>1927</fpage><lpage>1938</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(18)30458-6</pub-id><pub-id pub-id-type="medline">29550029</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shamshad</surname><given-names>F</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zamir</surname><given-names>SW</given-names> </name><etal/></person-group><article-title>Transformers in medical imaging: a survey</article-title><source>Med Image Anal</source><year>2023</year><month>08</month><volume>88</volume><fpage>102802</fpage><pub-id pub-id-type="doi">10.1016/j.media.2023.102802</pub-id><pub-id pub-id-type="medline">37315483</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mohammed</surname><given-names>FA</given-names> </name><name name-style="western"><surname>Tune</surname><given-names>KK</given-names> </name><name name-style="western"><surname>Assefa</surname><given-names>BG</given-names> </name><name name-style="western"><surname>Jett</surname><given-names>M</given-names> </name><name name-style="western"><surname>Muhie</surname><given-names>S</given-names> </name></person-group><article-title>Medical image classifications using convolutional neural networks: a survey of current methods and statistical modeling of the 
literature</article-title><source>MAKE</source><year>2024</year><volume>6</volume><issue>1</issue><fpage>699</fpage><lpage>736</lpage><pub-id pub-id-type="doi">10.3390/make6010033</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rezende</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Bianchi</surname><given-names>AGC</given-names> </name><name name-style="western"><surname>Carneiro</surname><given-names>CM</given-names> </name></person-group><article-title>Cervical cancer: automation of Pap test screening</article-title><source>Diagn Cytopathol</source><year>2021</year><month>04</month><volume>49</volume><issue>4</issue><fpage>559</fpage><lpage>574</lpage><pub-id pub-id-type="doi">10.1002/dc.24708</pub-id><pub-id pub-id-type="medline">33548162</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>da Silva</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Pereira</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Salles</surname><given-names>PG</given-names> </name><etal/></person-group><article-title>Independent real-world application of a clinical-grade automated prostate cancer detection system</article-title><source>J Pathol</source><year>2021</year><month>06</month><volume>254</volume><issue>2</issue><fpage>147</fpage><lpage>158</lpage><pub-id pub-id-type="doi">10.1002/path.5662</pub-id><pub-id pub-id-type="medline">33904171</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Muehlematter</surname><given-names>UJ</given-names> </name><name 
name-style="western"><surname>Daniore</surname><given-names>P</given-names> </name><name name-style="western"><surname>Vokinger</surname><given-names>KN</given-names> </name></person-group><article-title>Approval of artificial intelligence and machine learning-based medical devices in the USA and Europe (2015-20): a comparative analysis</article-title><source>Lancet Digit Health</source><year>2021</year><month>03</month><volume>3</volume><issue>3</issue><fpage>e195</fpage><lpage>e203</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(20)30292-2</pub-id><pub-id pub-id-type="medline">33478929</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holmstr&#x00F6;m</surname><given-names>O</given-names> </name><name name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kaingu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Point-of-care digital cytology with artificial intelligence for cervical cancer screening in a resource-limited setting</article-title><source>JAMA Netw Open</source><year>2021</year><month>03</month><day>1</day><volume>4</volume><issue>3</issue><fpage>e211740</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.1740</pub-id><pub-id pub-id-type="medline">33729503</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ward</surname><given-names>P</given-names> </name><name name-style="western"><surname>Dahlberg</surname><given-names>P</given-names> </name><name name-style="western"><surname>Lagatie</surname><given-names>O</given-names> </name><etal/></person-group><article-title>Affordable artificial intelligence-based digital pathology for neglected tropical diseases: a proof-of-concept for the detection of 
soil-transmitted helminths and Schistosoma mansoni eggs in Kato-Katz stool thick smears</article-title><source>PLoS Negl Trop Dis</source><year>2022</year><month>06</month><volume>16</volume><issue>6</issue><fpage>e0010500</fpage><pub-id pub-id-type="doi">10.1371/journal.pntd.0010500</pub-id><pub-id pub-id-type="medline">35714140</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Nyirenda</surname><given-names>D</given-names> </name><name name-style="western"><surname>M&#x00E5;rtensson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kaingu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Ngasala</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lundin</surname><given-names>J</given-names> </name></person-group><article-title>AI supported diagnostic innovations for impact in global women&#x2019;s health</article-title><source>BMJ</source><year>2025</year><month>10</month><day>10</day><volume>391</volume><fpage>e086009</fpage><pub-id pub-id-type="doi">10.1136/bmj-2025-086009</pub-id><pub-id pub-id-type="medline">41073085</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><article-title>Diagnostic errors</article-title><source>World Health Organization</source><year>2016</year><access-date>2025-11-27</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://iris.who.int/handle/10665/252410">https://iris.who.int/handle/10665/252410</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bogoch</surname><given-names>II</given-names> </name><name 
name-style="western"><surname>Lundin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lo</surname><given-names>NC</given-names> </name><name name-style="western"><surname>Andrews</surname><given-names>JR</given-names> </name></person-group><article-title>Mobile phone and handheld microscopes for public health applications</article-title><source>Lancet Public Health</source><year>2017</year><month>08</month><volume>2</volume><issue>8</issue><fpage>e355</fpage><pub-id pub-id-type="doi">10.1016/S2468-2667(17)30120-2</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Galvan</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ortellado</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rivas</surname><given-names>R</given-names> </name><name name-style="western"><surname>Grossling</surname><given-names>B</given-names> </name><name name-style="western"><surname>Hilario</surname><given-names>E</given-names> </name></person-group><article-title>PP84 developing the network for the future of healthcare through telemedicine-driven diagnostic innovation</article-title><source>Int J Technol Assess Health Care</source><year>2024</year><month>12</month><volume>40</volume><issue>S1</issue><fpage>S89</fpage><lpage>S89</lpage><pub-id pub-id-type="doi">10.1017/S0266462324002538</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reschke</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gruenewald</surname><given-names>LD</given-names> </name><name name-style="western"><surname>Koch</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Radiology access in rural Germany: a nationwide survey 
on outpatient imaging and teleradiology</article-title><source>Diagnostics (Basel)</source><year>2025</year><month>04</month><day>10</day><volume>15</volume><issue>8</issue><fpage>962</fpage><pub-id pub-id-type="doi">10.3390/diagnostics15080962</pub-id><pub-id pub-id-type="medline">40310336</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Bychkov</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fukuoka</surname><given-names>J</given-names> </name></person-group><article-title>Evaluation of the global supply of pathologists [abstract]</article-title><source>Modern Pathology</source><year>2022</year><volume>35</volume><publisher-name>Springer Nature</publisher-name><pub-id pub-id-type="doi">10.1038/s41379-022-01050-6</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oyibo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Jujjavarapu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Meulah</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Schistoscope: an automated microscope with artificial intelligence for detection of Schistosoma haematobium eggs in resource-limited settings</article-title><source>Micromachines (Basel)</source><year>2022</year><month>04</month><day>19</day><volume>13</volume><issue>5</issue><fpage>643</fpage><pub-id pub-id-type="doi">10.3390/mi13050643</pub-id><pub-id pub-id-type="medline">35630110</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holmstr&#x00F6;m</surname><given-names>O</given-names> </name><name 
name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Ngasala</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Point-of-care mobile digital microscopy and deep learning for the detection of soil-transmitted helminths and Schistosoma haematobium</article-title><source>Glob Health Action</source><year>2017</year><month>06</month><volume>10</volume><issue>sup3</issue><fpage>1337325</fpage><pub-id pub-id-type="doi">10.1080/16549716.2017.1337325</pub-id><pub-id pub-id-type="medline">28838305</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sunny</surname><given-names>S</given-names> </name><name name-style="western"><surname>Baby</surname><given-names>A</given-names> </name><name name-style="western"><surname>James</surname><given-names>BL</given-names> </name><etal/></person-group><article-title>A smart tele-cytology point-of-care platform for oral cancer screening</article-title><source>PLoS ONE</source><year>2019</year><volume>14</volume><issue>11</issue><fpage>e0224885</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0224885</pub-id><pub-id pub-id-type="medline">31730638</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khanagar</surname><given-names>SB</given-names> </name><name name-style="western"><surname>Naik</surname><given-names>S</given-names> </name><name name-style="western"><surname>Al Kheraif</surname><given-names>AA</given-names> </name><etal/></person-group><article-title>Application and performance of artificial intelligence technology in oral cancer diagnosis and prediction of prognosis: a systematic review</article-title><source>Diagnostics 
(Basel)</source><year>2021</year><volume>11</volume><issue>6</issue><fpage>1004</fpage><pub-id pub-id-type="doi">10.3390/diagnostics11061004</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rodriguez</surname><given-names>JPM</given-names> </name><name name-style="western"><surname>Rodriguez</surname><given-names>R</given-names> </name><name name-style="western"><surname>Silva</surname><given-names>VWK</given-names> </name><etal/></person-group><article-title>Artificial intelligence as a tool for diagnosis in digital pathology whole slide images: a systematic review</article-title><source>J Pathol Inform</source><year>2022</year><volume>13</volume><fpage>100138</fpage><pub-id pub-id-type="doi">10.1016/j.jpi.2022.100138</pub-id><pub-id pub-id-type="medline">36268059</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Peters</surname><given-names>MDJ</given-names> </name><name name-style="western"><surname>Godfrey</surname><given-names>C</given-names> </name><name name-style="western"><surname>McInerney</surname><given-names>P</given-names> </name><name name-style="western"><surname>Munn</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Tricco</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Khalil</surname><given-names>H</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Aromataris</surname><given-names>E</given-names> </name><name name-style="western"><surname>Munn</surname><given-names>Z</given-names> </name></person-group><article-title>Chapter 11: scoping reviews (2020 version)</article-title><source>JBI Manual for Evidence 
Synthesis</source><year>2020</year><access-date>2025-12-05</access-date><publisher-name>JBI</publisher-name><pub-id pub-id-type="doi">10.46658/JBIRM-20-01</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tricco</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Lillie</surname><given-names>E</given-names> </name><name name-style="western"><surname>Zarin</surname><given-names>W</given-names> </name><etal/></person-group><article-title>PRISMA Extension for Scoping Reviews (PRISMA-ScR): checklist and explanation</article-title><source>Ann Intern Med</source><year>2018</year><month>10</month><day>2</day><volume>169</volume><issue>7</issue><fpage>467</fpage><lpage>473</lpage><pub-id pub-id-type="doi">10.7326/M18-0850</pub-id><pub-id pub-id-type="medline">30178033</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>von Bahr</surname><given-names>J</given-names> </name><name name-style="western"><surname>Diwan</surname><given-names>V</given-names> </name><name name-style="western"><surname>M&#x00E5;rtensson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Lundin</surname><given-names>J</given-names> </name></person-group><article-title>Artificial intelligence-supported digital microscopy diagnostics in primary health care laboratories: a scoping review protocol</article-title><source>OSF</source><year>2024</year><month>03</month><day>7</day><access-date>2025-12-10</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://osf.io/yz67t/overview">https://osf.io/yz67t/overview</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>von Bahr</surname><given-names>J</given-names> </name><name name-style="western"><surname>Diwan</surname><given-names>V</given-names> </name><name name-style="western"><surname>M&#x00E5;rtensson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Lundin</surname><given-names>J</given-names> </name></person-group><article-title>AI-supported digital microscopy diagnostics in primary health care laboratories: protocol for a scoping review</article-title><source>JMIR Res Protoc</source><year>2024</year><month>11</month><day>1</day><volume>13</volume><fpage>e58149</fpage><pub-id pub-id-type="doi">10.2196/58149</pub-id><pub-id pub-id-type="medline">39486020</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Olsen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vali</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Study designs for comparative diagnostic test accuracy: a methodological review and classification scheme</article-title><source>J Clin Epidemiol</source><year>2021</year><month>10</month><volume>138</volume><fpage>128</fpage><lpage>138</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2021.04.013</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Clark</surname><given-names>J</given-names> </name><name name-style="western"><surname>Glasziou</surname><given-names>P</given-names> </name><name name-style="western"><surname>Del 
Mar</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bannach-Brown</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stehlik</surname><given-names>P</given-names> </name><name name-style="western"><surname>Scott</surname><given-names>AM</given-names> </name></person-group><article-title>A full systematic review was completed in 2 weeks using automation tools: a case study</article-title><source>J Clin Epidemiol</source><year>2020</year><month>05</month><volume>121</volume><fpage>81</fpage><lpage>90</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2020.01.008</pub-id><pub-id pub-id-type="medline">32004673</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="web"><source>Covidence</source><access-date>2025-12-07</access-date><publisher-name>Veritas Health Innovation</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.covidence.org/">https://www.covidence.org/</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chien</surname><given-names>TI</given-names> </name><name name-style="western"><surname>Kao</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>HL</given-names> </name><etal/></person-group><article-title>Urine sediment examination: a comparison of automated urinalysis systems and manual microscopy</article-title><source>Clinica Chimica Acta</source><year>2007</year><month>09</month><volume>384</volume><issue>1-2</issue><fpage>28</fpage><lpage>34</lpage><pub-id pub-id-type="doi">10.1016/j.cca.2007.05.012</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiting</surname><given-names>PF</given-names> </name><name 
name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Westwood</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title><source>Ann Intern Med</source><year>2011</year><month>10</month><day>18</day><volume>155</volume><issue>8</issue><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id><pub-id pub-id-type="medline">22007046</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title><source>BMJ</source><year>2021</year><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bachar</surname><given-names>N</given-names> </name><name name-style="western"><surname>Benbassat</surname><given-names>D</given-names> </name><name name-style="western"><surname>Brailovsky</surname><given-names>D</given-names> </name><etal/></person-group><article-title>An artificial intelligence-assisted diagnostic platform for rapid near-patient hematology</article-title><source>Am J Hematol</source><year>2021</year><month>10</month><day>1</day><volume>96</volume><issue>10</issue><fpage>1264</fpage><lpage>1274</lpage><pub-id pub-id-type="doi">10.1002/ajh.26295</pub-id><pub-id 
pub-id-type="medline">34264525</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gasparin</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Araujo</surname><given-names>CIF</given-names> </name><name name-style="western"><surname>Cardoso</surname><given-names>MR</given-names> </name><etal/></person-group><article-title>Hilab system device in an oncological hospital: a new clinical approach for point of care CBC test, supported by the internet of things and machine learning</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>05</month><day>11</day><volume>13</volume><issue>10</issue><fpage>1695</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13101695</pub-id><pub-id pub-id-type="medline">37238184</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gasparin</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Araujo</surname><given-names>CIF</given-names> </name><name name-style="western"><surname>Schmitt</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Hilab system, a new point-of-care hematology analyzer supported by the Internet of Things and artificial intelligence</article-title><source>Sci Rep</source><year>2022</year><month>06</month><day>21</day><volume>12</volume><issue>1</issue><fpage>10409</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-13913-8</pub-id><pub-id pub-id-type="medline">35729182</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ardicoglu Akisin</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Akar</surname><given-names>N</given-names> </name><name name-style="western"><surname>Burkay Cotel&#x0131;</surname><given-names>M</given-names> </name></person-group><article-title>Decision support system for the classification of Downey cells as a pre-diagnostic tool</article-title><source>Turkish Journal of Biochemistry</source><year>2024</year><month>01</month><day>2</day><volume>48</volume><issue>6</issue><fpage>634</fpage><lpage>640</lpage><pub-id pub-id-type="doi">10.1515/tjb-2023-0035</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamid</surname><given-names>MMA</given-names> </name><name name-style="western"><surname>Mohamed</surname><given-names>AO</given-names> </name><name name-style="western"><surname>Mohammed</surname><given-names>FO</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of an automated microscope solution (miLab&#x2122;) in detecting malaria parasites in symptomatic patients at point-of-care in Sudan: a case-control study</article-title><source>Malar J</source><year>2024</year><month>06</month><day>28</day><volume>23</volume><issue>1</issue><fpage>200</fpage><pub-id pub-id-type="doi">10.1186/s12936-024-05029-3</pub-id><pub-id pub-id-type="medline">38943203</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bae</surname><given-names>CY</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>YM</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Embedded-deep-learning-based sample-to-answer device for on-site malaria diagnosis</article-title><source>Front Bioeng 
Biotechnol</source><year>2024</year><volume>12</volume><fpage>1392269</fpage><pub-id pub-id-type="doi">10.3389/fbioe.2024.1392269</pub-id><pub-id pub-id-type="medline">39100623</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holmstr&#x00F6;m</surname><given-names>O</given-names> </name><name name-style="western"><surname>Stenman</surname><given-names>S</given-names> </name><name name-style="western"><surname>Suutala</surname><given-names>A</given-names> </name><etal/></person-group><article-title>A novel deep learning-based point-of-care diagnostic method for detecting Plasmodium falciparum with fluorescence digital microscopy</article-title><source>PLoS One</source><year>2020</year><volume>15</volume><issue>11</issue><fpage>e0242355</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0242355</pub-id><pub-id pub-id-type="medline">33201905</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ewnetu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Badu</surname><given-names>K</given-names> </name><name name-style="western"><surname>Carlier</surname><given-names>L</given-names> </name><etal/></person-group><article-title>A digital microscope for the diagnosis of Plasmodium falciparum and Plasmodium vivax, including P. 
falciparum with hrp2/hrp3 deletion</article-title><source>PLOS Glob Public Health</source><year>2024</year><volume>4</volume><issue>5</issue><fpage>e0003091</fpage><pub-id pub-id-type="doi">10.1371/journal.pgph.0003091</pub-id><pub-id pub-id-type="medline">38768243</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Das</surname><given-names>D</given-names> </name><name name-style="western"><surname>Vongpromek</surname><given-names>R</given-names> </name><name name-style="western"><surname>Assawariyathipat</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Field evaluation of the diagnostic performance of EasyScan GO: a digital malaria microscopy device based on machine-learning</article-title><source>Malar J</source><year>2022</year><month>04</month><day>12</day><volume>21</volume><issue>1</issue><fpage>122</fpage><pub-id pub-id-type="doi">10.1186/s12936-022-04146-1</pub-id><pub-id pub-id-type="medline">35413904</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Torres</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bachman</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Delahunt</surname><given-names>CB</given-names> </name><etal/></person-group><article-title>Automated microscopy for routine malaria diagnosis: a field comparison on Giemsa-stained blood films in Peru</article-title><source>Malar J</source><year>2018</year><month>09</month><day>25</day><volume>17</volume><issue>1</issue><fpage>339</fpage><pub-id pub-id-type="doi">10.1186/s12936-018-2493-0</pub-id><pub-id pub-id-type="medline">30253764</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Linder</surname><given-names>N</given-names> </name><name name-style="western"><surname>Turkki</surname><given-names>R</given-names> </name><name name-style="western"><surname>Walliander</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A malaria diagnostic tool based on computer vision screening and visualization of Plasmodium falciparum candidate areas in digitized blood smears</article-title><source>PLOS One</source><year>2014</year><volume>9</volume><issue>8</issue><fpage>e104855</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0104855</pub-id><pub-id pub-id-type="medline">25144549</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Horning</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Delahunt</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Bachman</surname><given-names>CM</given-names> </name><etal/></person-group><article-title>Performance of a fully-automated system on a WHO malaria microscopy evaluation slide set</article-title><source>Malar J</source><year>2021</year><month>02</month><day>25</day><volume>20</volume><issue>1</issue><fpage>110</fpage><pub-id pub-id-type="doi">10.1186/s12936-021-03631-3</pub-id><pub-id pub-id-type="medline">33632222</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stegm&#x00FC;ller</surname><given-names>T</given-names> </name><name name-style="western"><surname>Abbet</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bozorgtabar</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Self-supervised learning-based cervical cytology for the triage of 
HPV-positive women in resource-limited settings and low-data regime</article-title><source>Comput Biol Med</source><year>2024</year><month>02</month><volume>169</volume><fpage>107809</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107809</pub-id><pub-id pub-id-type="medline">38113684</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Skandarajah</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sunny</surname><given-names>SP</given-names> </name><name name-style="western"><surname>Gurpur</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Mobile microscopy as a screening tool for oral cancer in India: a pilot study</article-title><source>PLoS One</source><year>2017</year><volume>12</volume><issue>11</issue><fpage>e0188440</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0188440</pub-id><pub-id pub-id-type="medline">29176904</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghaderinia</surname><given-names>M</given-names> </name><name name-style="western"><surname>Abadijoo</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mahdavian</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Smartphone-based device for point-of-care diagnostics of pulmonary inflammation using convolutional neural networks (CNNs)</article-title><source>Sci Rep</source><year>2024</year><month>03</month><day>22</day><volume>14</volume><issue>1</issue><fpage>6912</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-54939-4</pub-id><pub-id pub-id-type="medline">38519489</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Soares</surname><given-names>FA</given-names> </name><name name-style="western"><surname>Suzuki</surname><given-names>CTN</given-names> </name><name name-style="western"><surname>Sabadini</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Laboratory validation of the automated diagnosis of intestinal parasites via fecal sample processing for the recovery of intestinal parasites through the dissolved air flotation technique</article-title><source>Parasit Vectors</source><year>2024</year><month>08</month><day>30</day><volume>17</volume><issue>1</issue><fpage>368</fpage><pub-id pub-id-type="doi">10.1186/s13071-024-06434-y</pub-id><pub-id pub-id-type="medline">39215369</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Carvalho</surname><given-names>J de</given-names> </name><name name-style="western"><surname>Santos</surname><given-names>B dos</given-names> </name><name name-style="western"><surname>Gomes</surname><given-names>JF</given-names> </name><etal/></person-group><article-title>TF&#x2010;Test modified: new diagnostic tool for human enteroparasitosis</article-title><source>Clinical Laboratory Analysis</source><year>2016</year><month>07</month><volume>30</volume><issue>4</issue><fpage>293</fpage><lpage>300</lpage><pub-id pub-id-type="doi">10.1002/jcla.21854</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lundin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Suutala</surname><given-names>A</given-names> </name><name name-style="western"><surname>Holmstr&#x00F6;m</surname><given-names>O</given-names> </name><etal/></person-group><article-title>Diagnosis of soil-transmitted helminth infections with digital mobile 
microscopy and artificial intelligence in a resource-limited setting</article-title><source>PLoS Negl Trop Dis</source><year>2024</year><month>04</month><volume>18</volume><issue>4</issue><fpage>e0012041</fpage><pub-id pub-id-type="doi">10.1371/journal.pntd.0012041</pub-id><pub-id pub-id-type="medline">38602896</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sahu</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kandaswamy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>DV</given-names> </name><etal/></person-group><article-title>AI driven lab-on-chip cartridge for automated urinalysis</article-title><source>SLAS Technol</source><year>2024</year><month>06</month><volume>29</volume><issue>3</issue><fpage>100137</fpage><pub-id pub-id-type="doi">10.1016/j.slast.2024.100137</pub-id><pub-id pub-id-type="medline">38657705</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meulah</surname><given-names>B</given-names> </name><name name-style="western"><surname>Oyibo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bengtson</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Performance evaluation of the Schistoscope 5.0 for (semi-)automated digital detection and quantification of Schistosoma haematobium eggs in urine: a field-based study in Nigeria</article-title><source>Am J Trop Med Hyg</source><year>2022</year><month>11</month><day>14</day><volume>107</volume><issue>5</issue><fpage>1047</fpage><lpage>1054</lpage><pub-id pub-id-type="doi">10.4269/ajtmh.22-0276</pub-id><pub-id pub-id-type="medline">36252803</pub-id></nlm-citation></ref><ref 
id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oyibo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Meulah</surname><given-names>B</given-names> </name><name name-style="western"><surname>Bengtson</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Two-stage automated diagnosis framework for urogenital schistosomiasis in microscopy images from low-resource settings</article-title><source>J Med Imag</source><year>2023</year><volume>10</volume><issue>4</issue><fpage>044005</fpage><pub-id pub-id-type="doi">10.1117/1.JMI.10.4.044005</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meulah</surname><given-names>B</given-names> </name><name name-style="western"><surname>Oyibo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Hoekstra</surname><given-names>PT</given-names> </name><etal/></person-group><article-title>Validation of artificial intelligence-based digital microscopy for automated detection of Schistosoma haematobium eggs in urine in Gabon</article-title><source>PLoS Negl Trop Dis</source><year>2024</year><month>02</month><volume>18</volume><issue>2</issue><fpage>e0011967</fpage><pub-id pub-id-type="doi">10.1371/journal.pntd.0011967</pub-id><pub-id pub-id-type="medline">38394298</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Mehanian</surname><given-names>C</given-names> </name><name name-style="western"><surname>Jaiswal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Delahunt</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Computer-automated malaria 
diagnosis and quantitation using convolutional neural networks</article-title><conf-name>2017 IEEE International Conference on Computer Vision Workshop (ICCVW)</conf-name><conf-date>Oct 22-29, 2017</conf-date><conf-loc>Venice, Italy</conf-loc><fpage>125</fpage><pub-id pub-id-type="doi">10.1109/ICCVW.2017.22</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Delahunt</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Jaiswal</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Horning</surname><given-names>MP</given-names> </name><etal/></person-group><article-title>Fully-automated patient-level malaria assessment on field-prepared thin blood film microscopy images, including supplementary information</article-title><source>arXiv</source><comment>Preprint posted online on Sep 11, 2022</comment><pub-id pub-id-type="doi">10.48550/arXiv.1908.01901</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Osaku</surname><given-names>D</given-names> </name><name name-style="western"><surname>Cuba</surname><given-names>CF</given-names> </name><name name-style="western"><surname>Suzuki</surname><given-names>CTN</given-names> </name><name name-style="western"><surname>Gomes</surname><given-names>JF</given-names> </name><name name-style="western"><surname>Falc&#x00E3;o</surname><given-names>AX</given-names> </name></person-group><article-title>Automated diagnosis of intestinal parasites: a new hybrid approach and its benefits</article-title><source>Comput Biol Med</source><year>2020</year><month>08</month><volume>123</volume><fpage>103917</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103917</pub-id><pub-id 
pub-id-type="medline">32768052</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rivenson</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Virtual histological staining of unlabelled tissue-autofluorescence images via deep learning</article-title><source>Nat Biomed Eng</source><year>2019</year><month>06</month><volume>3</volume><issue>6</issue><fpage>466</fpage><lpage>477</lpage><pub-id pub-id-type="doi">10.1038/s41551-019-0362-y</pub-id><pub-id pub-id-type="medline">31142829</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ali</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name></person-group><article-title>The YOLO Framework: a comprehensive review of evolution, applications, and benchmarks in object detection</article-title><source>Computers</source><year>2024</year><volume>13</volume><issue>12</issue><fpage>336</fpage><pub-id pub-id-type="doi">10.3390/computers13120336</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cure-Bolt</surname><given-names>N</given-names> </name><name name-style="western"><surname>Perez</surname><given-names>F</given-names> </name><name name-style="western"><surname>Broadfield</surname><given-names>LA</given-names> </name><etal/></person-group><article-title>Artificial intelligence-based digital pathology for the detection and quantification of soil-transmitted helminths 
eggs</article-title><source>PLoS Negl Trop Dis</source><year>2024</year><month>09</month><volume>18</volume><issue>9</issue><fpage>e0012492</fpage><pub-id pub-id-type="doi">10.1371/journal.pntd.0012492</pub-id><pub-id pub-id-type="medline">39348405</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="web"><article-title>Regulatory considerations on artificial intelligence for health</article-title><source>World Health Organization</source><year>2023</year><access-date>2025-11-27</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789240078871">https://www.who.int/publications/i/item/9789240078871</ext-link></comment></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Patel</surname><given-names>A</given-names> </name><name name-style="western"><surname>Balis</surname><given-names>UGJ</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Contemporary whole slide imaging devices and their applications within the modern pathology department: a selected hardware review</article-title><source>J Pathol Inform</source><year>2021</year><volume>12</volume><fpage>50</fpage><pub-id pub-id-type="doi">10.4103/jpi.jpi_66_21</pub-id><pub-id pub-id-type="medline">35070479</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ofori</surname><given-names>B</given-names> </name><name name-style="western"><surname>Twum</surname><given-names>S</given-names> </name><name name-style="western"><surname>Nkansah Yeboah</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ansah</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Amofa Nketia Sarpong</surname><given-names>K</given-names> </name></person-group><article-title>Towards the development of cost-effective point-of-care diagnostic tools for poverty-related infectious diseases in sub-Saharan Africa</article-title><source>PeerJ</source><year>2024</year><volume>12</volume><fpage>e17198</fpage><pub-id pub-id-type="doi">10.7717/peerj.17198</pub-id><pub-id pub-id-type="medline">38915381</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gupta</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>S</given-names> </name></person-group><article-title>Point-of-care tests for human papillomavirus detection in uterine cervical samples: a review of advances in resource-constrained settings</article-title><source>Indian J Med Res</source><year>2023</year><month>11</month><day>1</day><volume>158</volume><issue>5&#x0026;6</issue><fpage>509</fpage><lpage>521</lpage><pub-id pub-id-type="doi">10.4103/ijmr.ijmr_1143_23</pub-id><pub-id pub-id-type="medline">38236008</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coro</surname><given-names>F</given-names> </name><name name-style="western"><surname>De Maria</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mangano</surname><given-names>VD</given-names> </name><name name-style="western"><surname>Ahluwalia</surname><given-names>A</given-names> </name></person-group><article-title>Technologies for the point-of-care diagnosis of malaria: a scoping review</article-title><source>Infect Dis Poverty</source><year>2025</year><month>06</month><day>23</day><volume>14</volume><issue>1</issue><fpage>54</fpage><pub-id 
pub-id-type="doi">10.1186/s40249-025-01329-1</pub-id><pub-id pub-id-type="medline">40551195</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mandal</surname><given-names>S</given-names> </name><name name-style="western"><surname>Das</surname><given-names>D</given-names> </name><name name-style="western"><surname>Udutalapally</surname><given-names>V</given-names> </name></person-group><article-title>mSickle: sickle cell identification through gradient evaluation and smartphone microscopy</article-title><source>J Ambient Intell Human Comput</source><year>2023</year><month>10</month><volume>14</volume><issue>10</issue><fpage>13319</fpage><lpage>13331</lpage><pub-id pub-id-type="doi">10.1007/s12652-022-03786-0</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aulia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Suksmono</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Mengko</surname><given-names>TR</given-names> </name><name name-style="western"><surname>Alisjahbana</surname><given-names>B</given-names> </name></person-group><article-title>A novel digitized microscopic images of ZN-stained sputum smear and its classification based on IUATLD grades</article-title><source>IEEE Access</source><year>2024</year><volume>12</volume><fpage>51364</fpage><lpage>51380</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2024.3386208</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roberts</surname><given-names>J</given-names> </name><name name-style="western"><surname>Flenaugh</surname><given-names>E</given-names> </name><name 
name-style="western"><surname>Oprea-Ilies</surname><given-names>G</given-names> </name></person-group><article-title>AI aided rapid on site evaluation of respiratory cytology</article-title><source>J Am Soc Cytopathol</source><year>2021</year><month>09</month><volume>10</volume><issue>5</issue><fpage>S2</fpage><pub-id pub-id-type="doi">10.1016/j.jasc.2021.07.131</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kanakasabapathy</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Sadasivam</surname><given-names>M</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>A</given-names> </name><etal/></person-group><article-title>An automated smartphone-based diagnostic assay for point-of-care semen analysis</article-title><source>Sci Transl Med</source><year>2017</year><month>03</month><day>22</day><volume>9</volume><issue>382</issue><fpage>eaai7863</fpage><pub-id pub-id-type="doi">10.1126/scitranslmed.aai7863</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>D&#x2019;Ambrosio</surname><given-names>MV</given-names> </name><name name-style="western"><surname>Bakalar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bennuru</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Point-of-care quantification of blood-borne filarial parasites with a mobile phone microscope</article-title><source>Sci Transl Med</source><year>2015</year><month>05</month><day>6</day><volume>7</volume><issue>286</issue><fpage>286re4</fpage><pub-id pub-id-type="doi">10.1126/scitranslmed.aaa3480</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Suchikova</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Tsybuliak</surname><given-names>N</given-names> </name><name name-style="western"><surname>Teixeira da Silva</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Nazarovets</surname><given-names>S</given-names> </name></person-group><article-title>GAIDeT (Generative AI Delegation Taxonomy): a taxonomy for humans to delegate tasks to generative artificial intelligence in scientific research and publishing</article-title><source>Account Res</source><year>2025</year><month>08</month><day>8</day><fpage>1</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1080/08989621.2025.2544331</pub-id><pub-id pub-id-type="medline">40781729</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search strategy.</p><media xlink:href="jmir_v28i1e78500_app1.docx" xlink:title="DOCX File, 14 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Original data extraction tool.</p><media xlink:href="jmir_v28i1e78500_app2.docx" xlink:title="DOCX File, 14 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Quadas-2 tool.</p><media xlink:href="jmir_v28i1e78500_app3.docx" xlink:title="DOCX File, 40 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Sample collection and scanning.</p><media xlink:href="jmir_v28i1e78500_app4.docx" xlink:title="DOCX File, 45 KB"/></supplementary-material><supplementary-material id="app5"><label>Checklist 1</label><p>PRISMA-ScR (Preferred Reporting Items for Systematic reviews and Meta-Analyses extension for Scoping Reviews) checklist.</p><media xlink:href="jmir_v28i1e78500_app5.pdf" xlink:title="PDF File, 118 
KB"/></supplementary-material></app-group></back></article>