<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e78869</article-id><article-id pub-id-type="doi">10.2196/78869</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Image-Based Deep Learning for Cataract Diagnosis: Systematic Review and Meta-Analysis</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Li</surname><given-names>Ruixi</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Li</surname><given-names>Hongyi</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Li</surname><given-names>Chong</given-names></name><degrees>BM</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Shuo</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Lei</surname><given-names>Linhong</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Tao</surname><given-names>Dan</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Ophthalmology, The Second Affiliated Hospital of Kunming Medical University</institution><addr-line>Kunming</addr-line><addr-line>Yunnan Province</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Ophthalmology, People's Hospital of Dongchuan District</institution><addr-line>Kunming</addr-line><addr-line>Yunnan Province</addr-line><country>China</country></aff><aff id="aff3"><institution>Department of Ophthalmology, Kunming Children's Hospital (The affiliated Children's Hospital of Kunming Medical University)</institution><addr-line>Kunming</addr-line><country>China</country></aff><aff id="aff4"><institution>Department of Ophthalmology, Qilin District People's Hospital of Qujing City</institution><addr-line>Qujing</addr-line><addr-line>Yunnan Province</addr-line><country>China</country></aff><aff id="aff5"><institution>Department of Ophthalmology, Yunnan Provincial Maternal and 
Child Health Hospital</institution><addr-line>No. 200 Gulou Road, Wuhua District</addr-line><addr-line>Kunming</addr-line><addr-line>Yunnan Province</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Legault</surname><given-names>Gary L</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Tanabe</surname><given-names>Hirotaka</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Wang</surname><given-names>Xiaomin</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Lu</surname><given-names>Yi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Dan Tao, MD, Department of Ophthalmology, Yunnan Provincial Maternal and Child Health Hospital, No. 200 Gulou Road, Wuhua District, Kunming, Yunnan Province, 650051, China, +86 13987610751; <email>td07510603@163.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>29</day><month>4</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e78869</elocation-id><history><date date-type="received"><day>11</day><month>06</month><year>2025</year></date><date date-type="rev-recd"><day>14</day><month>02</month><year>2026</year></date><date date-type="accepted"><day>18</day><month>02</month><year>2026</year></date></history><copyright-statement>&#x00A9; Ruixi Li, Hongyi Li, Chong Li, Shuo Li, Linhong Lei, Dan Tao. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 29.4.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e78869"/><abstract><sec><title>Background</title><p>Cataracts are a highly prevalent, potentially blinding eye condition, and effective approaches are required for their early diagnosis, underscoring the clinical significance of this study.</p></sec><sec><title>Objective</title><p>This study aims to evaluate the performance of deep learning (DL) in cataract diagnosis, assess its potential as an effective tool for automated diagnosis, and compare the diagnostic accuracy of DL with that of machine learning and human experts.</p></sec><sec sec-type="methods"><title>Methods</title><p>A systematic search was conducted in Web of Science, Embase, IEEE Xplore, PubMed, and the Cochrane Library up to April 1, 2025, for studies on image-based DL for cataract detection or clinical subtype classification. The included studies were assessed for risk of bias (RoB) using the Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool. Bivariate mixed-effects models were used for data analyses, and publication bias was assessed by Deeks&#x2019; funnel plots.</p></sec><sec sec-type="results"><title>Results</title><p>Sixty-three studies were included. The quality assessment indicated a high or unclear RoB in the patient selection (34 studies) and index test (44 studies) domains, whereas in the reference standard domain, RoB was high or unclear in only 2 studies. Image-based DL achieved a sensitivity of 96% (95% CI 0.95&#x2010;0.97) and a specificity of 98% (95% CI 0.96&#x2010;0.98) for cataract detection, with an area under the receiver operating characteristic (ROC) curve (AUC) of 0.99 (95% CI 0.98&#x2010;1.00). For cataract classification, the sensitivity and specificity of image-based DL were 94% (95% CI 0.93&#x2010;0.96) and 97% (95% CI 0.96&#x2010;0.98), respectively, with an AUC of 0.99 (95% CI 0.98&#x2010;0.99). Despite the strong overall performance, the models&#x2019; generalization capability was challenged by lower performance on independent external datasets (detection: sensitivity 87%, specificity 93%; classification: sensitivity 89%, specificity 90%), potentially attributable to domain shift between the training and validation data.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Image-based DL has demonstrated high accuracy in the detection and classification of cataracts, showing potential advantages over traditional machine learning methods, though validation remains limited. Its performance falls within the range of reported accuracy of human experts, highlighting the feasibility of automated diagnosis. However, validation data limitations, coupled with moderate-quality evidence and high heterogeneity, constrain the utility of DL in auxiliary diagnosis. 
Sensitivity dropped to 87% on external validation, indicating limited generalization capability, so caution should be exercised before broad clinical implementation.</p></sec></abstract><kwd-group><kwd>image-based</kwd><kwd>deep learning</kwd><kwd>meta-analysis</kwd><kwd>cataract</kwd><kwd>systematic review</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The lens is a biconvex, transparent structure in the anterior segment of the eye that focuses light to project images of objects at varying distances onto the retina. A cataract, defined as clouding of the lens, is primarily an age-related degenerative disease, although congenital and pediatric cataracts also occur [<xref ref-type="bibr" rid="ref1">1</xref>]. Early cataracts are asymptomatic, but progressive clouding can lead to visual impairment, greatly reducing quality of life and productivity [<xref ref-type="bibr" rid="ref2">2</xref>]. Cataracts are the leading cause of blindness worldwide. In 2020, an estimated 15 million people aged 50 years and older were blind due to cataracts, and moderate to severe cataract-related visual impairment affected an estimated 79 million people in this age group [<xref ref-type="bibr" rid="ref2">2</xref>]. These figures represent increases of 30% and 93%, respectively, over the year 2000 [<xref ref-type="bibr" rid="ref3">3</xref>]. Blinding cataracts refer to cataracts causing severe visual impairment (visual acuity &#x003C;6/60 and &#x2265;3/60) or blindness (visual acuity &#x003C;3/60) according to the WHO (World Health Organization) and <italic>ICD-11</italic> (<italic>International Classification of Diseases, 11th Revision)</italic> [<xref ref-type="bibr" rid="ref4">4</xref>]. About 94% of blinding cataracts occur in low- and middle-income countries (LMICs), and cataract-related visual impairment is closely linked to poverty in low-resource settings (LRS) [<xref ref-type="bibr" rid="ref1">1</xref>]. Because the conventional diagnosis of cataracts relies on ophthalmologists and complex equipment, it is subjective and resource intensive, making it difficult to satisfy the need for large-scale screening. Moreover, owing to limited infrastructure and a shortage of trained personnel in LMICs, inequalities persist in early cataract detection and severity classification.</p><p>Deep learning (DL) can process pixel-level information that cannot be recognized by the human eye, contributing greatly to the analysis of medical images, assisting doctors in clinical decision-making, and enhancing screening efficiency. As a branch of artificial intelligence (AI), DL models are inspired by the brain and specialize in pattern recognition [<xref ref-type="bibr" rid="ref5">5</xref>]. DL has therefore been evolving rapidly, with broad prospects in medical image analysis involving disease detection, classification, segmentation, and image registration [<xref ref-type="bibr" rid="ref6">6</xref>]. For example, convolutional neural networks (CNNs), the primary DL technique for image learning, perform excellently in image classification and feature extraction, making them a cornerstone of medical imaging [<xref ref-type="bibr" rid="ref7">7</xref>]. 
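</p><p>As a minimal illustration of the convolutional feature-extraction-plus-classification pattern described above, consider the following hedged sketch in Python with PyTorch. It is not code from any included study; the architecture, input size, and class labels are illustrative assumptions.</p><preformat>
# Minimal CNN sketch for binary cataract classification from fundus images.
# Illustrative only: layer sizes, the 224x224 RGB input, and the labels
# (0 = noncataract, 1 = cataract) are assumptions, not a reviewed model.
import torch
import torch.nn as nn

class TinyCataractCNN(nn.Module):
    def __init__(self, num_classes: int = 2):
        super().__init__()
        self.features = nn.Sequential(      # convolutional feature extraction
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),                # 224 -> 112
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),                # 112 -> 56
        )
        self.classifier = nn.Sequential(    # classification head
            nn.AdaptiveAvgPool2d(1),        # global average pooling
            nn.Flatten(),
            nn.Linear(32, num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.classifier(self.features(x))

model = TinyCataractCNN()
logits = model(torch.randn(1, 3, 224, 224))   # one dummy fundus image
probs = torch.softmax(logits, dim=1)          # P(noncataract), P(cataract)
</preformat><p>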
Residual network (ResNet), one of the landmark CNN architectures, has contributed to the development of DL, with outstanding performance on image recognition and classification benchmarks [<xref ref-type="bibr" rid="ref8">8</xref>]. The advantage of the ResNet model in the medical field lies in its capability to train deep networks efficiently and to raise the accuracy of image recognition [<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>The strategic importance of AI is to raise the quality of care and potentially reduce costs in high-income economies, and to address critical health care issues and staffing shortages and provide access to specialized skills in LMICs [<xref ref-type="bibr" rid="ref10">10</xref>]. Currently, most DL models are developed on data from high-income countries and regions and rely on high-resolution images and advanced electronic devices. In remote regions, however, fragmented health care systems are generally characterized by insufficient infrastructure, a shortage of professionals, and a lack of health care resources, making it difficult to guarantee the quality of screening and diagnosis. The available DL models face the following problems: (1) lack of compatibility: mismatch with the low-cost equipment used in LRS [<xref ref-type="bibr" rid="ref10">10</xref>]; (2) lack of generalization capability: significant decline in model performance in real-world scenarios, especially under different lighting conditions or in different patient populations [<xref ref-type="bibr" rid="ref11">11</xref>]; (3) lack of clinical validation: a recent systematic review of studies assessing the use of AI algorithms for medical image analysis found that only 6% of the included studies (n=516) conducted external validation, and assessment of diagnostic efficiency was lacking [<xref ref-type="bibr" rid="ref12">12</xref>]; and (4) training data bias: a lack of diversity in data (race, age, and disease subtypes) weakens the models&#x2019; generalization capability [<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>In the past 5 years, breakthroughs have been made in the use of DL for ophthalmic image analysis in diabetic retinopathy (DR), retinopathy of prematurity, and glaucoma. By automating the processing of large volumes of ophthalmic images, DL can achieve a more accurate and rapid diagnosis of cataracts, reducing the subjectivity and errors of conventional, clinician-dependent methods. Moreover, DL can fuse multiple image modalities (eg, slit-lamp, fundus, and optical coherence tomography [OCT] images) to make a comprehensive and accurate diagnosis through multimodal image analysis. In addition, diagnostic results can be obtained quickly from DL models with real-time diagnostic capability, greatly improving work efficiency and enabling large-scale screening and early diagnosis, especially in primary health care institutions and LRS [<xref ref-type="bibr" rid="ref14">14</xref>]. Many DL-based diagnostic tools have been approved by the US Food and Drug Administration (FDA), but they require further evaluation and independent quality review [<xref ref-type="bibr" rid="ref15">15</xref>]. 
For example, IDx-DR [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>], the first autonomous AI diagnostic system approved by the FDA, is a CNN-based system for automated DR screening with high sensitivity and specificity, which can contribute to early diagnosis and lower the risk of visual loss in clinical practice, especially in LRS.</p><p>Image-based DL has exhibited considerable potential for automatic cataract detection and classification using fundus and slit-lamp images. However, the existing findings are still heterogeneous, and a systematic review of DL algorithms for cataract image analysis is still lacking. Therefore, this study was conducted to systematically assess the performance of different DL models in cataract detection and classification in terms of sensitivity, specificity, and the area under the receiver operating characteristic (ROC) curve (AUC), thereby appraising their methodological and reporting quality and contributing to the clinical translation of DL.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Registration and Study Design</title><p>This study adhered to the PRISMA-DTA (Preferred Reporting Items for Systematic Reviews and Meta-analyses of Diagnostic Test Accuracy Studies) guidelines [<xref ref-type="bibr" rid="ref18">18</xref>], and the study protocol was registered on PROSPERO (International Prospective Register of Systematic Reviews; CRD420251030230). We acknowledge that the registration was completed on April 10, 2025, after the initial literature search conducted on April 1, 2025; thus, this is a retrospective registration.</p></sec><sec id="s2-2"><title>Search Strategy, Eligibility Criteria, and Data Extraction</title><p>Based on the predefined criteria, 2 investigators (RXL and HYL) independently searched Web of Science, IEEE Xplore, Embase, PubMed, and the Cochrane Library up to April 1, 2025, for studies published from 2019 to 2025.</p><p>The retrieved records were first imported into EndNote to eliminate duplicate publications. Titles and abstracts were then screened, and the full texts of clearly or potentially eligible studies were examined. No restrictions were imposed on geographical location or study setting. The following study types were excluded: letters, non&#x2013;peer-reviewed reports, narrative reviews, animal studies, and conference abstracts. The search strategy, including the specific search phrases, Boolean operators, and field restrictions, is shown in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The following studies were excluded during full-text screening: studies failing to report key values such as true positives (TP), false positives (FP), false negatives (FN), and true negatives (TN), making it impossible to construct a contingency table; studies reporting only composite metrics such as sensitivity, specificity, or AUC without providing the underlying data; and studies that reported data ambiguously or in a manner that prevented accurate construction of a 2&#x00D7;2 contingency table. The inclusion and exclusion criteria are summarized in <xref ref-type="table" rid="table1">Table 1</xref>. 
Discrepancies were resolved through discussion with a third investigator (SL or LHL) if needed.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Eligibility criteria, covering study design, language, data extraction, disease type, and intervention.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Inclusion criteria</td><td align="left" valign="bottom">Exclusion criteria</td></tr></thead><tbody><tr><td align="left" valign="top">Study design</td><td align="left" valign="top">Randomized controlled trials, prospective observational studies, retrospective diagnostic accuracy studies, and cross-sectional diagnostic studies</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Letters, non&#x2013;peer-reviewed reports, narrative reviews, animal studies, and conference abstracts.</p></list-item></list></td></tr><tr><td align="left" valign="top">Language</td><td align="left" valign="top">Full text in English</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Non-English publications</p></list-item></list></td></tr><tr><td align="left" valign="top">Data extraction</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Studies report key values such as true positives (TP), false positives (FP), false negatives (FN), and true negatives (TN), allowing a contingency table to be created.</p></list-item><list-item><p>Studies report composite indicators such as sensitivity, specificity, or AUC and provide the underlying data.</p></list-item><list-item><p>Studies report clear data from which 2&#x00D7;2 contingency tables can be accurately constructed.</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Studies failing to report key values such as true positives (TP), false positives (FP), false negatives (FN), and true negatives (TN), making it impossible to construct a contingency table;</p></list-item><list-item><p>Studies reporting only composite metrics such as sensitivity, specificity, or AUC without providing the underlying data;</p></list-item><list-item><p>Studies that reported data ambiguously or in a manner that prevented accurate construction of a 2&#x00D7;2 contingency table.</p></list-item></list></td></tr><tr><td align="left" valign="top">Disease type</td><td align="left" valign="top">Cataracts</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Noncataract</p></list-item></list></td></tr><tr><td align="left" valign="top">Intervention</td><td align="left" valign="top">Cataract diagnosis (detection and/or classification)</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Noncataract diagnosis</p></list-item></list></td></tr></tbody></table></table-wrap><p>Following these eligibility criteria, only studies on the performance of image-based DL algorithms in cataract detection and classification were included; their designs and basic characteristics are summarized in <xref ref-type="table" rid="table2">Table 2</xref>.</p><p>Two investigators (RXL and HYL) independently extracted the following data using standardized data extraction tables: basic characteristics (country, publication year, study site, and type), dataset characteristics (nature, number of images, and presence or absence of external validation), and performance metrics (sensitivity and specificity). Discrepancies were settled by discussion with a third investigator.
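</p><p>For instance, when a study reported sensitivity and specificity together with the numbers of diseased and nondiseased cases, the 2&#x00D7;2 cells could be recovered arithmetically, as in the following minimal Python sketch (an illustration under stated assumptions, not the authors&#x2019; extraction script; function and variable names are hypothetical):</p><preformat>
# Recover 2x2 contingency-table cells from reported sensitivity, specificity,
# and group sizes. Assumes per-image counts and rounding to whole cases;
# names are illustrative, not taken from any included study.
def reconstruct_2x2(sens: float, spec: float, n_pos: int, n_neg: int):
    tp = round(sens * n_pos)   # sensitivity = TP / (TP + FN)
    fn = n_pos - tp
    tn = round(spec * n_neg)   # specificity = TN / (TN + FP)
    fp = n_neg - tn
    return tp, fp, fn, tn

# Example: sensitivity 0.96, specificity 0.98, 200 cataract and 300 normal images
tp, fp, fn, tn = reconstruct_2x2(0.96, 0.98, 200, 300)
print(tp, fp, fn, tn)  # 192 6 8 294
</preformat><p>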
Contingency tables were used to directly extract the binary diagnostic accuracy data, including TP, FP, TN, and FN. During data extraction, double-checking was performed, and the original authors were contacted to obtain supplementary data where needed. These data were then used to calculate pooled sensitivity, specificity, and other metrics. If a study provided multiple contingency tables for the same or different DL algorithms, these were assumed to be independent of each other.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Study design and basic demographic characteristics of the included studies, covering authorship details, participant information (number of participants and mean or median age), study design type, and inclusion and exclusion criteria (N=number of participants).</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Studies</td><td align="left" valign="bottom" colspan="3">Participants</td><td align="left" valign="bottom">N</td><td align="left" valign="bottom">Median age (range)</td><td align="left" valign="bottom">Study design</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Inclusion criteria</td><td align="left" valign="top">Exclusion criteria</td><td align="left" valign="top">Labels</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr></thead><tbody><tr><td align="left" valign="top">Lin et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Patients aged less than 14 years, with or without eye symptoms, and with no history of eye surgery. All participants were required to undergo slit-lamp photography, with sedatives such as chloral hydrate administered when necessary.</td><td align="left" valign="top">Patients who already had a definitive diagnosis of cataract, other ocular abnormalities, or ocular trauma.</td><td align="left" valign="top">Noncataract or cataract</td><td align="char" char="." valign="top">350</td><td align="char" char="." 
valign="top">6.58 (6.13-7.03)</td><td align="left" valign="top">Multicenter randomized controlled trial (RCT)</td></tr><tr><td align="left" valign="top">Deepak and Bhat (2024) [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or glaucoma or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Zhao et al (2024) [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Noncataract or mild cataract or moderate cataract or severe cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Zia et al (2023) [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or glaucoma or diabetic retinopathy or neutral.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zhang et al (2023) [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or low-grade or high-grade.</td><td align="char" char="." valign="top">543</td><td align="left" valign="top">NR</td><td align="left" valign="top">Prospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zeboulon et al (2022) [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">Patients of either one of the following clinical categories: clear lens or cataract. Patients with clear lens had no history of refractive surgery and had a best corrected visual acuity (BCVA) of at least 20/20. Patients with cataract had significant visual discomfort and were scheduled for surgery. All types of cataracts were included, and 4 experienced cataract and refractive surgeons of the department performed the patient inclusions (authors PZ, CP, WG, and DG).</td><td align="left" valign="top">Patients with any corneal disease.</td><td align="left" valign="top">Normal or cataract or background.</td><td align="char" char="." valign="top">157</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zhang et al (2024) [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NC<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> severity level: normal or mild or severe.</td><td align="char" char="." valign="top">530</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NC severity level: normal or mild or severe.</td><td align="char" char="." 
valign="top">543</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Xie et al (2023) [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Patients with cataract whose best corrected distance visual acuity (BCDVA) was good (&#x003E;0.6) within 1 month after cataract surgery, and patients without cataract and without refractive media opacities. The fundus images were captured without mydriasis before surgery.</td><td align="left" valign="top">Traumatic cataracts, congenital cataracts and lens dislocation, corneal diseases, asteroid hyalosis, vitreous hemorrhage, and severe retinal and optic nerve diseases. Poor-quality and unreadable images were also excluded: images out of focus, underexposed or overexposed images, and incomplete images with more than 1/3 peripheral halo.</td><td align="left" valign="top">Noncataract or mild cataracts or visually impaired cataracts.</td><td align="char" char="." valign="top">5245</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Wu et al (2022) [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">Patients with congenital cataract, intraocular lens, aphakic eye, severe eye trauma, or corneal opacity.</td><td align="left" valign="top">Cataract or noncataract with normal-quality images or noncataract with poor-quality images.</td><td align="char" char="." valign="top">30,668</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Vasan et al (2023) [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">New patients of both the paid and free service facilities who were aged 40 years and older with a BCVA less than 20/40 in either eye. Participants were recruited immediately after the vision examination, before further investigation and ophthalmologist examination.</td><td align="left" valign="top">Patients with trauma or vulnerabilities, and patients who were unwilling to participate in the study or had a dilated pupil.</td><td align="left" valign="top">Negative or positive or &#x201C;can&#x2019;t say&#x201D; or &#x201C;not asked.&#x201D;</td><td align="char" char="." 
valign="top">1407</td><td align="left" valign="top">NR</td><td align="left" valign="top">Prospective diagnostic study</td></tr><tr><td align="left" valign="top">Hassan et al (2024) [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract or glaucoma or diabetic or uveitis.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Ueno et al (2024) [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract or infectious keratitis or immunological keratitis or corneal scar or corneal deposits or bullous keratopathy or ocular surface tumor or primary angle-closure glaucoma.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Singh et al (2024) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or glaucoma or diabetic retinopathy.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Shafiq et al (2024) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Glaucoma or cataracts or diabetic retinopathy or myopia or macular degeneration.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Santone et al (2024) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract.</td><td align="char" char="." valign="top">4785</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Jawad et al (2024) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or glaucoma or cataract or myopia or others.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Janti et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Patients who were aged 45 years and older, and participants of both genders, including males and females.</td><td align="left" valign="top">Patients found to be critically ill after the examination, and those who were not willing to participate in the study.</td><td align="left" valign="top">Cataract positive (mature or immature) or cataract negative (normal and intraocular lens).</td><td align="char" char="." 
valign="top">495</td><td align="left" valign="top">61.2 (NR-NR)</td><td align="left" valign="top">Prospective, observational diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Emir and Colak (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Healthy or diabetic retinopathy or glaucoma or cataract or age-related macular degeneration or hypertension/myopia or others.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Ogundokun et al (2024) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">AMD<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup> or cataract or diabetes or glaucoma or hypertension myopia or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Nguyen and Lin (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Mai et al (2024) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Control (no cataract present), without posterior polar cataract (PPC; cataract present without PPC), with PPC (cataract present with PPC).</td><td align="char" char="." valign="top">103</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Raveenthini et al (2024) [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">AMD or cataract or diabetic retinopathy or glaucoma or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Rafay et al (2023) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or diabetic retinopathy or glaucoma or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Abbas et al (2023) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Glaucoma or diabetic retinopathy or cataract or normal.</td><td align="char" char="." 
valign="top">300</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Uyar et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or DR or glaucoma or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Serwaa et al (2024) [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Glaucoma-positive or glaucoma-negative or cataracts-positive or cataract-negative.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NC<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> severity level: normal, mild, or severe.</td><td align="char" char="." valign="top">543</td><td align="char" char="." valign="top">61.30 (42.65-79.95)</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Glaret Subin and Muthukannan (2022) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">AMD, diabetic retinopathy, cataract, or glaucoma.</td><td align="char" char="." valign="top">5000</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Xiao et al (2024) [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cortical cataract (CC) severity level: normal, mild, or severe.</td><td align="char" char="." 
valign="top">469</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Wang et al (2024) [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">2 coarse-grained types (noncataract No C and posterior subcapsular cataract [PSC]), 7 fine-grained types (NC II, NC III, &#x2265; NC IV, CC I, CC II, CC III, and CC IV).</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Kumari and Saxena (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Diseased class or normal class.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Devaraj et al (2024) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Immature cataract, mature cataract, no cataract, and prior cataract operation with intraocular lens (IOL) inserted.</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or noncataract.</td><td align="char" char="." valign="top">7726</td><td align="left" valign="top">50 (NR-NR)</td><td align="left" valign="top">Prospective observational study</td></tr><tr><td align="left" valign="top">Al-Saadi et al (2024) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal, early, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Elsawy et al (2023) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Phakic eyes.</td><td align="left" valign="top">Pseudophakic and aphakic eyes.</td><td align="left" valign="top">NC, cortical cataract (cortical lens opacity), or PSC.</td><td align="char" char="." valign="top">2573</td><td align="char" char="." 
valign="top">69.84 (62.12-77.56)</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Akram and Debnath (2020) [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">The symptoms of the selected eye diseases include several visual abnormalities in the eye region, depending on the specific disease: a blurred, clouded, or yellowing lens; gray or white spots on the cornea; red or bloodshot eyes; yellow or greenish-yellow coatings on the eyes; foamy white spots in the sclera; swollen eyes; eyelid deformity, such as the lower eyelid being turned out from the eye; or reddish bumps on the edge of an inner eyelid.</td><td align="left" valign="top">NR</td><td align="left" valign="top">Bitot&#x2019;s spot of vitamin A deficiency, cataracts, conjunctivitis, corneal ulcer, ectropion, healthy, or periorbital cellulitis or trachoma.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Jiang et al (2021) [<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Opacity area: limited or extensive.</td><td align="left" valign="top">NR</td><td align="char" char="." valign="top">1.58 (0.7-2.46)</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref56">56</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Severity: no, mild, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref57">57</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Severity: no, mild, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Subin and Kannan (2022) [<xref ref-type="bibr" rid="ref58">58</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract, diabetic retinopathy, glaucoma, or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Pratap and Kokil (2019) [<xref ref-type="bibr" rid="ref59">59</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal, mild, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Luo et al (2021) [<xref ref-type="bibr" rid="ref60">60</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal, glaucoma, cataract, or AMD.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic 
accuracy study</td></tr><tr><td align="left" valign="top">Imran et al (2020) [<xref ref-type="bibr" rid="ref61">61</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal, mild, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Imran et al (2021) [<xref ref-type="bibr" rid="ref62">62</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal, mild, moderate, or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Acar et al (2021) [<xref ref-type="bibr" rid="ref63">63</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Noncataract or cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Olaniyan et al (2024) [<xref ref-type="bibr" rid="ref64">64</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Ganokratanaa et al (2023) [<xref ref-type="bibr" rid="ref65">65</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Gan et al (2023) [<xref ref-type="bibr" rid="ref66">66</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Four stages of cataract: incipient stage, intumescent stage, mature stage, or hypermature stage.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Tham et al (2022) [<xref ref-type="bibr" rid="ref67">67</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">Visual impairment caused by other pathologies, incomplete or missing data on cataract grading, or BCVA.</td><td align="left" valign="top">Normal or visually significant cataract.</td><td align="char" char="." 
valign="top">13,482</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Siddique (2022) [<xref ref-type="bibr" rid="ref68">68</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract, chalazion, normal, or squint.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Sirajudeen et al (2022) [<xref ref-type="bibr" rid="ref69">69</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or normal.</td><td align="char" char="." valign="top">200</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Junayed et al (2021) [<xref ref-type="bibr" rid="ref70">70</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or noncataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Hu et al (2020) [<xref ref-type="bibr" rid="ref71">71</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Pronounced cataract (2) or early cataract (1) or normal (0).</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Hu et al (2021) [<xref ref-type="bibr" rid="ref72">72</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or normal.</td><td align="char" char="." valign="top">38</td><td align="char" char="." 
valign="top">58 (NR-NR)</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Lai et al (2022) [<xref ref-type="bibr" rid="ref73">73</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Askarian et al (2021) [<xref ref-type="bibr" rid="ref74">74</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or healthy.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Son et al (2022) [<xref ref-type="bibr" rid="ref75">75</xref>]</td><td align="left" valign="top">Patients had available anterior segment photograph data.</td><td align="left" valign="top">Patients with pathologic features of the cornea, anterior chamber, lens, or iris that interfere with the detection of lens images (eg, corneal opacity or edema, uveitis, and iris defects including aniridia, coloboma, and iridocorneal endothelial syndrome) and a medical history of previous ophthalmic surgery (eg, keratoplasty, implantable Collamer lens, and cataract surgery); patients with retinal and vitreal diseases involving visual pathways that could interfere with visual acuity and the final management plan.</td><td align="left" valign="top">Cortical opacity; nuclear color; nuclear opalescence; PSC: normal or mild or moderate or severe.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Saju and Rajesh (2022) [<xref ref-type="bibr" rid="ref76">76</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Five types of cataracts: cortical or hypermature or mature or nuclear or posterior.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cross-sectional diagnostic study</td></tr><tr><td align="left" valign="top">Chellaswamy et al (2022) [<xref ref-type="bibr" rid="ref77">77</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or diabetic retinopathy or glaucoma or normal or AMD.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Lu et al (2022) [<xref ref-type="bibr" rid="ref78">78</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">Eyes with corneal opacity or other corneal disease that might significantly interfere with lens observation and blurred region of interest due to poor fixation, or eyes with small pupils that prevented manual cataract evaluation.</td><td align="left" valign="top">Nuclear cataract or cortical cataract or posterior subcapsular cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Al-Naji et al (2024) [<xref ref-type="bibr" rid="ref79">79</xref>]</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Normal or cataract or foreign body or glaucoma or subconjunctival hemorrhage or viral conjunctivitis.</td><td align="char" char="." valign="top">645</td><td align="left" valign="top">NR</td><td align="left" valign="top">Prospective observational study</td></tr><tr><td align="left" valign="top">Elloumi (2022) [<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Healthy or mild or moderate or severe cataract.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr><tr><td align="left" valign="top">Zannah et al (2024) [<xref ref-type="bibr" rid="ref81">81</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Cataract or diabetic retinopathy or glaucoma or normal.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Retrospective diagnostic accuracy study</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>NR: not reported.</p></fn><fn id="table2fn2"><p><sup>b</sup>NC: nuclear cataract.</p></fn><fn id="table2fn3"><p><sup>c</sup>AMD: age-related macular degeneration.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-3"><title>Primary and Secondary Outcomes</title><p>Primary and secondary outcomes were defined to assess the performance of DL in cataract detection and classification. The primary outcomes included sensitivity, specificity, and positive and negative likelihood ratios.</p><p>The secondary outcomes were used to assess the accuracy of DL versus machine learning (ML) algorithms in cataract diagnosis through subgroup analyses and to compare DL algorithms with human experts in studies using identical datasets. 
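</p><p>As a minimal sketch of how these primary outcome metrics derive from a single 2&#x00D7;2 contingency table (illustrative Python; this is not the pooled bivariate mixed-effects analysis used in this review, and the function name is hypothetical):</p><preformat>
# Per-study diagnostic accuracy metrics from one 2x2 table.
# Pooling across studies would additionally require a bivariate
# mixed-effects model, which this sketch does not implement.
def diagnostic_metrics(tp: int, fp: int, fn: int, tn: int) -> dict:
    sens = tp / (tp + fn)              # sensitivity (true positive rate)
    spec = tn / (tn + fp)              # specificity (true negative rate)
    return {
        "sensitivity": sens,
        "specificity": spec,
        "lr_positive": sens / (1 - spec),   # positive likelihood ratio
        "lr_negative": (1 - sens) / spec,   # negative likelihood ratio
    }

print(diagnostic_metrics(tp=192, fp=6, fn=8, tn=294))
# sensitivity 0.96, specificity 0.98, LR+ ~48.0, LR- ~0.041
</preformat><p>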
The datasets in each study were also investigated for the reference standard used, whether transfer learning was applied, the methods for model testing and validation, and the sources and characteristics of the datasets (<xref ref-type="table" rid="table3">Table 3</xref>).</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Summary of indicators, algorithms, and data sources (n=number of images).</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Studies</td><td align="left" valign="bottom" colspan="2">Indicator definition</td><td align="left" valign="bottom" colspan="3">Algorithm</td><td align="left" valign="bottom" colspan="4">Data source</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Device</td><td align="left" valign="top">Exclusion of poor-quality cases</td><td align="left" valign="top">Algorithm architecture</td><td align="left" valign="top">ML<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> or DL<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">Transfer learning applied</td><td align="left" valign="top">Source of data</td><td align="left" valign="top">Number of cases (training/test/internal/external validation)</td><td align="left" valign="top">Data range</td><td align="left" valign="top">Open access data</td></tr></thead><tbody><tr><td align="left" valign="top">Lin et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">Yes</td><td align="left" valign="top">CC-Cruiser</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Zhongshan Ophthalmic Center (ZOC), located in Guangzhou in southern China. 
The other 4 eye clinics are affiliated with Shenzhen Eye Hospital, the Central Hospital of Wuhan, the Second Affiliated Hospital of Fujian Medical University, and Kaifeng Eye Hospital</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup>/350/NR/NR</td><td align="left" valign="top">August 9, 2017-May 25, 2018</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Deepak and Bhat (2024) [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Darknet-53</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Ocular Disease Intelligent Recognition (ODIR)</td><td align="left" valign="top">4000/1000/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">ODIR</td></tr><tr><td align="left" valign="top">Zhao et al (2024) [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">NCME-Net</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Shenzhen Eye Hospital and the Eye Hospital of Nanjing Medical University</td><td align="left" valign="top">553/100/139/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Zia et al (2023) [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Improved SqueezeNet model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">ODIR-IMAGE, Kaggle dataset</td><td align="left" valign="top">1500/400/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">ODIR-IMAGE, Kaggle dataset</td></tr><tr><td align="left" valign="top">Zhang et al (2023) [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">CASIA2 AS-OCT ophthalmology device (TOMEY Inc, Japan)</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Ensemble Logistic Regression (EMLR) framework</td><td align="left" valign="top">ML</td><td align="left" valign="top">No</td><td align="left" valign="top">AS-OCT-NC2 dataset</td><td align="left" valign="top">7831/3611/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Zeboulon et al (2022) [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">Swept Source Optical Coherence Tomography (SS-OCT)</td><td align="left" valign="top">No</td><td align="left" valign="top">U-Net model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Anterion (Heidelberg)</td><td align="left" valign="top">Development set/validation set: 504/1326</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Zhang et al (2024) [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">CASIA2 AS-OCT ophthalmology device (TOMEY Inc)</td><td align="left" valign="top">No</td><td align="left" valign="top">RCRNets</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">AS-OCT-NC2 dataset</td><td align="left" 
valign="top">9394/3390/3100/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">CASIA2 AS-OCT ophthalmology device (TOMEY Inc)</td><td align="left" valign="top">Yes</td><td align="left" valign="top">RIR-Net-2&#x2010;34</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The local hospital</td><td align="left" valign="top">7831/3611/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">UCSD dataset, Heidelberg OCT dataset</td></tr><tr><td align="left" valign="top">Xie et al (2023) [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">DenseNet121<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup></td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Zhejiang Eye Hospital at Wenzhou (ZEHWZ)</td><td align="left" valign="top">4901/1048/1048/1398</td><td align="left" valign="top">September 2020-March 2021</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Wu et al (2022) [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Anti-interference model (convolutional neural network [CNN])</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The Chinese PLA (People&#x2019;s Liberation Army) General Hospital</td><td align="left" valign="top">14400/17765/1800/NR</td><td align="left" valign="top">September 2018-May 2021</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Vasan et al (2023) [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">Smartphone camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">E-Paarvai App (CNN&#xFF09;</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">A large eye care hospital in South India</td><td align="left" valign="top">1400/NR/2619/NR</td><td align="left" valign="top">January 2022-April 2022</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Hassan et al (2024) [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">OcularNET</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle machine-learning platform</td><td align="left" valign="top">4000/2200/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle machine-learning platform.</td></tr><tr><td align="left" valign="top">Ueno et al (2024) [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">Smartphone camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">YOLO V.5</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">23 tertiary eye centers in Japan</td><td align="left" valign="top">5270/836/NR/NR</td><td align="left" valign="top">2019&#x2010;2020</td><td align="left" valign="top">NR</td></tr><tr><td align="left" 
valign="top">Singh et al (2024) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">A novel ensembled deep learning CNN model</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle database</td><td align="left" valign="top">4217/NR/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Shafiq et al, (2024) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">OCT, retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">The DualEye-FeatureNet model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Structured analysis of the retina (STARE), DRIVE, high-resolution fundus (HRF)</td><td align="left" valign="top">NR/483/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">STARE, DRIVE, HRF</td></tr><tr><td align="left" valign="top">Santone et al (2024) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">The STANDARD_CNN model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">the ODIR 5K dataset</td><td align="left" valign="top">6987/957/1627/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">the ODIR 5K dataset</td></tr><tr><td align="left" valign="top">Jawad et al (2024) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Swin Transformer models (Swin-T)</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">ODIR, the Retina dataset, available on Kaggle</td><td align="left" valign="top">7000/3400/NR/300</td><td align="left" valign="top">NR</td><td align="left" valign="top">ODIR, the Retina dataset, available on Kaggle</td></tr><tr><td align="left" valign="top">Janti et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Smartphone camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Smartphone-based cataract screening application</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The AIIMS (All India Institute of Medical Sciences) Bibinagar, Hyderabad, Telangana, India</td><td align="left" valign="top">NR/990/NR/NR</td><td align="left" valign="top">April 2024-July 2024</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Emir and Colak (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The residual neural network (ResNet) 50</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The ODIR dataset</td><td align="left" valign="top">3198/930/471/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The ODIR dataset</td></tr><tr><td align="left" valign="top">Ogundokun et al (2024) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Retinal fundus 
camera</td><td align="left" valign="top">No</td><td align="left" valign="top">MobileNet<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup>V2-SVM (support vector machine)</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Ocular dataset from the Kaggle repository</td><td align="left" valign="top">16290/2012/1811/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Nguyen and Lin (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Hybrid CNN Approach</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle database</td><td align="left" valign="top">888/278/222/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Mai et al (2024) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">A Zeiss OPMI Lumera T surgical microscope</td><td align="left" valign="top">No</td><td align="left" valign="top">ConvNeXt-Tiny model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Department of Ophthalmology, Far Eastern Memorial Hospital, New Taipei, Taiwan.</td><td align="left" valign="top">NR/NR/103/NR</td><td align="left" valign="top">January 1, 2018-December 31, 2021</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Raveenthini et al (2024) [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">XGB classifier model</td><td align="left" valign="top">ML</td><td align="left" valign="top">No</td><td align="left" valign="top">HRF, DR HAGIS, DIARET DB0, DRISHTI, KAGGLE, E-OPTHA, RIM ONE, ORIGA, ACRIMA, DRIONS-DB, STARE, ARIA, IDRID, ICHALLENGE AMD, ODIR, RFMID, KAGGLE CATARACT, HARVARD V1, DERBI DATA, ICHALLENGE, GLAUCOMA</td><td align="left" valign="top">10447/2612/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">HRF, DR HAGIS, DIARET DB0, DRISHTI, KAGGLE, E-OPTHA, RIM ONE, ORIGA, ACRIMA, DRIONS-DB, STARE, ARIA, IDRID, ICHALLENGE AMD, ODIR, RFMID, KAGGLE CATARACT, HARVARD V1, DERBI DATA, ICHALLENGE, GLAUCOMA</td></tr><tr><td align="left" valign="top">Rafay et al (2023) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">EfficientNet<sup><xref ref-type="table-fn" rid="table3fn6">f</xref></sup> B3</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle database</td><td align="left" valign="top">2949/1268/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Abbas et al (2023) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Deep-ocular model</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The retinal fundus multidisease image dataset (RFMiD) and ODIR</td><td align="left" 
valign="top">1222/521/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">RFMiD and ODIR</td></tr><tr><td align="left" valign="top">Uyar et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">ABC-based weighted ensemble model</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Eye Disease Dataset (EDD), from the Kaggle</td><td align="left" valign="top">3372/426/419/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">EDD, from the Kaggle</td></tr><tr><td align="left" valign="top">Serwaa et al (2024) [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">LBPSCN: Local Binary Pattern Scaled Capsule Network</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The Kaggle database</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">CASIA2 AS-OCT ophthalmology device (TOMEY Inc)</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Clinical-awareness attention network (CCA-Net)</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">A local health physical center</td><td align="left" valign="top">9619/3141/3441/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The ACRIMA dataset, the UCSD dataset</td></tr><tr><td align="left" valign="top">Glaret Subin and Muthukannan (2022) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">FPOA-CNN</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Various medical centers in China collected by the Shanggong Medical Technology Co., Ltd.</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">ODIR database</td></tr><tr><td align="left" valign="top">Xiao et al (2024) [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">CASIA2 AS-OCT ophthalmology device (TOMEY Inc)</td><td align="left" valign="top">No</td><td align="left" valign="top">ResNet34-MSSA</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">CASIA2 AS-OCT dataset</td><td align="left" valign="top">3969/1271/1305/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The LAG dataset, the SD-OCT dataset</td></tr><tr><td align="left" valign="top">Wang et al, 2024 [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">MGCNet</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The Cataract Center of Beijing Tongren Hospital (BTH; Beijing, China)</td><td align="left" valign="top">2912/970/970/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The APTOS2019 dataset, The HAM10000 
dataset</td></tr><tr><td align="left" valign="top">Kumari and Saxena (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">RINet</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Multiple repositories</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Devaraj et al (2024) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Smartphone camera</td><td align="left" valign="top">No</td><td align="left" valign="top">EfficientNet-v2 Model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The ophthalmology departments at King George&#x2019;s Medical University (KGMU) and Balrampur Hospital, Lucknow</td><td align="left" valign="top">1708/753/275/NR</td><td align="left" valign="top">October 29, 2022-September 23, 2023</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Al-Saadi et al (2024) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">An Automated Wavelet Scattering Network</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The ODIR database</td><td align="left" valign="top">357/155/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The ODIR database</td></tr><tr><td align="left" valign="top">Elsawy et al (2023) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Color fundus photography (CFP)</td><td align="left" valign="top">No</td><td align="left" valign="top">Deep Opacity Net</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The AREDS2 dataset, the Singapore Epidemiology of Eye Diseases study (SEED)</td><td align="left" valign="top">12227/3514/1773/17088</td><td align="left" valign="top">2006&#x2010;2008</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Akram and Debnath (2020) [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">Digital camera</td><td align="left" valign="top">No</td><td align="left" valign="top">A deep convolution neural network (DCNN) model</td><td align="left" valign="top">ML</td><td align="left" valign="top">No</td><td align="left" valign="top">International Center for Eye Health, clinical images for symptoms on faces from the University of Rochester, UCSD School of Medicine and VA Medical Center, the Primary Care Dermatology Society, and other different resources</td><td align="left" valign="top">1402/350/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Jiang et al (2021) [<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">CCNN-Ensemble</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Zhongshan Ophthalmic Center of Sun Yat-sen University</td><td align="left" valign="top">470 (Training and validation)/132/79</td><td align="left" valign="top">June 2015-February 2020</td><td 
align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref56">56</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">CNN with 2D DFT</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">HRF, STARE, MESSIDOR, DRIVE, DRIONS_DB, and IDRiD datasets, as well as images obtained from the internet</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">HRF, STARE, MESSIDOR, DRIVE, DRIONS_DB, and IDRiD datasets</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref57">57</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">CNN with ensemble of SVM, NB, RF</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">HRF, STARE, MESSIDOR, DRIVE, DRIONS_DB, and IDRiD databases, as well as other images collected from the internet</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">HRF, STARE, MESSIDOR, DRIVE, DRIONS_DB, and IDRiD databases</td></tr><tr><td align="left" valign="top">Subin and Kannan (2022) [<xref ref-type="bibr" rid="ref58">58</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">AMSO-RNN (recurrent neural network) Model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">ODIR database</td><td align="left" valign="top">2240/960/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">ODIR database</td></tr><tr><td align="left" valign="top">Pratap and Kokil (2019) [<xref ref-type="bibr" rid="ref59">59</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Pre-trained CNN</td><td align="left" valign="top">TL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">HRF image database, STARE, standard diabetic retinopathy database (DIARETDB0), e-ophtha: a color fundus image database, MESSIDOR database, DRIVE database, fundus image registration (FIRE) dataset, digital retinal images for optic nerve segmentation database (DRIONS-DB), IDRiD, available datasets from Dr Hossein Rabbani, and other internet resources</td><td align="left" valign="top">400/400/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">HRF image database, STARE, DIARETDB0, e-ophtha: a color fundus image database, MESSIDOR database, DRIVE database, FIRE dataset, DRIONS-DB, IDRiD, available datasets from Dr Hossein Rabbani, and other internet resources</td></tr><tr><td align="left" valign="top">Luo et al (2021) [<xref ref-type="bibr" rid="ref60">60</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">FCL-EfficientNet-B3</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Shanggong Medical Technology Co, Ltd. 
OIA-ODIR dataset</td><td align="left" valign="top">1000/274/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">OIA-ODIR dataset</td></tr><tr><td align="left" valign="top">Imran et al (2020) [<xref ref-type="bibr" rid="ref61">61</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">The combination of DL models (AlexNet, ResNet, and VGG<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup>Net) and SVM</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Tongren Hospital, China</td><td align="left" valign="top">6424/1607/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Imran et al (2021) [<xref ref-type="bibr" rid="ref62">62</xref>]</td><td align="left" valign="top">High-resolution fundus camera Canon-EOS-40D with additional settings such as 72 DPI resolution, no-flash, manual exposure, and auto-white balance</td><td align="left" valign="top">No</td><td align="left" valign="top">A novel hybrid method, namely CRNN, based on CNN and RNN</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Tongren Hospital, China</td><td align="left" valign="top">6424/1606/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Acar et al (2021) [<xref ref-type="bibr" rid="ref63">63</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">VGGNet</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle Ocular Disease Recognition database</td><td align="left" valign="top">3891/1216/973/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle Ocular Disease Recognition database</td></tr><tr><td align="left" valign="top">Olaniyan et al (2024) [<xref ref-type="bibr" rid="ref64">64</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">Hybrid Siamese-VGG16 model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Kaggle&#x2019;s public repository</td><td align="left" valign="top">NR/121/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Kaggle&#x2019;s public repository</td></tr><tr><td align="left" valign="top">Ganokratanaa et al (2023) [<xref ref-type="bibr" rid="ref65">65</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">LeNet-CNN</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">NR</td><td align="left" valign="top">5600/1400/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Gan et al (2023) [<xref ref-type="bibr" rid="ref66">66</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">Automatic segmentation DTL platform</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Department of Ophthalmology, Jiangxi Provincial People&#x2019;s Hospital</td><td align="left" 
valign="top">517/130/nr/nr</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Tham et al (2022) [<xref ref-type="bibr" rid="ref67">67</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The ResNet-50</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Singapore Eye Research Institute</td><td align="left" valign="top">8045/1692/NR/16005</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Siddique (2022) [<xref ref-type="bibr" rid="ref68">68</xref>]</td><td align="left" valign="top">Photos from phones and internet</td><td align="left" valign="top">No</td><td align="left" valign="top">MobileNet</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">4 hospitals from Bangladesh</td><td align="left" valign="top">1762/439/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Sirajudeen et al (2022) [<xref ref-type="bibr" rid="ref69">69</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Novel Kernel-based CNN</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The Kaggle database</td><td align="left" valign="top">320/80/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database</td></tr><tr><td align="left" valign="top">Junayed et al (2021) [<xref ref-type="bibr" rid="ref70">70</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">&#x2003;CataractNet</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Multiple databases</td><td align="left" valign="top">904/226/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Multiple databases</td></tr><tr><td align="left" valign="top">Hu et al (2020) [<xref ref-type="bibr" rid="ref71">71</xref>]</td><td align="left" valign="top">Smartphone with slit-lamp</td><td align="left" valign="top">No</td><td align="left" valign="top">UDFA (Faster-RCNN)<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup></td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">Marked Slit Lamp Picture Project (MSLPP)</td><td align="left" valign="top">11272/819/4831/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">MSLPP</td></tr><tr><td align="left" valign="top">Hu et al (2021) [<xref ref-type="bibr" rid="ref72">72</xref>]</td><td align="left" valign="top">The iSpector-mini mobile phone slit lamp developed by Shenyang EyeROBO Intelligent Technology Co, Ltd.</td><td align="left" valign="top">No</td><td align="left" valign="top">ACCV</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">A cooperation hospital</td><td align="left" valign="top">1064/304/152/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Lai et al (2022) [<xref ref-type="bibr" rid="ref73">73</xref>]</td><td align="left" valign="top">Digital camera</td><td align="left" valign="top">No</td><td 
align="left" valign="top">CNNDCI</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">GitHub.com</td><td align="left" valign="top">7735/193/89/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">GitHub.com</td></tr><tr><td align="left" valign="top">Askarian et al (2021) [<xref ref-type="bibr" rid="ref74">74</xref>]</td><td align="left" valign="top">Smartphone camera</td><td align="left" valign="top">No</td><td align="left" valign="top">SVM</td><td align="left" valign="top">ML</td><td align="left" valign="top">No</td><td align="left" valign="top">NR</td><td align="left" valign="top">63/30/7/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Son et al (2022) [<xref ref-type="bibr" rid="ref75">75</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">An ensemble of 3 AI algorithms: ResNet18, WideResNet50-2, and ResNext50</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Local outpatient clinic</td><td align="left" valign="top">2706/792/446/NR</td><td align="left" valign="top">January 2017-December 2020</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Saju and Rajesh (2022) [<xref ref-type="bibr" rid="ref76">76</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">No</td><td align="left" valign="top">Dense CNN+BE_ResNet101 classification model</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The DRIMDB dataset, various hospitals</td><td align="left" valign="top">NR/264/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The DRIMDB dataset</td></tr><tr><td align="left" valign="top">Chellaswamy et al (2022) [<xref ref-type="bibr" rid="ref77">77</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">WODCNN method</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">KAGGLE, MESSIDOR, ORIGA, DRIVE, STARE datasets</td><td align="left" valign="top">1661/414/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">KAGGLE, MESSIDOR, ORIGA, DRIVE, STARE datasets</td></tr><tr><td align="left" valign="top">Lu et al (2022) [<xref ref-type="bibr" rid="ref78">78</xref>]</td><td align="left" valign="top">Slit-lamp photography</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Faster R-CNN and ResNet</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">An internal dataset from the EENT Hospital of Fudan University and an external dataset from the Pujiang Eye Study</td><td align="left" valign="top">964/214/156/NR</td><td align="left" valign="top">An internal dataset of slit lamp photographs of the anterior segment of cataract-affected eyes taken between 2018 and 2020. 
Another external dataset of slit lamp photographs taken between March 2018 and August 2019.</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Al-Naji et al (2024) [<xref ref-type="bibr" rid="ref79">79</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">InceptionResNetV2</td><td align="left" valign="top">DL</td><td align="left" valign="top">No</td><td align="left" valign="top">The Balad Ruz General Hospital and Ibn Al-Haitham Teaching Eye Hospital</td><td align="left" valign="top">453/194/NR/NR</td><td align="left" valign="top">January 2, 2023-July 7, 2023</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Elloumi (2022) [<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">Ensemble Learning (InceptionV3, MobileNet-V2, and NasNet-Mobile)</td><td align="left" valign="top">DL</td><td align="left" valign="top">Yes</td><td align="left" valign="top">The Kaggle platform</td><td align="left" valign="top">354/118/118/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The &#x201C;Cataract Dataset,&#x201D; &#x201C;Ocular Disease Recognition (ODIR)&#x201D;</td></tr><tr><td align="left" valign="top">Zannah et al (2024) [<xref ref-type="bibr" rid="ref81">81</xref>]</td><td align="left" valign="top">Retinal fundus camera</td><td align="left" valign="top">No</td><td align="left" valign="top">BayeSVM500 model</td><td align="left" valign="top">ML</td><td align="left" valign="top">Yes</td><td align="left" valign="top">Cataract dataset, Glaucoma dataset, High-Resolution Fundus (HRF) Image Database, Kaggle, IEEE-Dataport, and Pattern Recognition Lab</td><td align="left" valign="top">4144/1037/NR/NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">The Kaggle database, High-Resolution Fundus (HRF) Image Database</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>ML: machine learning.</p></fn><fn id="table3fn2"><p><sup>b</sup>DL: deep learning.</p></fn><fn id="table3fn3"><p><sup>c</sup>NR: not reported.</p></fn><fn id="table3fn4"><p><sup>d</sup>DenseNet: dense convolutional network.</p></fn><fn id="table3fn5"><p><sup>e</sup>MobileNet: efficient convolutional neural networks for mobile vision applications.</p></fn><fn id="table3fn6"><p><sup>f</sup>EfficientNet: rethinking model scaling for convolutional neural networks.</p></fn><fn id="table3fn7"><p><sup>g</sup>VGG: visual geometry group.</p></fn><fn id="table3fn8"><p><sup>h</sup>Faster-RCNN: faster region-based convolutional neural network.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-4"><title>Quality Assessment</title><p>Three investigators independently assessed the risk of bias (RoB) and clinical applicability concerns of the included studies using the Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool [<xref ref-type="bibr" rid="ref82">82</xref>]. Ratings were cross-checked to enhance interrater agreement, and the methodology for assessing consistency was explicitly reported. Deeks&#x2019; funnel plots were drawn to evaluate publication bias when more than 10 studies were included. Statistical significance was set at <italic>P</italic>&#x003C;.05. 
Deeks&#x2019; funnel plot asymmetry test was performed using the <italic>MIDAS</italic> package in STATA 18.0 (StataCorp LLC); an illustrative sketch of this test is given at the end of the Statistical Analysis section.</p></sec><sec id="s2-5"><title>Statistical Analysis</title><p>STATA 18.0 and RevMan 5.4 (Review Manager; The Cochrane Collaboration) were used for data analyses. Summary receiver operating characteristic (SROC) curve plotting, heterogeneity assessment, and publication bias analysis were performed in STATA 18.0 to enhance transparency and reproducibility. Heterogeneity was assessed with the Cochran Q test and the <italic>I</italic>&#x00B2; statistic, adopting the thresholds proposed by Higgins et al [<xref ref-type="bibr" rid="ref83">83</xref>]: <italic>I</italic>&#x00B2;&#x2264;25% indicates low, <italic>I</italic>&#x00B2;&#x2248;50% moderate, and <italic>I</italic>&#x00B2;&#x2265;75% substantial heterogeneity. This strategy distinguishes inconsistencies that stem from chance from those that reflect substantive between-study differences; an illustrative computation is sketched below. When significant heterogeneity (<italic>P</italic>&#x003C;.05 or <italic>I</italic>&#x00B2;&#x003E;50%) was identified [<xref ref-type="bibr" rid="ref84">84</xref>], a bivariate random-effects model was fitted using the <italic>MIDAS</italic> package in STATA 18.0. This model synthesizes pooled estimates of sensitivity, specificity, and AUC across studies while preserving the intrinsic correlation between sensitivity and specificity.</p><p>To evaluate the accuracy of the DL algorithms, a hierarchical SROC curve was fitted. We calculated 95% CIs for sensitivity, specificity, and AUC using the Delta method, which linearizes the nonlinear back-transformation of the logit-transformed sensitivity and specificity by a first-order Taylor expansion and then propagates the variance-covariance matrix of the parameter estimates; prediction intervals (PIs) additionally incorporated between-study heterogeneity by modeling the covariance structure of sensitivity and specificity [<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]. Sensitivity analyses were conducted on all evaluated DL algorithms rather than only the one with the highest accuracy, and a random-effects model was used to account for potential between-study variability.</p>
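<p>As a minimal, purely illustrative sketch of these heterogeneity statistics (the study-level log diagnostic odds ratios and variances below are hypothetical, not data from the included studies, which were analyzed in STATA), the Cochran Q test and <italic>I</italic>&#x00B2; can be computed as follows:</p><preformat>import numpy as np

# Hypothetical study-level log diagnostic odds ratios (lnDOR) and their
# variances; in practice these are derived from each study's 2x2 table.
lnDOR = np.array([2.9, 3.4, 2.1, 3.8, 2.6])
var_lnDOR = np.array([0.20, 0.15, 0.30, 0.25, 0.18])

w = 1.0 / var_lnDOR                      # inverse-variance weights
pooled = np.sum(w * lnDOR) / np.sum(w)   # fixed-effect pooled lnDOR
Q = np.sum(w * (lnDOR - pooled) ** 2)    # Cochran Q statistic
df = len(lnDOR) - 1
I2 = max(0.0, (Q - df) / Q) * 100        # Higgins I-squared, in percent
print(f"Q={Q:.2f} on {df} df, I2={I2:.1f}%")</preformat>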
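<p>The bivariate model itself can be written compactly. The following LaTeX sketch uses our own notation (not taken from the included reports), with logit-transformed sensitivity and specificity for study <italic>i</italic>:</p><preformat>% Reitsma-type bivariate random-effects model (notation ours)
(\hat{\theta}_{1i}, \hat{\theta}_{2i})^{\top}
    \sim \mathcal{N}\bigl( (\mu_1, \mu_2)^{\top},\ \Sigma + S_i \bigr),
\qquad
\Sigma_{11} = \sigma_1^2, \quad
\Sigma_{22} = \sigma_2^2, \quad
\Sigma_{12} = \Sigma_{21} = \rho\,\sigma_1\sigma_2

% S_i: within-study (sampling) covariance matrix of study i;
% \Sigma: between-study covariance; \rho: correlation between logit
% sensitivity and logit specificity across studies.
% Pooled summaries: Se = expit(\mu_1), Sp = expit(\mu_2).</preformat>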
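<p>A minimal sketch of the Delta-method back-transformation follows; the pooled logit(sensitivity) estimate and its SE are again hypothetical values chosen only for illustration:</p><preformat>import numpy as np

# Hypothetical pooled logit(sensitivity) and its standard error.
mu, se = 1.95, 0.12
expit = lambda x: 1.0 / (1.0 + np.exp(-x))

# Delta method: Var(expit(mu)) is approximately (expit'(mu))^2 * Var(mu),
# with expit'(mu) = expit(mu) * (1 - expit(mu)).
se_prob = expit(mu) * (1 - expit(mu)) * se

# 95% CI computed on the logit scale and then back-transformed.
lo, hi = expit(mu - 1.96 * se), expit(mu + 1.96 * se)
print(f"Se={expit(mu):.3f} (95% CI {lo:.3f}-{hi:.3f}), Delta SE={se_prob:.4f}")</preformat>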
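<p>Finally, Deeks&#x2019; funnel plot asymmetry test (described in the Quality Assessment section) regresses lnDOR on the inverse square root of the effective sample size (ESS), weighting by ESS; a slope that differs from zero suggests asymmetry. A sketch with hypothetical 2&#x00D7;2 counts (the actual test was run with the <italic>MIDAS</italic> package in STATA) is given below:</p><preformat>import numpy as np
import statsmodels.api as sm

# Hypothetical 2x2 cells for 4 studies.
tp = np.array([90, 45, 60, 120])   # true positives
fp = np.array([8, 5, 10, 12])      # false positives
fn = np.array([10, 6, 9, 15])      # false negatives
tn = np.array([85, 50, 70, 110])   # true negatives

# Log diagnostic odds ratio with a 0.5 continuity correction.
lnDOR = np.log(((tp + 0.5) * (tn + 0.5)) / ((fp + 0.5) * (fn + 0.5)))

n_dis, n_nondis = tp + fn, fp + tn
ess = 4 * n_dis * n_nondis / (n_dis + n_nondis)   # effective sample size

X = sm.add_constant(1.0 / np.sqrt(ess))   # Deeks' regressor
fit = sm.WLS(lnDOR, X, weights=ess).fit() # ESS-weighted linear regression
print(f"Slope P value (asymmetry test): {fit.pvalues[1]:.3f}")</preformat>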
</sec><sec id="s2-6"><title>Ethical Considerations</title><p>This study required no informed consent or ethical approval. All data had previously been collected from human participants in studies approved by ethics committees or institutional review boards. All included studies adhered to the Declaration of Helsinki.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Search Results</title><p>Initially, 2235 studies were retrieved, of which 492 duplicates were excluded. Following study screening, 1680 studies were excluded from the quantitative synthesis (meta-analysis): 1617 were animal studies, nondisease studies, surgical technique investigations, reviews, or conference reports; 10 did not use deep learning algorithms; 46 lacked sufficient data for constructing 2&#x00D7;2 contingency tables or reported data in formats incompatible with pooling (eg, AUC only); 4 did not focus on cataract diagnostic models; and 3 did not address cataract disease (<xref ref-type="fig" rid="figure1">Figure 1</xref>; <xref ref-type="supplementary-material" rid="app2">Checklist 1</xref>).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flowchart of the search methodology and study selection process.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig01.png"/></fig></sec><sec id="s3-2"><title>Study Characteristics</title><p>As shown in <xref ref-type="table" rid="table2">Tables 2</xref><xref ref-type="table" rid="table3"/>-<xref ref-type="table" rid="table4">4</xref>, all included studies were published in 2019&#x2010;2024 and together involved 171,416 images. Retrospective data were used in 46 studies, prospective data in only 5 studies, and cross-sectional data in 12 studies. The data came from open-access sources in 31 studies. The sample size was prespecified in one study, and low-quality images were excluded in 15 studies. External validation was conducted in 12 studies, while the remainder performed internal validation only. Six studies compared DL with traditional ML models using the same dataset, while another 6 compared DL models with human experts. Forty-four studies focused on cataract detection, 17 on cataract classification, and 2 on both detection and classification. Cataract detection was categorized as binary detection (presence vs absence of cataracts; n=21) or multidisease detection (n=25). Moreover, cataract severity was classified as mild (n=18), moderate (n=11), or severe (n=19). 
Additionally, 49 studies did not describe cataract classification; among the other studies, clinical subtypes included posterior subcapsular cataract (PSC; n=4), pediatric cataract (n=2), posterior polar cataract (PPC; n=1), nuclear cataract (NC; n=10), and cortical cataract (CC; n=6).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Author information, reference standards, types of internal validation, and whether external validation is applied.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Studies</td><td align="left" valign="bottom">Reference standard</td><td align="left" valign="bottom">Type of internal validation</td><td align="left" valign="bottom">External validation</td></tr></thead><tbody><tr><td align="left" valign="top">Lin et al (2019) [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">Expert consensus, consistent label</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Deepak and Bhat (2024) [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zhao et al (2024) [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">AREDS No 4 guidelines</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zia et al (2023) [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zhang et al (2023) [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">The LOCS III<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup> system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zeboulon et al (2022) [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">The cataract fraction (CF)</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Zhang et al (2024) [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Xie et al (2023) [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Cataract specialists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Wu et al (2022) [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">Cataract specialists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" 
valign="top">Yes</td></tr><tr><td align="left" valign="top">Vasan et al (2023) [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">Slit lamp diagnosis with dilated eyes by an ophthalmologist</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Hassan et al (2024) [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Ueno et al (2024) [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">Corneal specialists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Singh et al (2024) [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Shafiq et al (2024) [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Santone et al (2024) [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists (based on electronic medical records and verified by trained readers)</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Jawad et al (2024) [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Janti et al (2024) [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">NR</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Emir and Colak (2024) [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Ogundokun et al (2024) [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Nguyen and Lin (2024) [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Mai et al (2024) [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">Visual inspection of the surgical view of the cataract in the surgery video</td><td align="left" 
valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Raveenthini et al (2024) [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">ten-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Rafay et al (2023) [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Abbas et al (2023) [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Uyar et al (2024) [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Serwaa et al (2024) [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">NR</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zhang et al (2022) [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Experienced ophthalmologists</td><td align="left" valign="top">NR</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Glaret Subin and Muthukannan (2022) [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Xiao et al (2024) [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">Clinical grading by ophthalmologists</td><td align="left" valign="top">NR</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Wang et al (2024) [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Kumari and Saxena (2024) [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Devaraj et al (2024) [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Al-Saadi et al (2024) [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" 
valign="top">Elsawy et al (2023) [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">The Wisconsin Cataract Grading System, the AREDS2 NS severity scale.</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Akram and Debnath (2020) [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">Clinical grading by ophthalmologists</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Jiang et al (2021) [<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Three senior ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref56">56</xref>]</td><td align="left" valign="top">A professional ophthalmologist</td><td align="left" valign="top">NR</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Yadav and Yadav (2023) [<xref ref-type="bibr" rid="ref57">57</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">NR</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Subin and Kannan (2022) [<xref ref-type="bibr" rid="ref58">58</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Pratap and Kokil (2019) [<xref ref-type="bibr" rid="ref59">59</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Luo et al (2021) [<xref ref-type="bibr" rid="ref60">60</xref>]</td><td align="left" valign="top">Trained ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Imran et al (2020) [<xref ref-type="bibr" rid="ref61">61</xref>]</td><td align="left" valign="top">Two retinal experts</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Imran et al (2021) [<xref ref-type="bibr" rid="ref62">62</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Acar et al (2021) [<xref ref-type="bibr" rid="ref63">63</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Monte-Carlo cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Olaniyan et al (2024) [<xref ref-type="bibr" rid="ref64">64</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Ganokratanaa et al (2023) [<xref ref-type="bibr" rid="ref65">65</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">5-fold 
cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Gan et al (2023) [<xref ref-type="bibr" rid="ref66">66</xref>]</td><td align="left" valign="top">Experienced ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Tham et al (2022) [<xref ref-type="bibr" rid="ref67">67</xref>]</td><td align="left" valign="top">Wisconsin cataract grading system or AREDS system</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Siddique (2022) [<xref ref-type="bibr" rid="ref68">68</xref>]</td><td align="left" valign="top">Clinical grading by ophthalmologists</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Sirajudeen et al (2022) [<xref ref-type="bibr" rid="ref69">69</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Junayed et al (2021) [<xref ref-type="bibr" rid="ref70">70</xref>]</td><td align="left" valign="top">Composite reference standard (Expert clinical diagnosis or grading from original source datasets)</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Hu et al (2020) [<xref ref-type="bibr" rid="ref71">71</xref>]</td><td align="left" valign="top">Ophthalmologists with more than 5 years of clinical experience</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Hu et al (2021) [<xref ref-type="bibr" rid="ref72">72</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Lai et al (2022) [<xref ref-type="bibr" rid="ref73">73</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Askarian et al (2021) [<xref ref-type="bibr" rid="ref74">74</xref>]</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Son et al (2022) [<xref ref-type="bibr" rid="ref75">75</xref>]</td><td align="left" valign="top">The LOCS III system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Saju and Rajesh (2022) [<xref ref-type="bibr" rid="ref76">76</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Chellaswamy et al (2022) [<xref ref-type="bibr" rid="ref77">77</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Lu et al (2022) [<xref ref-type="bibr" rid="ref78">78</xref>]</td><td align="left" valign="top">The LOCS III 
system</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">Yes</td></tr><tr><td align="left" valign="top">Al-Naji et al (2024) [<xref ref-type="bibr" rid="ref79">79</xref>]</td><td align="left" valign="top">Clinical grading by ophthalmologists</td><td align="left" valign="top">Hold-out cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Elloumi (2022) [<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr><tr><td align="left" valign="top">Zannah et al (2024) [<xref ref-type="bibr" rid="ref81">81</xref>]</td><td align="left" valign="top">Clinical diagnosis by ophthalmologists; validated by trained readers</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">No</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>LOCS III: Lens Opacities Classification System III.</p></fn><fn id="table4fn2"><p><sup>b</sup>NR: not reported.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Pooled Performance of DL Algorithms</title><p>Finally, 63 studies [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref81">81</xref>] with sufficient data (97 contingency tables) were included for the assessment of DL performance in cataract detection and classification [<xref ref-type="bibr" rid="ref86">86</xref>]. Hierarchical SROC curves for cataract detection (45 contingency tables) and classification (52 contingency tables) are provided in <xref ref-type="fig" rid="figure2">Figures 2A</xref> and <xref ref-type="fig" rid="figure3">3A</xref>, respectively. The classification task involved multiclassification (eg, mild, moderate, and severe cataracts), and separate SROC curves were generated for each category. For cataract detection, the pooled sensitivity and specificity of DL were 96% (0.95&#x2010;0.97) and 98% (0.97&#x2010;0.99), respectively, with an AUC of 0.99 (0.98&#x2010;1.00). For cataract classification, DL had pooled sensitivity and specificity of 94% (0.93&#x2010;0.96) and 97% (0.96&#x2010;0.98), respectively, with an AUC of 0.99 (0.98&#x2010;1.00). Great heterogeneity and inconsistency were observed across cataract severity (mild: <italic>I</italic>&#x00B2;=99%, moderate: <italic>I</italic>&#x00B2;=96%, severe: <italic>I</italic>&#x00B2;=99%; <italic>P</italic>&#x003C;.001), suggesting substantial variability in diagnostic or methodological methods across studies. 25 contingency tables for mild cataracts were used in 18 studies, 16 contingency tables for moderate cataracts in 11 studies, and 28 contingency tables for severe cataracts in 19 studies, which may introduce classification imbalance, potentially influencing the performance assessment.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Pooled overall performance of deep learning (DL) algorithms for cataract detection. (A) Receiver operating characteristic (ROC) curves of all studies included in the meta-analysis (36 studies with 45 tables). (B) ROC curves of studies reporting the highest accuracy (36 studies with 36 tables). The cataract detection is divided into binary detection and multidisease detection. 
AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Pooled overall performance of deep learning (DL) algorithms for cataract classification. (A) Receiver operating characteristic (ROC) curves of all studies included in the meta-analysis (17 studies with 52 tables); (B) ROC curves of studies reporting the highest accuracy (17 studies with 17 tables). The cataract classification is divided by severity (mild, moderate, and severe) and clinical subtypes. AUC: area under the ROC curve; DL: deep learning; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig03.png"/></fig><p>Most studies reported one or more DL algorithms; for each study, the algorithm with the highest accuracy was selected, yielding 53 contingency tables. For cataract detection, DL had pooled sensitivity and specificity of 97% (96%&#x2010;98%) and 98% (97%&#x2010;99%), respectively, with an AUC of 0.99 (0.98&#x2010;1.00) (<xref ref-type="fig" rid="figure2">Figure 2B</xref>). For cataract classification, the pooled sensitivity and specificity were 95% (92%&#x2010;97%) and 98% (96%&#x2010;99%), respectively, with an AUC of 0.99 (0.98&#x2010;1.00) (<xref ref-type="fig" rid="figure3">Figure 3B</xref>). Threshold analyses were conducted using STATA 18.0 to investigate threshold effects. For diagnostic models in primary studies that did not prespecify a threshold, the sensitivity and specificity corresponding to the optimal threshold reported in the study were extracted. If a primary study reported results for multiple thresholds, we prioritized extracting and analyzing data corresponding to the threshold associated with the reported primary endpoint or optimal operating point. The spatial distribution of classification thresholds in ROC curves was analyzed; each threshold corresponded to a unique point on the ROC curve, and systematic evaluation of these points allowed us to quantify performance variability across thresholds and to detect threshold-driven instability (defined as marked performance fluctuations within narrow threshold ranges). The SROC curve displayed no &#x201C;shoulder-arm&#x201D; distribution (<xref ref-type="fig" rid="figure2">Figures 2</xref> and <xref ref-type="fig" rid="figure3">3</xref>).</p><p>However, the model&#x2019;s performance in independent external datasets (detection: sensitivity 87%, specificity 93%; classification: sensitivity 89%, specificity 90%) was lower than the overall estimates. Notably, the lower performance observed in external validation datasets indicates limited generalization capability, potentially attributable to domain shift, and warrants caution when these models are applied to new populations or settings.</p></sec><sec id="s3-4"><title>Subgroup Analyses</title><sec id="s3-4-1"><title>Overview</title><p>Traditional ML or DL algorithms were reported in the included studies. These studies varied in primary objectives (detection, classification, or both detection and classification). Due to overlapping objectives among studies, the sum of the numbers of studies on traditional ML and DL algorithms did not match the total number of included studies.
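</p><p>As a concrete illustration of the threshold analyses described in the previous section, the following minimal sketch, written in Python with hypothetical 2&#x00D7;2 counts rather than data from any included study, checks for a threshold effect by computing the Spearman correlation between logit-transformed sensitivity and false-positive rate across studies; a strong positive correlation would suggest that between-study variation arises partly from differing operating thresholds.</p><preformat># Minimal sketch of a threshold-effect check across studies, assuming
# hypothetical 2x2 counts (TP, FP, FN, TN); not data from any included study.
import numpy as np
from scipy.stats import spearmanr

# Hypothetical contingency tables: (TP, FP, FN, TN) per study
tables = [(90, 4, 6, 96), (85, 8, 10, 88), (78, 3, 15, 95), (92, 10, 5, 80)]

def logit(p):
    return np.log(p / (1 - p))

sens = np.array([tp / (tp + fn) for tp, fp, fn, tn in tables])
fpr = np.array([fp / (fp + tn) for tp, fp, fn, tn in tables])

# A strong positive correlation between logit(sensitivity) and
# logit(false-positive rate) across studies suggests a threshold effect.
rho, p_value = spearmanr(logit(sens), logit(fpr))
print(f"Spearman rho = {rho:.2f}, P = {p_value:.3f}")</preformat><p>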
According to the Lens Opacities Classification System III (LOCS III) [<xref ref-type="bibr" rid="ref87">87</xref>] and the methods of Mackenbrock et al [<xref ref-type="bibr" rid="ref88">88</xref>] and Gali et al [<xref ref-type="bibr" rid="ref89">89</xref>], we classified cataracts into mild, moderate, and severe types and further categorized cataracts into PSC, pediatric cataract, PPC, NC, and CC.</p></sec><sec id="s3-4-2"><title>Detection</title><sec id="s3-4-2-1"><title>Algorithm Types</title><p>DL algorithms were described in 36 studies (45 contingency tables). The pooled sensitivity and specificity of DL were 96% (95%&#x2010;97%) and 98% (97%&#x2010;99%), respectively, with an AUC of 0.99 (0.98&#x2010;1.00; <xref ref-type="fig" rid="figure2">Figure 2A</xref>). Additionally, traditional ML algorithms were described in 5 studies (13 contingency tables). The pooled sensitivity and specificity of ML were 90% (87%&#x2010;91%) and 94% (91%&#x2010;96%), respectively, with an AUC of 0.95 (0.93&#x2010;0.97; <xref ref-type="fig" rid="figure4">Figure 4A</xref>).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Pooled overall performance of (non-DL) machine learning (ML) algorithms. (A) Receiver operating characteristic (ROC) curves of studies using ML algorithms for cataract detection (5 studies with 13 tables); (B) ROC curves of studies using ML algorithms to classify cataracts (6 studies with 15 tables). AUC: area under the ROC curve; ML: machine learning; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig04.png"/></fig></sec><sec id="s3-4-2-2"><title>Disease Types</title><p>A total of 37 studies (57 contingency tables) focused on nonspecific or general cataracts; the pooled sensitivity, specificity, and AUC were 96% (94%&#x2010;97%), 98% (97%&#x2010;98%), and 0.99 (0.98&#x2010;1.00). The Cochran Q test revealed a statistically significant result (Q=199.023; <italic>P</italic>&#x003C;.001) and high inconsistency (<italic>I</italic>&#x00B2;=99%; 95% CI 98&#x2010;99), indicating significant between-study variability. For cataract detection (CC: 1 study with 1 contingency table; pediatric cataract: 2 studies with 8 contingency tables; NC: 2 studies with 3 contingency tables; PSC: 1 study with 1 contingency table; PPC: 1 study with 1 contingency table), the pooled sensitivity and specificity were 93% (90%&#x2010;95%) and 96% (92%&#x2010;98%), respectively, and the AUC was 0.97 (0.95&#x2010;0.98) in 4 studies (13 contingency tables). Due to the extremely limited sample sizes across clinical subtypes, with most subtypes reported by only 1 study, the meta-analysis results were less robust. Consequently, only a pooled heterogeneity assessment could be conducted. The Cochran Q test revealed a statistically significant result (Q=9.167; <italic>P</italic>=.005) and high inconsistency (<italic>I</italic>&#x00B2;=78%; 95% CI 53&#x2010;100), indicating significant between-study variability (<xref ref-type="fig" rid="figure5">Figures 5A and 5B</xref>).</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Pooled performance based on disease types in cataract detection. (A) Receiver operating characteristic (ROC) curves of studies detecting unclassified cataracts (37 studies with 57 tables); (B) ROC curves of studies detecting subtypes of cataract (4 studies with 13 tables).
AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig05.png"/></fig></sec><sec id="s3-4-2-3"><title>Validation Types</title><p>Substantial heterogeneity and inconsistency were observed in the validation methods for cataract detection (<italic>I</italic>&#x00B2;=99% for both internal and external validation; <italic>P</italic>&#x003C;.001), indicating considerable variability in diagnostic and methodological approaches across studies. The Deeks&#x2019; funnel plots revealed no significant publication bias in the internal validation (<italic>P</italic>=.68) or the external validation (<italic>P</italic>=.23). The internal validation was conducted in 38 studies (54 contingency tables), which showed a pooled sensitivity of 96% (95%&#x2010;97%) and a pooled specificity of 98% (97%&#x2010;98%), with an AUC of 0.99 (0.98&#x2010;1.00). The external validation was performed in only 8 studies (15 contingency tables), which showed a pooled sensitivity of 87% (81%&#x2010;92%) and a pooled specificity of 93% (86%&#x2010;96%), with an AUC of 0.95 (0.93&#x2010;0.97; <xref ref-type="fig" rid="figure6">Figures 6A and 6B</xref>).</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Pooled performance based on validation types in cataract detection. (A) Receiver operating characteristic (ROC) curves of studies with internal validations (38 studies with 54 tables); (B) ROC curves of studies with external validations (8 studies with 15 tables). AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig06.png"/></fig></sec></sec></sec><sec id="s3-5"><title>Classification</title><sec id="s3-5-1"><title>Algorithm Types</title><p>DL algorithms were described in 17 studies (52 contingency tables); the pooled sensitivity and specificity of DL were 94% (93%&#x2010;96%) and 97% (96%&#x2010;98%), respectively, with an AUC of 0.99 (0.98&#x2010;1.00; <xref ref-type="fig" rid="figure3">Figure 3A</xref>). Additionally, traditional ML algorithms were described in 6 studies (15 contingency tables); the pooled sensitivity and specificity of ML were 88% (85%&#x2010;90%) and 94% (90%&#x2010;96%), respectively, with an AUC of 0.94 (0.92&#x2010;0.96; <xref ref-type="fig" rid="figure4">Figure 4B</xref>).</p></sec><sec id="s3-5-2"><title>Disease Types</title><p>For mild cataracts, 18 studies (25 contingency tables) had a pooled sensitivity of 92% (89%&#x2010;94%) and a pooled specificity of 96% (94%&#x2010;97%), with an AUC of 0.98 (0.96&#x2010;0.99). For moderate cataracts, 11 studies (16 contingency tables) showed comparable performance: sensitivity 94% (90%&#x2010;96%), specificity 97% (95%&#x2010;98%), and AUC 0.99 (0.97&#x2010;0.99). For severe cataracts, 19 studies (28 contingency tables) had a sensitivity of 93% (90%&#x2010;95%) and a specificity of 98% (96%&#x2010;99%), with an AUC of 0.98 (0.97&#x2010;0.99). Substantial heterogeneity and inconsistency were observed across cataract severity (mild: <italic>I</italic>&#x00B2;=99%, moderate: <italic>I</italic>&#x00B2;=96%, severe: <italic>I</italic>&#x00B2;=99%; <italic>P</italic>&#x003C;.001), indicating considerable variability in diagnostic and methodological approaches across studies.
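</p><p>For transparency, the heterogeneity statistics reported throughout this section can be reproduced from study-level estimates. The following minimal sketch, assuming hypothetical per-study true-positive and false-negative counts rather than data from any included study, computes the Cochran Q statistic and the <italic>I</italic>&#x00B2; index for logit-transformed sensitivities.</p><preformat># Minimal sketch of the Cochran Q test and the I^2 statistic for
# logit-transformed sensitivities; counts below are hypothetical placeholders.
import numpy as np
from scipy.stats import chi2

def logit(p):
    return np.log(p / (1 - p))

# Hypothetical per-study (TP, FN) counts for one severity category
counts = [(88, 12), (140, 9), (52, 7), (201, 16)]
est = np.array([logit(tp / (tp + fn)) for tp, fn in counts])
# Approximate within-study variance of a logit proportion: 1/TP + 1/FN
var = np.array([1 / tp + 1 / fn for tp, fn in counts])

w = 1 / var                                  # inverse-variance weights
pooled = np.sum(w * est) / np.sum(w)         # fixed-effect pooled logit
Q = np.sum(w * (est - pooled) ** 2)          # Cochran Q statistic
df = len(est) - 1
I2 = max(0.0, (Q - df) / Q) * 100            # I^2 as a percentage
p_value = chi2.sf(Q, df)
print(f"Q = {Q:.3f}, P = {p_value:.3f}, I^2 = {I2:.0f}%")</preformat><p>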
Furthermore, 8 studies with 28 contingency tables focused on nonspecific or general cataracts, which had a pooled sensitivity of 95% (92%&#x2010;96%) and a pooled specificity of 98% (97%&#x2010;99%), with an AUC of 0.99 (0.98&#x2010;1.00). Substantial heterogeneity and inconsistency were observed across cataract types (nonspecific or general cataract: <italic>I</italic>&#x00B2;=96%; NC: <italic>I</italic>&#x00B2;=100%; <italic>P</italic>&#x003C;.001), indicating considerable variability in diagnostic and methodological approaches across studies. In the NC subgroup (9 studies with 27 contingency tables), extremely high heterogeneity was observed (<italic>I</italic>&#x00B2;=100%). Consequently, to maintain statistical consistency, we did not calculate a pooled estimate for this subgroup. Instead, the descriptive analysis showed a sensitivity of 89%&#x2010;93% and a specificity of 93%&#x2010;97% across these studies. A total of 4 studies with 15 contingency tables considered other clinical subtypes (PSC: 1 study with 3 contingency tables; CC: 4 studies with 12 contingency tables), with pooled sensitivity, specificity, and AUC of 91% (84%&#x2010;95%), 96% (94%&#x2010;97%), and 0.98 (0.96&#x2010;0.99). Due to the extremely limited sample sizes of other clinical subtypes, the meta-analysis results were less robust. Consequently, only a pooled heterogeneity assessment could be conducted. The Cochran Q test revealed a statistically significant result (Q=120.355; <italic>P</italic>&#x003C;.001) and high inconsistency (<italic>I</italic>&#x00B2;=98%; 95% CI 97&#x2010;99), indicating significant between-study variability (<xref ref-type="fig" rid="figure7">Figures 7A-7E</xref>).</p><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Pooled performance based on disease types in cataract classification. (A) Receiver operating characteristic (ROC) curves of studies on classification of mild cataract (18 studies with 25 tables); (B) ROC curves of studies on classification of moderate cataract (11 studies with 16 tables); (C) ROC curves of studies on classification of severe cataract (19 studies with 28 tables); (D) ROC curves of studies on classification of cataracts (8 studies with 28 tables); (E) ROC curves of studies on classification of other subtypes of cataract (4 studies with 15 tables). AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig07.png"/></fig></sec><sec id="s3-5-3"><title>Validation Types</title><p>The internal validation was adopted in 17 studies (59 contingency tables), which showed a pooled sensitivity of 93% (92%&#x2010;95%) and a pooled specificity of 97% (96%&#x2010;98%), with an AUC of 0.99 (0.97&#x2010;0.99). The external validation was adopted in only 4 studies (9 contingency tables), which showed a pooled sensitivity of 89% (83%&#x2010;92%) and a pooled specificity of 90% (86%&#x2010;92%), with an AUC of 0.95 (0.93&#x2010;0.97; <xref ref-type="fig" rid="figure8">Figure 8</xref>).</p><fig position="float" id="figure8"><label>Figure 8.</label><caption><p>Pooled performance based on validation types in cataract classification. (A) Receiver operating characteristic (ROC) curves of studies with internal validations (17 studies with 59 tables); (B) ROC curves of studies with external validations (4 studies with 9 tables).
AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig08.png"/></fig></sec></sec><sec id="s3-6"><title>Comparison Between DL and ML Algorithms</title><sec id="s3-6-1"><title>Detection</title><p>Two studies compared DL (8 contingency tables) and ML algorithms (7 contingency tables) using the same dataset. The pooled sensitivity was 94% (92%&#x2010;96%) for DL and 91% (85%&#x2010;95%) for ML algorithms. The pooled specificity was 99% (97%&#x2010;100%) for DL and 90% (86%&#x2010;93%) for ML algorithms. The AUC was 0.97 (0.95&#x2010;0.98) for DL and 0.96 (0.94&#x2010;0.97) for ML algorithms. For DL algorithms, the Cochran Q test revealed a statistically significant result (Q=9.675; <italic>P</italic>=.004) and high inconsistency (<italic>I</italic>&#x00B2;=79%; 95% CI 55&#x2010;100). For ML algorithms, the Cochran Q test also revealed a statistically significant result (Q=5.853; <italic>P</italic>=.03) and high inconsistency (<italic>I</italic>&#x00B2;=66%; 95% CI 23&#x2010;100). Therefore, both DL and ML exhibited significant heterogeneity. However, due to overlapping CIs and the absence of direct between-group comparisons, the statistical significance of the between-group heterogeneity could not be conclusively established from these findings. The comparison between DL and ML algorithms for cataract detection was constrained, at the time of this writing, by minimal comparative evidence, with only 2 studies available for direct benchmarking. This scarcity severely limited statistical power, compromised generalizability, and may have inflated performance estimates for DL models (<xref ref-type="fig" rid="figure9">Figures 9A and 9B</xref>).</p><fig position="float" id="figure9"><label>Figure 9.</label><caption><p>Pooled subgroup performance of deep learning (DL) versus machine learning (ML) algorithms. AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig09.png"/></fig></sec><sec id="s3-6-2"><title>Classification</title><p>Four studies compared DL (10 contingency tables) and ML algorithms (10 contingency tables) using the same dataset. The pooled sensitivity was 95% (91%&#x2010;97%) for DL and 89% (86%&#x2010;91%) for ML algorithms. The pooled specificity was 98% (95%&#x2010;99%) for DL and 94% (89%&#x2010;97%) for ML algorithms. The AUC was 0.99 (0.98&#x2010;1.00) for DL and 0.94 (0.92&#x2010;0.96) for ML algorithms (<xref ref-type="fig" rid="figure9">Figures 9C and 9D</xref>).</p></sec></sec><sec id="s3-7"><title>DL Algorithms Versus Human Experts</title><p>Direct comparisons between DL algorithms (7 contingency tables) and human experts (10 contingency tables) were performed across 7 studies using the same datasets. For DL algorithms, quantitative pooling was not conducted due to extreme heterogeneity (Q=933.852; <italic>P</italic>&#x003C;.001; <italic>I</italic>&#x00B2;=100%). Instead, a descriptive analysis revealed highly variable performance of DL algorithms, with sensitivity estimates ranging from 72% to 93% and specificity ranging from 64% to 99%. In contrast, human experts demonstrated moderate heterogeneity (Q=5.811; <italic>P</italic>=.03; <italic>I</italic>&#x00B2;=66%), allowing for a pooled analysis.
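</p><p>Pooled estimates such as those reported below are typically obtained from a bivariate random-effects model. As a simplified illustration, the following minimal sketch applies univariate DerSimonian-Laird random-effects pooling to logit-transformed sensitivities, assuming hypothetical per-study counts rather than data from any included study.</p><preformat># Minimal sketch of DerSimonian-Laird random-effects pooling of
# logit-transformed sensitivities; a univariate simplification of the
# bivariate model, with hypothetical (TP, FN) counts per study.
import numpy as np

def logit(p):
    return np.log(p / (1 - p))

def inv_logit(x):
    return 1 / (1 + np.exp(-x))

counts = [(120, 9), (75, 6), (210, 24), (64, 3)]   # hypothetical (TP, FN)
est = np.array([logit(tp / (tp + fn)) for tp, fn in counts])
var = np.array([1 / tp + 1 / fn for tp, fn in counts])

w = 1 / var
df = len(est) - 1
Q = np.sum(w * (est - np.sum(w * est) / np.sum(w)) ** 2)
# DerSimonian-Laird estimate of the between-study variance tau^2
tau2 = max(0.0, (Q - df) / (np.sum(w) - np.sum(w ** 2) / np.sum(w)))

w_re = 1 / (var + tau2)                      # random-effects weights
pooled = np.sum(w_re * est) / np.sum(w_re)
se = np.sqrt(1 / np.sum(w_re))
lo, hi = pooled - 1.96 * se, pooled + 1.96 * se
print(f"Pooled sensitivity = {inv_logit(pooled):.3f} "
      f"(95% CI {inv_logit(lo):.3f} to {inv_logit(hi):.3f})")</preformat><p>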
The pooled sensitivity and pooled specificity for human experts were 93% (95% CI 77%&#x2010;98%) and 95% (95% CI 79%&#x2010;99%), respectively, with an AUC of 0.98 (95% CI 0.97&#x2010;0.99; <xref ref-type="fig" rid="figure10">Figure 10</xref>). These results highlight that DL algorithms exhibited significantly higher heterogeneity than human experts. The Deeks&#x2019; funnel plot indicated no significant publication bias for human experts (<italic>P</italic>=.16), but potential borderline publication bias for DL algorithms (<italic>P</italic>=.05). The extreme heterogeneity (<italic>I</italic>&#x00B2;=100%) observed in the DL algorithms underscores that their efficacy is highly dependent on specific study conditions and architectures, precluding a uniform performance metric.</p><fig position="float" id="figure10"><label>Figure 10.</label><caption><p>Pooled performance of human experts. AUC: area under the ROC curve; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig10.png"/></fig></sec><sec id="s3-8"><title>Publication Bias and Heterogeneity</title><p>The Deeks&#x2019; funnel plot revealed no significant publication bias for cataract detection (<italic>P</italic>=.48). However, potential borderline publication bias was detected for cataract classification (<italic>P</italic>=.05). Nevertheless, the wide scatter of studies around the regression line in the plot warrants cautious interpretation of this result (<xref ref-type="fig" rid="figure11">Figures 11A and 11B</xref>).</p><fig position="float" id="figure11"><label>Figure 11.</label><caption><p>Summary estimate of pooled performance using funnel plots. (A) Deeks&#x2019; funnel plot asymmetry test of studies detecting cataracts; (B) Deeks&#x2019; funnel plot asymmetry test of studies classifying cataracts.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig11.png"/></fig><p>The included studies showed substantial heterogeneity. The high <italic>I</italic>&#x00B2; values (sensitivity: 95.00% and specificity: 97.11% for cataract detection; sensitivity: 95.94% and specificity: 98.55% for classification) suggested considerable between-study variability (<italic>P</italic>&#x003C;.001), which was further explored by subgroup analyses (<xref ref-type="fig" rid="figure12">Figures 12</xref> and <xref ref-type="fig" rid="figure13">13</xref>).</p><fig position="float" id="figure12"><label>Figure 12.</label><caption><p>Summary estimate of pooled performance using forest plots. The forest plot of studies for the cataract detection (41 studies).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig12.png"/></fig><fig position="float" id="figure13"><label>Figure 13.</label><caption><p>Summary estimate of pooled performance using forest plots. The forest plot of studies for the cataract classification (19 studies).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78869_fig13.png"/></fig></sec><sec id="s3-9"><title>RoB</title><p>The quality of included studies was assessed using QUADAS-2, and a summary of findings is displayed in Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> (summary of methodological quality of 63 studies included).
A detailed assessment for each item of RoB and applicability concern is also provided in Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> (diagrams of methodological quality of 63 studies included). High RoB or selection bias was detected in 34 studies due to a lack of randomization or eligibility criteria in the patient selection domain; RoB was high or unclear in 44 studies due to no predefined threshold in the index test domain.</p><p>Due to inconsistencies in the reference standard domain (no reporting of whether blinding was implemented or whether a predefined threshold was used), RoB was high or unclear in 2 studies. RoB was high or unclear in 34 studies in the domain of flow and timing because it was not reported whether the same reference standard was used or whether an appropriate time interval was maintained.</p><p>High or unclear applicability in the domain of patient selection was detected in 1 study and unclear applicability in the domain of index test in 5 studies; 2 studies showed applicability concerns in the domain of reference standard.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>This meta-analysis included 63 studies and rigorously assessed the study quality using the QUADAS-2 to summarize the evidence to date on the performance of image-based DL in cataract diagnosis. The results revealed that DL algorithms might offer higher accuracy than traditional ML algorithms and fall within the range of reported accuracy of human experts in the detection of cataracts, demonstrating potential as tools for automated diagnosis. However, given the moderate quality and high heterogeneity of the current evidence base, these DL algorithms should currently be considered primarily as adjuncts to cataract diagnosis.</p><p>DL has made tremendous progress in automated image analysis [<xref ref-type="bibr" rid="ref6">6</xref>]. In clinical practice, a severe imbalance between supply and demand is present in ophthalmologic diagnosis. As AI advances, DL is expected to raise diagnostic efficiency and thus help alleviate health care resource inequality.</p><p>Four relevant systematic reviews and meta-analyses were identified. (1) Cheung et al [<xref ref-type="bibr" rid="ref90">90</xref>] found high sensitivity, specificity, and reproducibility and substantial heterogeneity of ML models for cataract diagnosis in children and adults, but the strength of evidence was limited since only 11 studies with 13 contingency tables were included. (2) Liu et al [<xref ref-type="bibr" rid="ref91">91</xref>] found comparable performance of DL and experts in medical imaging, but only 2 out of 18 ophthalmology studies involved cataracts, necessitating in-depth research on DL in cataract diagnosis. (3) Aggarwal et al [<xref ref-type="bibr" rid="ref92">92</xref>] included 82 ophthalmology studies, none of which involved cataracts, and verified that DL possesses good sensitivity, specificity, and AUC for the feature identification of other eye diseases, but the algorithmic evaluation criteria remain to be standardized due to high heterogeneity. (4) Islam et al [<xref ref-type="bibr" rid="ref93">93</xref>] demonstrated that DL can be applied to retinal vessel segmentation, supporting its wider adoption in LMICs.
Therefore, methodological limitations should be critically assessed during the clinical translation of DL to further improve its reliability.</p><p>This meta-analysis systematically evaluated the effectiveness of DL versus traditional ML in cataract detection and classification to provide a basis for clinical decision-making. For cataract detection, DL had a pooled sensitivity of 96% (95% CI 95%&#x2010;97%) and a pooled specificity of 98% (97%&#x2010;99%), with an AUC of 0.99 (0.98&#x2010;1.00); traditional ML had a pooled sensitivity of 90% (87%&#x2010;91%) and a pooled specificity of 94% (91%&#x2010;96%), with an AUC of 0.95 (0.93&#x2010;0.97). For cataract classification, the pooled sensitivity and specificity of DL were 94% (93%&#x2010;96%) and 97% (96%&#x2010;98%), with an AUC of 0.99 (0.98&#x2010;1.00); the pooled sensitivity and specificity of traditional ML were 88% (85%&#x2010;90%) and 94% (90%&#x2010;96%), with an AUC of 0.94 (0.92&#x2010;0.96). Available evidence suggests that DL models exhibit high sensitivity and specificity in automated cataract diagnosis and that medical image-based DL still demonstrates superior robustness to ML despite the RoB in most studies. However, our conclusions rest partly on low-quality studies lacking external validation and standardized reporting of performance metrics, which may overestimate algorithmic accuracy. The use of overlapping public repositories introduced potential clustering bias. To ensure robust generalizability, future research must prioritize validation on independent, multicenter, and nonpublic datasets. Furthermore, the lack of a sensitivity analysis excluding high-RoB studies implies that the high overall performance of DL models could be partially influenced by methodologically weaker studies, warranting validation in future high-quality trials. Finally, due to the limited number of eligible studies, we pooled smartphone-based data from varying modalities, including diffuse photography and slit-lamp adapters. While this aligned with our assessment of general mobile health accessibility, it introduced potential optical heterogeneity. Consequently, statistical quantification of performance differences between optical sectioning and diffuse lighting was not feasible, warranting future separate investigations.</p><p>We identified the potential of DL for clinical use from the included studies, but the number of studies directly comparing the performance of DL and ML in cataract diagnosis was limited, with only 2 in detection and 4 in classification. Traditional ML (eg, logistic regression, random forest, and support vector machine) depends on manual feature engineering and classifiers, whose performance is limited by feature quality and domain knowledge [<xref ref-type="bibr" rid="ref94">94</xref>]. By contrast, DL (eg, CNN and transformer-based vision models) achieves higher accuracy in image recognition through automatic feature extraction but requires larger data volumes and greater computational power [<xref ref-type="bibr" rid="ref95">95</xref>].</p><p>The number of included studies was limited, so image segmentation performance was not meta-analyzed. Image segmentation is essentially a pixel-level prediction task, which relies on the ability to characterize discriminative features. However, it is often difficult for traditional ML to effectively capture such features.
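</p><p>To make the contrast between the 2 paradigms concrete, the following schematic Python sketch pairs a handcrafted-feature classifier with a small convolutional network. The images, labels, and architecture are hypothetical stand-ins and do not reproduce the pipeline of any included study.</p><preformat># Schematic sketch contrasting manual feature engineering (traditional ML)
# with automatic feature extraction (DL); images are hypothetical grayscale
# lens photographs as NumPy arrays, not data from any included study.
import numpy as np
from sklearn.svm import SVC
import torch
import torch.nn as nn

def handcrafted_features(img):
    """Traditional ML: features chosen by a human (quality-limited)."""
    hist, _ = np.histogram(img, bins=16, range=(0.0, 1.0), density=True)
    gy, gx = np.gradient(img)
    edge_strength = np.sqrt(gx ** 2 + gy ** 2).mean()
    return np.concatenate([hist, [img.mean(), img.std(), edge_strength]])

rng = np.random.default_rng(0)
images = rng.random((40, 64, 64))            # 40 hypothetical 64x64 images
labels = rng.integers(0, 2, size=40)         # hypothetical cataract labels

X = np.stack([handcrafted_features(im) for im in images])
svm = SVC(kernel="rbf").fit(X, labels)       # classifier on manual features

class TinyCNN(nn.Module):
    """DL: discriminative features are learned end to end from pixels."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(8, 16, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 2),
        )

    def forward(self, x):
        return self.net(x)

cnn = TinyCNN()
logits = cnn(torch.from_numpy(images).float().unsqueeze(1))  # (40, 2) scores</preformat><p>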
In contrast, DL demonstrates significantly better performance by automated learning of complex patterns in highly heterogeneous data [<xref ref-type="bibr" rid="ref6">6</xref>]. DL models contain millions of parameters that must be optimized through data-driven training, whereas traditional ML models display higher stability in small-sample multiclass settings [<xref ref-type="bibr" rid="ref95">95</xref>]. These findings suggest that traditional ML models may have the potential to raise the accuracy of cataract classification, while DL is more advantageous in reducing misdiagnosis rates; combining the 2 approaches may facilitate the clinical translation of AI in the future.</p><p>Despite the available evidence on the clinical translational potential of DL, only 7 studies compared the efficacy of DL and human experts in cataract detection, and no studies compared their performance in cataract classification. Notably, significant interrater variability was present in expert performance due to heterogeneity in cumulative clinical experience and health care resource allocation, highlighting the need for more comprehensive comparative studies. These studies generally report positive results for DL, but insufficient sample sizes and methodological heterogeneity may produce an optimism bias in reported algorithm performance. Therefore, standardized study designs and transparent reporting norms are urgently needed to improve the quality of evidence and the clinical translational value of DL. DL has been successfully applied to retinal fundus image analysis [<xref ref-type="bibr" rid="ref14">14</xref>] for automated diagnosis of DR [<xref ref-type="bibr" rid="ref96">96</xref>] and glaucoma [<xref ref-type="bibr" rid="ref97">97</xref>]. Considering the technical feasibility of DL in ophthalmic disease screening, it is recommended that DL serve as an adjunct to clinical diagnosis to optimize the treatment process by human-machine collaboration. Additionally, no significant publication bias was detected, but the findings need to be interpreted with caution. It is recommended that a clinician&#x2019;s diagnostic efficiency be used as a core assessment metric and that real-world validation be incorporated in subsequent studies.</p><p>In the field of AI methodology, several standardized guidelines have been recently issued [<xref ref-type="bibr" rid="ref98">98</xref>,<xref ref-type="bibr" rid="ref99">99</xref>]. However, no unified consensus has been reached on AI for cataract diagnosis. At the time of this writing, computer-aided diagnostic techniques mainly combine medical image processing and AI, but research focuses mostly on eye diseases of the posterior segment (eg, DR and age-related macular degeneration) and less on the anterior segment (eg, cataracts). Currently, cataract diagnosis relies on slit-lamp microscopy of lens morphology, and cataract classification is based on the LOCS III [<xref ref-type="bibr" rid="ref87">87</xref>], the Oxford Clinical Cataract Classification and Grading System [<xref ref-type="bibr" rid="ref100">100</xref>], and the American Cooperative Cataract Research Group method [<xref ref-type="bibr" rid="ref101">101</xref>].
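</p><p>For reference, the severity categories used in this review can be related to such grading scales. The following minimal sketch maps a LOCS III nuclear opalescence (NO) score onto the mild, moderate, and severe categories; the cutoffs are hypothetical examples for illustration only and are not prescribed by LOCS III or by this review.</p><preformat># Purely illustrative mapping from a LOCS III nuclear opalescence (NO) score
# (decimal scale from 0.1 to 6.9) to the severity categories used in this
# review; the cutoffs below are hypothetical examples, not prescribed values.
def severity_from_no_score(no_score: float) -> str:
    if no_score >= 4.0:    # hypothetical cutoff
        return "severe"
    if no_score >= 2.0:    # hypothetical cutoff
        return "moderate"
    return "mild"

print(severity_from_no_score(2.5))  # -> moderate</preformat><p>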
DL has made a preliminary breakthrough in automated cataract classification, such as the turbidity-density-location assessment system developed by Lin et al [<xref ref-type="bibr" rid="ref19">19</xref>] based on anterior segment slit lamp images and the DL classification model developed by Zhou et al [<xref ref-type="bibr" rid="ref102">102</xref>] using retinal fundus images, but a standardized severity scale adapted to DL is urgently needed for its clinical use. Therefore, it is recommended that future studies standardize the criteria for performance assessment of DL algorithms, with a focus on improving the transparency of methodology reporting and the reproducibility validation process.</p><p>Data scarcity and lack of generalization capability are key scientific challenges for DL. In this systematic review, retrospective designs were adopted in most of the included studies, while prospective designs and multicenter clinical trials accounted for less than 5%, and their annotation criteria were not optimized for the needs of DL, limiting methodological rigor. We should recognize that most of the studies used double-blind, annotated, and quality-controlled image data for model training, which effectively improved diagnostic accuracy and reduced the RoB while balancing data size and quality. The included studies generally used data augmentation techniques, indicating the lack of high-quality annotated datasets and prospective validation studies.</p><p>The acquisition of representative data for clinical validation remains the major bottleneck. The model&#x2019;s clinical generalization capability is severely restricted by the time-consuming nature of pixel-level annotation of fundus images, domain shift resulting from cross-device and cross-race variations, and a lack of fine-grained pathology characterization. In the future, research should focus on multimodal data fusion for modeling (eg, OCT plus fundus images), deployment of edge computing architectures, and embedding of causal inference modules, thereby establishing intelligent diagnosis and treatment systems with both clinical credibility and engineering practicality.</p><p>In this study, substantial methodological heterogeneity was found in the reporting of DL performance metrics, including incomplete reporting of sensitivity or specificity, a lack of 2&#x00D7;2 contingency tables, and general overreliance on aggregated metrics such as AUC-ROC and <italic>F</italic><sub>1</sub>-score. Of particular note, high AUC values (&#x003E;0.90) may mask the misclassification of key positive cases (eg, progressive cataract) in clinical scenarios with severely imbalanced class distributions. Based on these findings, it is recommended that the confusion matrix serve as a core reporting metric. The above-mentioned problems can be gradually resolved by high-quality studies.</p><p>Another key obstacle is the lack of consensus on the interpretability of DL decision-making mechanisms. It is difficult to intuitively understand the decision-making logic of DL models due to their complex network structure and nonlinear feature extraction, which, as a &#x201C;black-box&#x201D; characteristic, has been deemed a challenge for clinical use. Recently, researchers have gradually revealed the intrinsic mechanisms of DL models by gradient-weighted class activation mapping, adversarial testing, and causal inference [<xref ref-type="bibr" rid="ref95">95</xref>].
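</p><p>To illustrate one of these techniques, the following minimal PyTorch sketch implements gradient-weighted class activation mapping (Grad-CAM) on a hypothetical, untrained ResNet-18 and a random input tensor; it is a schematic example, not the implementation used by any cited study.</p><preformat># Minimal Grad-CAM sketch (gradient-weighted class activation mapping);
# the model and input are hypothetical stand-ins, not any study's pipeline.
import torch
import torch.nn.functional as F
from torchvision.models import resnet18

model = resnet18(weights=None).eval()        # hypothetical untrained CNN
activations, gradients = {}, {}

def fwd_hook(module, inputs, output):
    activations["feat"] = output.detach()

def bwd_hook(module, grad_input, grad_output):
    gradients["feat"] = grad_output[0].detach()

# Hook the last convolutional block to capture feature maps and gradients
model.layer4.register_forward_hook(fwd_hook)
model.layer4.register_full_backward_hook(bwd_hook)

x = torch.randn(1, 3, 224, 224)              # hypothetical input image
logits = model(x)
logits[0, logits.argmax()].backward()        # backprop the top class score

# Channel weights: global-average-pooled gradients over spatial dimensions
w = gradients["feat"].mean(dim=(2, 3), keepdim=True)
cam = F.relu((w * activations["feat"]).sum(dim=1, keepdim=True))
cam = F.interpolate(cam, size=x.shape[2:], mode="bilinear",
                    align_corners=False)
cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)  # scale to [0, 1]
print(cam.shape)                             # torch.Size([1, 1, 224, 224])</preformat><p>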
For example, Chang et al [<xref ref-type="bibr" rid="ref103">103</xref>] used adversarial samples to analyze the decision-making basis of DL models in glaucoma detection. Ara&#x00FA;jo et al [<xref ref-type="bibr" rid="ref104">104</xref>] localized the key regions of DR in fundus images by multiple-instance learning. Explainable AI (XAI) is breaking through the limitations of traditional AI by pairing decision outputs with attributional explanations [<xref ref-type="bibr" rid="ref105">105</xref>]. Abr&#x00E0;moff et al [<xref ref-type="bibr" rid="ref106">106</xref>] systematically reviewed the interpretability framework for DL models in the medical field, laying a theoretical foundation for their clinical translation. Future research needs to further explore innovative methods of XAI in medical image analysis to enhance clinical credibility.</p><p>This study showed that DL was applied primarily to the screening of eye diseases such as DR, for which mature diagnostic guidelines have been established. Notably, differences in health care resource allocation should be considered when popularizing DL. In subsequent studies, the clinical effect and health economic benefits should be assessed across different DL algorithms, and the &#x201C;black-box&#x201D; problem of DL algorithms should be addressed using interpretability methods to enhance clinical acceptance.</p></sec></body><back><ack><p>This work was supported by the Regional Science Fund Project of the National Natural Science Foundation of China (grant No. 82360203). The sponsors or funding organizations had no role in the design or conduct of this research.</p></ack><notes><sec><title>Funding</title><p>The authors declared no financial support was received for this work.</p></sec><sec><title>Data Availability</title><p>The data that support the findings of this study are available from the corresponding author upon reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Writing - original draft preparation: RL, HL</p><p>Writing - review and editing: RL, HL, DT</p><p>Conceptualization: RL, HL, SL, LL, DT, CL</p><p>Methodology: RL, HL, SL, LL, DT, CL</p><p>Formal analysis and investigation: RL, HL, SL, LL</p><p>Funding acquisition: DT</p><p>Resources: DT, CL</p><p>Supervision: DT, CL</p><p>All authors commented on previous versions of the manuscript.
All authors read and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC</term><def><p>area under the receiver operating characteristic curve</p></def></def-item><def-item><term id="abb3">CC</term><def><p>cortical cataract</p></def></def-item><def-item><term id="abb4">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb5">DL</term><def><p>deep learning</p></def></def-item><def-item><term id="abb6">DR</term><def><p>diabetic retinopathy</p></def></def-item><def-item><term id="abb7">FDA</term><def><p>the US Food and Drug Administration</p></def></def-item><def-item><term id="abb8">FN</term><def><p>false negative</p></def></def-item><def-item><term id="abb9">FP</term><def><p>false positive</p></def></def-item><def-item><term id="abb10"><italic>ICD-11</italic></term><def><p><italic>International Classification of Diseases, 11th Revision</italic></p></def></def-item><def-item><term id="abb11">LMIC</term><def><p>low- and middle-income country</p></def></def-item><def-item><term id="abb12">LOCS III</term><def><p>lens opacities classification system III</p></def></def-item><def-item><term id="abb13">LRS</term><def><p>low-resource settings</p></def></def-item><def-item><term id="abb14">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb15">NC</term><def><p>nuclear cataract</p></def></def-item><def-item><term id="abb16">OCT</term><def><p>optical coherence tomography</p></def></def-item><def-item><term id="abb17">PI</term><def><p>prediction interval</p></def></def-item><def-item><term id="abb18">PPC</term><def><p>posterior polar cataract</p></def></def-item><def-item><term id="abb19">PRISMA-DTA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-analyses of Diagnostic Test Accuracy</p></def></def-item><def-item><term id="abb20">PROSPERO</term><def><p>International Prospective Register of Systematic Reviews</p></def></def-item><def-item><term id="abb21">PSC</term><def><p>posterior subcapsular cataract</p></def></def-item><def-item><term id="abb22">QUADAS-2</term><def><p>Quality Assessment of Diagnostic Accuracy Studies-2</p></def></def-item><def-item><term id="abb23">ResNet</term><def><p>residual network</p></def></def-item><def-item><term id="abb24">RevMan</term><def><p>Review Manager</p></def></def-item><def-item><term id="abb25">RoB</term><def><p>risk of bias</p></def></def-item><def-item><term id="abb26">SROC</term><def><p>summary receiver operating characteristic curve</p></def></def-item><def-item><term id="abb27">TN</term><def><p>true negative</p></def></def-item><def-item><term id="abb28">TP</term><def><p>true positive</p></def></def-item><def-item><term id="abb29">WHO</term><def><p>World Health Organization</p></def></def-item><def-item><term id="abb30">XAI</term><def><p>explainable artificial intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cicinelli</surname><given-names>MV</given-names> </name><name name-style="western"><surname>Buchan</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Nicholson</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Varadaraj</surname><given-names>V</given-names> </name><name name-style="western"><surname>Khanna</surname><given-names>RC</given-names> </name></person-group><article-title>Cataracts</article-title><source>Lancet</source><year>2023</year><month>02</month><day>4</day><volume>401</volume><issue>10374</issue><fpage>377</fpage><lpage>389</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(22)01839-6</pub-id><pub-id pub-id-type="medline">36565712</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Steinmetz</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Bourne</surname><given-names>RRA</given-names> </name><name name-style="western"><surname>Briant</surname><given-names>PS</given-names> </name><etal/></person-group><article-title>Causes of blindness and VISION impairment in 2020 and trends over 30 years, and prevalence of avoidable blindness in relation to VISION 2020: the Right to Sight: an analysis for the Global Burden of Disease Study</article-title><source>Lancet Glob Health</source><year>2021</year><month>02</month><volume>9</volume><issue>2</issue><fpage>e144</fpage><lpage>e160</lpage><pub-id pub-id-type="doi">10.1016/S2214-109X(20)30489-7</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bourne</surname><given-names>R</given-names> </name><name name-style="western"><surname>Steinmetz</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Flaxman</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Trends in prevalence of blindness and distance and near vision impairment over 30 years: an analysis for the Global Burden of Disease Study</article-title><source>Lancet Glob Health</source><year>2021</year><month>02</month><volume>9</volume><issue>2</issue><fpage>e130</fpage><lpage>e143</lpage><pub-id pub-id-type="doi">10.1016/S2214-109X(20)30425-3</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Dawson-Squibb</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Davids</surname><given-names>EL</given-names> </name><name name-style="western"><surname>Viljoen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rice</surname><given-names>K</given-names> </name><name name-style="western"><surname>Stein</surname><given-names>DJ</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Matson</surname><given-names>JL</given-names> </name></person-group><article-title>The WHO international classification of diseases 11th revision (ICD-11)</article-title><source>Handbook of Clinical Child Psychology: Integrating Theory and Research into Practice</source><year>2023</year><publisher-name>Springer International Publishing</publisher-name><fpage>53</fpage><lpage>78</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-24926-6_4</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ming</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Yao</surname><given-names>X</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Performance of ChatGPT in ophthalmic registration and clinical diagnosis: cross-sectional study</article-title><source>J Med Internet Res</source><year>2024</year><month>11</month><day>14</day><volume>26</volume><fpage>e60226</fpage><pub-id pub-id-type="doi">10.2196/60226</pub-id><pub-id pub-id-type="medline">39541581</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Recent advances and clinical applications of deep learning in medical image analysis</article-title><source>Med Image Anal</source><year>2022</year><month>07</month><volume>79</volume><fpage>102444</fpage><pub-id pub-id-type="doi">10.1016/j.media.2022.102444</pub-id><pub-id pub-id-type="medline">35472844</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Anwar</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Majid</surname><given-names>M</given-names> </name><name name-style="western"><surname>Qayyum</surname><given-names>A</given-names> </name><name name-style="western"><surname>Awais</surname><given-names>M</given-names> </name><name name-style="western"><surname>Alnowami</surname><given-names>M</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>MK</given-names> </name></person-group><article-title>Medical image analysis using convolutional neural networks: a review</article-title><source>J Med Syst</source><year>2018</year><month>10</month><day>8</day><volume>42</volume><issue>11</issue><fpage>226</fpage><pub-id pub-id-type="doi">10.1007/s10916-018-1088-1</pub-id><pub-id pub-id-type="medline">30298337</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>K</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>J</given-names> </name></person-group><article-title>Deep residual learning for image recognition</article-title><conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 27-30, 2016</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hasanah</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Pravitasari</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Abdullah</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Yulita</surname><given-names>IN</given-names> </name><name 
name-style="western"><surname>Asnawi</surname><given-names>MH</given-names> </name></person-group><article-title>A deep learning review of ResNet Architecture for lung disease identification in CXR image</article-title><source>Appl Sci (Basel)</source><year>2023</year><volume>13</volume><issue>24</issue><fpage>13111</fpage><pub-id pub-id-type="doi">10.3390/app132413111</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Williams</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hornung</surname><given-names>H</given-names> </name><name name-style="western"><surname>Nadimpalli</surname><given-names>A</given-names> </name><name name-style="western"><surname>Peery</surname><given-names>A</given-names> </name></person-group><article-title>Deep learning and its application for healthcare delivery in low and middle income countries</article-title><source>Front Artif Intell</source><year>2021</year><volume>4</volume><fpage>553987</fpage><pub-id pub-id-type="doi">10.3389/frai.2021.553987</pub-id><pub-id pub-id-type="medline">33997772</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>K</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Xiang</surname><given-names>T</given-names> </name><name name-style="western"><surname>Loy</surname><given-names>CC</given-names> </name></person-group><article-title>Domain generalization: a survey</article-title><source>IEEE Trans Pattern Anal Mach Intell</source><year>2023</year><month>04</month><volume>45</volume><issue>4</issue><fpage>4396</fpage><lpage>4415</lpage><pub-id pub-id-type="doi">10.1109/TPAMI.2022.3195549</pub-id><pub-id pub-id-type="medline">35914036</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>DW</given-names> </name><name name-style="western"><surname>Jang</surname><given-names>HY</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>KW</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Park</surname><given-names>SH</given-names> </name></person-group><article-title>Design characteristics of studies reporting the performance of artificial intelligence algorithms for diagnostic analysis of medical images: results from recently published papers</article-title><source>Korean J Radiol</source><year>2019</year><month>03</month><volume>20</volume><issue>3</issue><fpage>405</fpage><lpage>410</lpage><pub-id pub-id-type="doi">10.3348/kjr.2019.0025</pub-id><pub-id pub-id-type="medline">30799571</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kelly</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Karthikesalingam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Suleyman</surname><given-names>M</given-names> 
</name><name name-style="western"><surname>Corrado</surname><given-names>G</given-names> </name><name name-style="western"><surname>King</surname><given-names>D</given-names> </name></person-group><article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title><source>BMC Med</source><year>2019</year><month>10</month><day>29</day><volume>17</volume><issue>1</issue><fpage>195</fpage><pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id><pub-id pub-id-type="medline">31665002</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bo</surname><given-names>W</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Applications of deep learning in fundus images: a review</article-title><source>Med Image Anal</source><year>2021</year><month>04</month><volume>69</volume><fpage>101971</fpage><pub-id pub-id-type="doi">10.1016/j.media.2021.101971</pub-id><pub-id pub-id-type="medline">33524824</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benjamens</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dhunnoo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Mesk&#x00F3;</surname><given-names>B</given-names> </name></person-group><article-title>The state of artificial intelligence-based FDA-approved medical devices and algorithms: an online database</article-title><source>NPJ Digit Med</source><year>2020</year><volume>3</volume><fpage>118</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-00324-0</pub-id><pub-id pub-id-type="medline">32984550</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Gaidhane</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of IDx-DR for detecting diabetic retinopathy: a systematic review and meta-analysis</article-title><source>Am J Ophthalmol</source><year>2025</year><month>05</month><volume>273</volume><fpage>192</fpage><lpage>204</lpage><pub-id pub-id-type="doi">10.1016/j.ajo.2025.02.022</pub-id><pub-id pub-id-type="medline">39986640</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Verbraak</surname><given-names>FD</given-names> </name><name name-style="western"><surname>Abramoff</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Bausch</surname><given-names>GCF</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of a device for the automated detection of diabetic retinopathy in a primary care setting</article-title><source>Diabetes Care</source><year>2019</year><month>04</month><volume>42</volume><issue>4</issue><fpage>651</fpage><lpage>656</lpage><pub-id pub-id-type="doi">10.2337/dc18-0148</pub-id><pub-id 
pub-id-type="medline">30765436</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>MDF</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thombs</surname><given-names>BD</given-names> </name><etal/></person-group><article-title>Preferred Reporting Items for a Systematic Review and Meta-analysis of Diagnostic Test Accuracy Studies: the PRISMA-DTA statement</article-title><source>JAMA</source><year>2018</year><month>01</month><day>23</day><volume>319</volume><issue>4</issue><fpage>388</fpage><lpage>396</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id><pub-id pub-id-type="medline">29362800</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>H</given-names> </name><name name-style="western"><surname>Li</surname><given-names>R</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Diagnostic efficacy and therapeutic decision-making capacity of an artificial intelligence platform for childhood cataracts in eye clinics: a multicentre randomized controlled trial</article-title><source>EClinicalMedicine</source><year>2019</year><month>03</month><volume>9</volume><fpage>52</fpage><lpage>59</lpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2019.03.001</pub-id><pub-id pub-id-type="medline">31143882</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Deepak</surname><given-names>GD</given-names> </name><name name-style="western"><surname>Bhat</surname><given-names>SK</given-names> </name></person-group><article-title>Deep learning-based CNN for multiclassification of ocular diseases using transfer learning</article-title><source>Comput Methods Biomech Biomed Eng Imaging Vis</source><year>2024</year><month>12</month><day>31</day><volume>12</volume><issue>1</issue><pub-id pub-id-type="doi">10.1080/21681163.2024.2335959</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wan</surname><given-names>C</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Li</surname><given-names>K</given-names> </name></person-group><article-title>NCME-Net: nuclear cataract mask encoder network for intelligent grading using self-supervised learning from anterior segment photographs</article-title><source>Heliyon</source><year>2024</year><month>07</month><volume>10</volume><issue>14</issue><fpage>e34726</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e34726</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Zia</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mahum</surname><given-names>R</given-names> </name><name name-style="western"><surname>Ahmad</surname><given-names>N</given-names> </name><name name-style="western"><surname>Awais</surname><given-names>M</given-names> </name><name name-style="western"><surname>Alshamrani</surname><given-names>AM</given-names> </name></person-group><article-title>Eye diseases detection using deep learning with BAM attention module</article-title><source>Multimed Tools Appl</source><year>2024</year><volume>83</volume><issue>20</issue><fpage>59061</fpage><lpage>59084</lpage><pub-id pub-id-type="doi">10.1007/s11042-023-17839-9</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Nuclear cataract classification in anterior segment OCT based on clinical global&#x2013;local features</article-title><source>Complex Intell Syst</source><year>2023</year><month>04</month><volume>9</volume><issue>2</issue><fpage>1479</fpage><lpage>1493</lpage><pub-id pub-id-type="doi">10.1007/s40747-022-00869-5</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Z&#x00E9;boulon</surname><given-names>P</given-names> </name><name name-style="western"><surname>Panthier</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rouger</surname><given-names>H</given-names> </name><name name-style="western"><surname>Bijon</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ghazal</surname><given-names>W</given-names> </name><name name-style="western"><surname>Gatinel</surname><given-names>D</given-names> </name></person-group><article-title>Development and validation of a pixel wise deep learning model to detect cataract on swept-source optical coherence tomography images</article-title><source>J Optom</source><year>2022</year><volume>15 Suppl 1</volume><issue>Suppl 1</issue><fpage>S43</fpage><lpage>S49</lpage><pub-id pub-id-type="doi">10.1016/j.optom.2022.08.003</pub-id><pub-id pub-id-type="medline">36229338</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Higashita</surname><given-names>R</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name></person-group><article-title>Regional context-based recalibration network for cataract recognition in AS-OCT</article-title><source>Pattern Recognit DAGM</source><year>2024</year><month>03</month><volume>147</volume><fpage>110069</fpage><pub-id pub-id-type="doi">10.1016/j.patcog.2023.110069</pub-id></nlm-citation></ref><ref 
id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Attention to region: region-based integration-and-recalibration networks for nuclear cataract classification using AS-OCT images</article-title><source>Med Image Anal</source><year>2022</year><month>08</month><volume>80</volume><fpage>102499</fpage><pub-id pub-id-type="doi">10.1016/j.media.2022.102499</pub-id><pub-id pub-id-type="medline">35704990</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xie</surname><given-names>H</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Deep learning for detecting visually impaired cataracts using fundus images</article-title><source>Front Cell Dev Biol</source><year>2023</year><volume>11</volume><fpage>1197239</fpage><pub-id pub-id-type="doi">10.3389/fcell.2023.1197239</pub-id><pub-id pub-id-type="medline">37576595</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Artificial intelligence model for antiinterference cataract automatic diagnosis: a diagnostic accuracy study</article-title><source>Front Cell Dev Biol</source><year>2022</year><volume>10</volume><fpage>906042</fpage><pub-id pub-id-type="doi">10.3389/fcell.2022.906042</pub-id><pub-id pub-id-type="medline">35938155</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vasan</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>S</given-names> </name><name name-style="western"><surname>Shekhar</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Accuracy of an artificial intelligence-based mobile application for detecting cataracts: results from a field study</article-title><source>Indian J Ophthalmol</source><year>2023</year><month>08</month><volume>71</volume><issue>8</issue><fpage>2984</fpage><lpage>2989</lpage><pub-id pub-id-type="doi">10.4103/IJO.IJO_3372_22</pub-id><pub-id pub-id-type="medline">37530269</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ul Hassan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Al-Awady</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Ahmed</surname><given-names>N</given-names> </name><etal/></person-group><article-title>A transfer learning enabled approach for ocular disease detection and 
classification</article-title><source>Health Inf Sci Syst</source><year>2024</year><month>12</month><volume>12</volume><issue>1</issue><fpage>36</fpage><pub-id pub-id-type="doi">10.1007/s13755-024-00293-8</pub-id><pub-id pub-id-type="medline">38868156</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ueno</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Oda</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yamaguchi</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Deep learning model for extensive smartphone-based diagnosis and triage of cataracts and multiple corneal diseases</article-title><source>Br J Ophthalmol</source><year>2024</year><month>09</month><day>20</day><volume>108</volume><issue>10</issue><fpage>1406</fpage><lpage>1413</lpage><pub-id pub-id-type="doi">10.1136/bjo-2023-324488</pub-id><pub-id pub-id-type="medline">38242700</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Banoub</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sanghvi</surname><given-names>HA</given-names> </name><etal/></person-group><article-title>An artificial intelligence driven approach for classification of ophthalmic images using convolutional neural network: an experimental study</article-title><source>Curr Med Imaging</source><year>2024</year><volume>20</volume><fpage>e15734056286918</fpage><pub-id pub-id-type="doi">10.2174/0115734056286918240419100058</pub-id><pub-id pub-id-type="medline">38721793</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shafiq</surname><given-names>M</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Alghamedy</surname><given-names>FH</given-names> </name><name name-style="western"><surname>Obidallah</surname><given-names>WJ</given-names> </name></person-group><article-title>DualEye-FeatureNet: a dual-stream feature transfer framework for multi-modal ophthalmic image classification</article-title><source>IEEE Access</source><year>2024</year><volume>12</volume><fpage>143985</fpage><lpage>144008</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2024.3469244</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Santone</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cesarelli</surname><given-names>M</given-names> </name><name name-style="western"><surname>Colasuonno</surname><given-names>E</given-names> </name><name name-style="western"><surname>Bevilacqua</surname><given-names>V</given-names> </name><name name-style="western"><surname>Mercaldo</surname><given-names>F</given-names> </name></person-group><article-title>A method for ocular disease diagnosis through visual prediction explainability</article-title><source>Electronics (Basel)</source><year>2024</year><month>07</month><volume>13</volume><issue>14</issue><fpage>2706</fpage><pub-id 
pub-id-type="doi">10.3390/electronics13142706</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jawad</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Khursheed</surname><given-names>F</given-names> </name><name name-style="western"><surname>Nawaz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mir</surname><given-names>AH</given-names> </name></person-group><article-title>Towards improved fundus disease detection using Swin Transformers</article-title><source>Multimed Tools Appl</source><year>2024</year><volume>83</volume><issue>32</issue><fpage>78125</fpage><lpage>78159</lpage><pub-id pub-id-type="doi">10.1007/s11042-024-18627-9</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Janti</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Saluja</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tiwari</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Evaluation of the clinical impact of a smartphone application for cataract detection</article-title><source>Cureus</source><year>2024</year><month>10</month><volume>16</volume><issue>10</issue><fpage>e71467</fpage><pub-id pub-id-type="doi">10.7759/cureus.71467</pub-id><pub-id pub-id-type="medline">39539903</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Emir</surname><given-names>B</given-names> </name><name name-style="western"><surname>Colak</surname><given-names>E</given-names> </name></person-group><article-title>Performance analysis of pretrained convolutional neural network models for ophthalmological disease classification</article-title><source>Arq Bras Oftalmol</source><year>2023</year><volume>87</volume><issue>5</issue><fpage>e20220124</fpage><pub-id pub-id-type="doi">10.5935/0004-2749.2022-0124</pub-id><pub-id pub-id-type="medline">39298728</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ogundokun</surname><given-names>RO</given-names> </name><name name-style="western"><surname>Awotunde</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Akande</surname><given-names>HB</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Imoize</surname><given-names>AL</given-names> </name></person-group><article-title>Deep transfer learning models for mobile-based ocular disorder identification on retinal images</article-title><source>CMC</source><year>2024</year><volume>80</volume><issue>1</issue><fpage>139</fpage><lpage>161</lpage><pub-id pub-id-type="doi">10.32604/cmc.2024.052153</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nguyen</surname><given-names>VV</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CL</given-names> </name></person-group><article-title>Enhancing cataract detection through hybrid CNN 
approach and image quadration: a solution for precise diagnosis and improved patient care</article-title><source>Electronics (Basel)</source><year>2024</year><volume>13</volume><issue>12</issue><fpage>2344</fpage><pub-id pub-id-type="doi">10.3390/electronics13122344</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mai</surname><given-names>ELC</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>BH</given-names> </name><name name-style="western"><surname>Su</surname><given-names>TY</given-names> </name></person-group><article-title>Innovative utilization of ultra-wide field fundus images and deep learning algorithms for screening high-risk posterior polar cataract</article-title><source>J Cataract Refract Surg</source><year>2024</year><month>06</month><day>1</day><volume>50</volume><issue>6</issue><fpage>618</fpage><lpage>623</lpage><pub-id pub-id-type="doi">10.1097/j.jcrs.0000000000001419</pub-id><pub-id pub-id-type="medline">38350234</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Raveenthini</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lavanya</surname><given-names>R</given-names> </name><name name-style="western"><surname>Benitez</surname><given-names>R</given-names> </name></person-group><article-title>Interpretable diagnostic system for multiocular diseases based on hybrid meta-heuristic feature selection</article-title><source>Comput Biol Med</source><year>2025</year><month>01</month><volume>184</volume><fpage>109486</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.109486</pub-id><pub-id pub-id-type="medline">39615233</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rafay</surname><given-names>A</given-names> </name><name name-style="western"><surname>Asghar</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Manzoor</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hussain</surname><given-names>W</given-names> </name></person-group><article-title>EyeCNN: exploring the potential of convolutional neural networks for identification of multiple eye diseases through retinal imagery</article-title><source>Int Ophthalmol</source><year>2023</year><month>10</month><volume>43</volume><issue>10</issue><fpage>3569</fpage><lpage>3586</lpage><pub-id pub-id-type="doi">10.1007/s10792-023-02764-5</pub-id><pub-id pub-id-type="medline">37291412</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abbas</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Albathan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Altameem</surname><given-names>A</given-names> </name><name name-style="western"><surname>Almakki</surname><given-names>RS</given-names> </name><name name-style="western"><surname>Hussain</surname><given-names>A</given-names> </name></person-group><article-title>Deep-ocular: improved transfer learning architecture using self-attention and dense layers for recognition of ocular 
diseases</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>10</month><day>10</day><volume>13</volume><issue>20</issue><fpage>3165</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13203165</pub-id><pub-id pub-id-type="medline">37891986</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Uyar</surname><given-names>K</given-names> </name><name name-style="western"><surname>Yurdakul</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ta&#x015F;demir</surname><given-names>&#x015E;</given-names> </name></person-group><article-title>ABC-based weighted voting deep ensemble learning model for multiple eye disease detection</article-title><source>Biomed Signal Process Control</source><year>2024</year><month>10</month><volume>96</volume><fpage>106617</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2024.106617</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Serwaa</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mensah</surname><given-names>PK</given-names> </name><name name-style="western"><surname>Adekoya</surname><given-names>AF</given-names> </name><name name-style="western"><surname>Ayidzoe</surname><given-names>MA</given-names> </name></person-group><article-title>LBPSCN: local binary pattern scaled capsule network for the recognition of ocular diseases</article-title><source>Int J Adv Comput Sci Appl</source><year>2024</year><month>01</month><volume>15</volume><issue>6</issue><pub-id pub-id-type="doi">10.14569/IJACSA.2024.01506155</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>L</given-names> </name><etal/></person-group><article-title>CCA-Net: clinical-awareness attention network for nuclear cataract classification in AS-OCT</article-title><source>Knowl Based Syst</source><year>2022</year><month>08</month><volume>250</volume><fpage>109109</fpage><pub-id pub-id-type="doi">10.1016/j.knosys.2022.109109</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Glaret Subin</surname><given-names>P</given-names> </name><name name-style="western"><surname>Muthukannan</surname><given-names>P</given-names> </name></person-group><article-title>Optimized convolution neural network based multiple eye disease detection</article-title><source>Comput Biol Med</source><year>2022</year><month>07</month><volume>146</volume><fpage>105648</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105648</pub-id><pub-id pub-id-type="medline">35751184</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xiao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>B</given-names> 
</name><name name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Higashita</surname><given-names>R</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name></person-group><article-title>Multi-style spatial attention module for cortical cataract classification in AS-OCT image with supervised contrastive learning</article-title><source>Comput Methods Programs Biomed</source><year>2024</year><month>02</month><volume>244</volume><fpage>107958</fpage><pub-id pub-id-type="doi">10.1016/j.cmpb.2023.107958</pub-id><pub-id pub-id-type="medline">38070390</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>H</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Dong</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Bai</surname><given-names>C</given-names> </name></person-group><article-title>MGCNet: multi-granularity cataract classification using denoising diffusion probabilistic model</article-title><source>Displays</source><year>2024</year><month>07</month><volume>83</volume><fpage>102716</fpage><pub-id pub-id-type="doi">10.1016/j.displa.2024.102716</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kumari</surname><given-names>P</given-names> </name><name name-style="western"><surname>Saxena</surname><given-names>P</given-names> </name></person-group><article-title>Cataract detection and visualization based on multi-scale deep features by RINet tuned with cyclic learning rate hyperparameter</article-title><source>Biomed Signal Process Control</source><year>2024</year><month>01</month><volume>87</volume><fpage>105452</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2023.105452</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Devaraj</surname><given-names>M</given-names> </name><name name-style="western"><surname>Namasivayam</surname><given-names>V</given-names> </name><name name-style="western"><surname>Srichandan</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>Development and testing of artificial intelligence-based mobile application to achieve cataract backlog-free status in Uttar Pradesh, India</article-title><source>Asia Pac J Ophthalmol (Phila)</source><year>2024</year><volume>13</volume><issue>5</issue><fpage>100094</fpage><pub-id pub-id-type="doi">10.1016/j.apjo.2024.100094</pub-id><pub-id pub-id-type="medline">39187013</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Al-Saadi</surname><given-names>EH</given-names> </name><name name-style="western"><surname>Khdiar</surname><given-names>AN</given-names> </name><name name-style="western"><surname>Al-Saadi</surname><given-names>LH</given-names> </name></person-group><article-title>An automated wavelet scattering network classification using three stages of cataract 
disease</article-title><source>Baghdad Sci J</source><year>2024</year><month>02</month><access-date>2026-04-24</access-date><volume>21</volume><issue>9</issue><fpage>3044</fpage><comment><ext-link ext-link-type="uri" xlink:href="https://bsj.uobaghdad.edu.iq/home/vol21/iss9/12/">https://bsj.uobaghdad.edu.iq/home/vol21/iss9/12/</ext-link></comment><pub-id pub-id-type="doi">10.21123/bsj.2024.8995</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elsawy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Keenan</surname><given-names>TDL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>A deep network DeepOpacityNet for detection of cataracts from color fundus photographs</article-title><source>Commun Med (Lond)</source><year>2023</year><month>12</month><day>16</day><volume>3</volume><issue>1</issue><fpage>184</fpage><pub-id pub-id-type="doi">10.1038/s43856-023-00410-w</pub-id><pub-id pub-id-type="medline">38104223</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Akram</surname><given-names>A</given-names> </name><name name-style="western"><surname>Debnath</surname><given-names>R</given-names> </name></person-group><article-title>An automated eye disease recognition system from visual content of facial images using machine learning techniques</article-title><source>Turk J Elec Eng Comp Sci</source><year>2020</year><month>04</month><volume>28</volume><issue>2</issue><fpage>917</fpage><lpage>932</lpage><pub-id pub-id-type="doi">10.3906/elk-1905-42</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Automatic classification of heterogeneous slit-illumination images using an ensemble of cost-sensitive convolutional neural networks</article-title><source>Ann Transl Med</source><year>2021</year><month>04</month><volume>9</volume><issue>7</issue><fpage>550</fpage><pub-id pub-id-type="doi">10.21037/atm-20-6635</pub-id><pub-id pub-id-type="medline">33987248</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yadav</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yadav</surname><given-names>J</given-names> </name></person-group><article-title>Automatic cataract severity detection and grading using deep learning</article-title><source>J Sens</source><year>2023</year><month>01</month><access-date>2026-04-24</access-date><volume>2023</volume><issue>1</issue><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/doi/10.1155/2023/2973836">https://onlinelibrary.wiley.com/doi/10.1155/2023/2973836</ext-link></comment><pub-id pub-id-type="doi">10.1155/2023/2973836</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Yadav</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yadav</surname><given-names>J</given-names> </name></person-group><article-title>Enhancing cataract detection precision: a deep learning approach</article-title><source>Trait Signal</source><year>2023</year><month>08</month><day>31</day><access-date>2026-04-24</access-date><volume>40</volume><issue>4</issue><fpage>1413</fpage><lpage>1424</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.iieta.org/journals/ts/paper/10.18280/ts.400410">https://www.iieta.org/journals/ts/paper/10.18280/ts.400410</ext-link></comment><pub-id pub-id-type="doi">10.18280/ts.400410</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Subin</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Kannan</surname><given-names>PM</given-names> </name></person-group><article-title>Multiple eye disease detection using hybrid adaptive mutation swarm optimization and RNN</article-title><source>Int J Adv Comput Sci Appl</source><year>2022</year><volume>13</volume><issue>9</issue><pub-id pub-id-type="doi">10.14569/IJACSA.2022.0130946</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pratap</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kokil</surname><given-names>P</given-names> </name></person-group><article-title>Computer-aided diagnosis of cataract using deep transfer learning</article-title><source>Biomed Signal Process Control</source><year>2019</year><month>08</month><volume>53</volume><fpage>101533</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2019.04.010</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Luo</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name></person-group><article-title>Ophthalmic disease detection via deep learning with a novel mixture loss function</article-title><source>IEEE J Biomed Health Inform</source><year>2021</year><month>09</month><volume>25</volume><issue>9</issue><fpage>3332</fpage><lpage>3339</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2021.3083605</pub-id><pub-id pub-id-type="medline">34033552</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Imran</surname><given-names>A</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Akhtar</surname><given-names>F</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Dang</surname><given-names>Y</given-names> 
</name></person-group><article-title>Automated identification of cataract severity using retinal fundus images</article-title><source>Comput Methods Biomech Biomed Eng Imaging Vis</source><year>2020</year><month>11</month><day>1</day><volume>8</volume><issue>6</issue><fpage>691</fpage><lpage>698</lpage><pub-id pub-id-type="doi">10.1080/21681163.2020.1806733</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Imran</surname><given-names>A</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Akhtar</surname><given-names>F</given-names> </name><name name-style="western"><surname>Mahmood</surname><given-names>T</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>L</given-names> </name></person-group><article-title>Fundus image-based cataract classification using a hybrid convolutional and recurrent neural network</article-title><source>Vis Comput</source><year>2021</year><month>08</month><volume>37</volume><issue>8</issue><fpage>2407</fpage><lpage>2417</lpage><pub-id pub-id-type="doi">10.1007/s00371-020-01994-3</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Acar</surname><given-names>E</given-names> </name><name name-style="western"><surname>T&#x00FC;rk</surname><given-names>&#x00D6;</given-names> </name><name name-style="western"><surname>Ertu&#x011F;rul</surname><given-names>&#x00D6;F</given-names> </name><name name-style="western"><surname>Aldemir</surname><given-names>E</given-names> </name></person-group><article-title>Employing deep learning architectures for image-based automatic cataract diagnosis</article-title><source>Turk J Elec Eng Comp Sci</source><year>2021</year><month>09</month><volume>29</volume><issue>SI-1</issue><fpage>2649</fpage><lpage>2662</lpage><pub-id pub-id-type="doi">10.3906/elk-2103-77</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Olaniyan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Olaniyan</surname><given-names>D</given-names> </name><name name-style="western"><surname>Obagbuwa</surname><given-names>IC</given-names> </name><name name-style="western"><surname>Esiefarienrhe</surname><given-names>BM</given-names> </name><name name-style="western"><surname>Odighi</surname><given-names>M</given-names> </name></person-group><article-title>Transformative transparent hybrid deep learning framework for accurate cataract detection</article-title><source>Appl Sci (Basel)</source><year>2024</year><volume>14</volume><issue>21</issue><fpage>10041</fpage><pub-id pub-id-type="doi">10.3390/app142110041</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ganokratanaa</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ketcham</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pramkeaw</surname><given-names>P</given-names> </name></person-group><article-title>Advancements in 
cataract detection: the systematic development of LeNet-convolutional neural network models</article-title><source>J Imaging</source><year>2023</year><month>09</month><day>26</day><volume>9</volume><issue>10</issue><fpage>197</fpage><pub-id pub-id-type="doi">10.3390/jimaging9100197</pub-id><pub-id pub-id-type="medline">37888304</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gan</surname><given-names>F</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Qin</surname><given-names>WG</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>SL</given-names> </name></person-group><article-title>Application of artificial intelligence for automatic cataract staging based on anterior segment images: comparing automatic segmentation approaches to manual segmentation</article-title><source>Front Neurosci</source><year>2023</year><volume>17</volume><fpage>1182388</fpage><pub-id pub-id-type="doi">10.3389/fnins.2023.1182388</pub-id><pub-id pub-id-type="medline">37152605</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tham</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Goh</surname><given-names>JHL</given-names> </name><name name-style="western"><surname>Anees</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Detecting visually significant cataract using retinal photograph-based deep learning</article-title><source>Nat Aging</source><year>2022</year><month>03</month><volume>2</volume><issue>3</issue><fpage>264</fpage><lpage>271</lpage><pub-id pub-id-type="doi">10.1038/s43587-022-00171-6</pub-id><pub-id pub-id-type="medline">37118370</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Siddique</surname><given-names>M</given-names> </name></person-group><article-title>Convolutional neural network modeling for eye disease recognition</article-title><source>Int J Onl Eng</source><year>2022</year><month>07</month><volume>18</volume><issue>9</issue><fpage>115</fpage><lpage>130</lpage><pub-id pub-id-type="doi">10.3991/ijoe.v18i09.29847</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sirajudeen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Balasubramaniam</surname><given-names>A</given-names> </name><name name-style="western"><surname>Karthikeyan</surname><given-names>S</given-names> </name></person-group><article-title>Novel angular binary pattern (NABP) and kernel based convolutional neural networks classifier for cataract detection</article-title><source>Multimed Tools Appl</source><year>2022</year><month>11</month><volume>81</volume><issue>27</issue><fpage>38485</fpage><lpage>38512</lpage><pub-id pub-id-type="doi">10.1007/s11042-022-13092-8</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Junayed</surname><given-names>MS</given-names> </name><name 
name-style="western"><surname>Islam</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Sadeghzadeh</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rahman</surname><given-names>S</given-names> </name></person-group><article-title>CataractNet: an automated cataract detection system using deep learning for fundus images</article-title><source>IEEE Access</source><year>2021</year><volume>9</volume><fpage>128799</fpage><lpage>128808</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2021.3112938</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Unified diagnosis framework for automated nuclear cataract grading based on smartphone slit-lamp images</article-title><source>IEEE Access</source><year>2020</year><volume>8</volume><fpage>174169</fpage><lpage>174178</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2020.3025346</pub-id><pub-id pub-id-type="medline">33747677</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Luan</surname><given-names>X</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>ACCV: automatic classification algorithm of cataract video based on deep learning</article-title><source>Biomed Eng Online</source><year>2021</year><month>08</month><day>5</day><volume>20</volume><issue>1</issue><fpage>78</fpage><pub-id pub-id-type="doi">10.1186/s12938-021-00906-3</pub-id><pub-id pub-id-type="medline">34353324</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lai</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Pai</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Marvin</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hung</surname><given-names>HH</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>DN</given-names> </name></person-group><article-title>The use of convolutional neural networks and digital camera images in cataract detection</article-title><source>Electronics (Basel)</source><year>2022</year><volume>11</volume><issue>6</issue><fpage>887</fpage><pub-id pub-id-type="doi">10.3390/electronics11060887</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Askarian</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ho</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chong</surname><given-names>JW</given-names> </name></person-group><article-title>Detecting cataract using smartphones</article-title><source>IEEE J Transl Eng Health 
Med</source><year>2021</year><volume>9</volume><fpage>3800110</fpage><pub-id pub-id-type="doi">10.1109/JTEHM.2021.3074597</pub-id><pub-id pub-id-type="medline">34786216</pub-id></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Son</surname><given-names>KY</given-names> </name><name name-style="western"><surname>Ko</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Deep learning-based cataract detection and grading from slit-lamp and retro-illumination photographs: model development and validation study</article-title><source>Ophthalmol Sci</source><year>2022</year><month>06</month><volume>2</volume><issue>2</issue><fpage>100147</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2022.100147</pub-id><pub-id pub-id-type="medline">36249697</pub-id></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saju</surname><given-names>B</given-names> </name><name name-style="western"><surname>Rajesh</surname><given-names>R</given-names> </name></person-group><article-title>Eye-vision net: cataract detection and classification in retinal and slit lamp images using deep network</article-title><source>Int J Adv Comput Sci Appl</source><year>2022</year><volume>13</volume><issue>12</issue><pub-id pub-id-type="doi">10.14569/IJACSA.2022.0131227</pub-id></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chellaswamy</surname><given-names>C</given-names> </name><name name-style="western"><surname>Geetha</surname><given-names>TS</given-names> </name><name name-style="western"><surname>Ramasubramanian</surname><given-names>B</given-names> </name><name name-style="western"><surname>Abirami</surname><given-names>R</given-names> </name><name name-style="western"><surname>Archana</surname><given-names>B</given-names> </name><name name-style="western"><surname>Divya Bharathi</surname><given-names>A</given-names> </name></person-group><article-title>Optimized convolutional neural network based multiple eye disease detection and information sharing system</article-title><conf-name>2022 6th International Conference on Intelligent Computing and Control Systems (ICICCS)</conf-name><conf-date>May 25-27, 2022</conf-date><pub-id pub-id-type="doi">10.1109/ICICCS53718.2022.9788334</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>L</given-names> </name><name name-style="western"><surname>He</surname><given-names>W</given-names> </name><etal/></person-group><article-title>Lens opacities classification system III-based artificial intelligence program for automatic cataract grading</article-title><source>J Cataract Refract Surg</source><year>2022</year><month>05</month><day>1</day><volume>48</volume><issue>5</issue><fpage>528</fpage><lpage>534</lpage><pub-id pub-id-type="doi">10.1097/j.jcrs.0000000000000790</pub-id><pub-id pub-id-type="medline">34433780</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Al&#x2010;Naji</surname><given-names>A</given-names> </name><name name-style="western"><surname>Khalid</surname><given-names>GA</given-names> </name><name name-style="western"><surname>Mahmood</surname><given-names>MF</given-names> </name><name name-style="western"><surname>Chahl</surname><given-names>JS</given-names> </name></person-group><article-title>Computer vision for eye diseases detection using pre&#x2010;trained deep learning techniques and raspberry Pi</article-title><source>J Eng</source><year>2024</year><month>07</month><volume>2024</volume><issue>7</issue><comment><ext-link ext-link-type="uri" xlink:href="https://ietresearch.onlinelibrary.wiley.com/toc/20513305/2024/7">https://ietresearch.onlinelibrary.wiley.com/toc/20513305/2024/7</ext-link></comment><pub-id pub-id-type="doi">10.1049/tje2.12410</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elloumi</surname><given-names>Y</given-names> </name></person-group><article-title>Cataract grading method based on deep convolutional neural networks and stacking ensemble learning</article-title><source>Int J Imaging Syst Tech</source><year>2022</year><month>05</month><volume>32</volume><issue>3</issue><fpage>798</fpage><lpage>814</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/toc/10981098/32/3">https://onlinelibrary.wiley.com/toc/10981098/32/3</ext-link></comment><pub-id pub-id-type="doi">10.1002/ima.22722</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zannah</surname><given-names>TB</given-names> </name><name name-style="western"><surname>Abdulla-Hil-Kafi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sheakh</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Bayesian optimized machine learning model for automated eye disease classification from fundus images</article-title><source>Computation</source><year>2024</year><volume>12</volume><issue>9</issue><fpage>190</fpage><pub-id pub-id-type="doi">10.3390/computation12090190</pub-id></nlm-citation></ref><ref id="ref82"><label>82</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiting</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Westwood</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title><source>Ann Intern Med</source><year>2011</year><month>10</month><day>18</day><volume>155</volume><issue>8</issue><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id><pub-id pub-id-type="medline">22007046</pub-id></nlm-citation></ref><ref id="ref83"><label>83</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Higgins</surname><given-names>JPT</given-names> </name><name name-style="western"><surname>Thompson</surname><given-names>SG</given-names> </name><name 
name-style="western"><surname>Deeks</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name></person-group><article-title>Measuring inconsistency in meta-analyses</article-title><source>BMJ</source><year>2003</year><month>09</month><day>6</day><volume>327</volume><issue>7414</issue><fpage>557</fpage><lpage>560</lpage><pub-id pub-id-type="doi">10.1136/bmj.327.7414.557</pub-id><pub-id pub-id-type="medline">12958120</pub-id></nlm-citation></ref><ref id="ref84"><label>84</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reitsma</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Glas</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Scholten</surname><given-names>R</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Zwinderman</surname><given-names>AH</given-names> </name></person-group><article-title>Bivariate analysis of sensitivity and specificity produces informative summary measures in diagnostic reviews</article-title><source>J Clin Epidemiol</source><year>2005</year><month>10</month><volume>58</volume><issue>10</issue><fpage>982</fpage><lpage>990</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2005.02.022</pub-id><pub-id pub-id-type="medline">16168343</pub-id></nlm-citation></ref><ref id="ref85"><label>85</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dwamena</surname><given-names>B</given-names> </name></person-group><article-title>MIDAS: Stata module for meta-analytical integration of diagnostic test accuracy studies</article-title><source>Statistical Software Components</source><year>2009</year><month>02</month><access-date>2026-04-23</access-date><volume>14</volume><comment><ext-link ext-link-type="uri" xlink:href="https://www.semanticscholar.org/paper/MIDAS%3A-Stata-module-for-meta-analytical-integration-Dwamena/ba5239784c4ae152163c214e8d022c7d6c1202f2?utm_source=direct_link">https://www.semanticscholar.org/paper/MIDAS%3A-Stata-module-for-meta-analytical-integration-Dwamena/ba5239784c4ae152163c214e8d022c7d6c1202f2?utm_source=direct_link</ext-link></comment></nlm-citation></ref><ref id="ref86"><label>86</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Image analysis-based machine learning for the diagnosis of retinopathy of prematurity: a meta-analysis and systematic review</article-title><source>Ophthalmol Retina</source><year>2024</year><month>07</month><volume>8</volume><issue>7</issue><fpage>678</fpage><lpage>687</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2024.01.013</pub-id><pub-id pub-id-type="medline">38237772</pub-id></nlm-citation></ref><ref id="ref87"><label>87</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chylack</surname><given-names>LT</given-names> </name><name 
name-style="western"><surname>Wolfe</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Singer</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Leske</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Bullimore</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Bailey</surname><given-names>IL</given-names> </name></person-group><article-title>The Lens Opacities Classification System III</article-title><source>Arch Ophthalmol</source><year>1993</year><month>06</month><day>1</day><volume>111</volume><issue>6</issue><fpage>831</fpage><pub-id pub-id-type="doi">10.1001/archopht.1993.01090060119035</pub-id></nlm-citation></ref><ref id="ref88"><label>88</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mackenbrock</surname><given-names>LHB</given-names> </name><name name-style="western"><surname>Labuz</surname><given-names>G</given-names> </name><name name-style="western"><surname>Baur</surname><given-names>ID</given-names> </name><name name-style="western"><surname>Yildirim</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Auffarth</surname><given-names>GU</given-names> </name><name name-style="western"><surname>Khoramnia</surname><given-names>R</given-names> </name></person-group><article-title>Cataract classification systems: a review</article-title><source>Klin Monbl Augenheilkd</source><year>2024</year><month>01</month><volume>241</volume><issue>1</issue><fpage>75</fpage><lpage>83</lpage><pub-id pub-id-type="doi">10.1055/a-2003-2369</pub-id><pub-id pub-id-type="medline">38242135</pub-id></nlm-citation></ref><ref id="ref89"><label>89</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gali</surname><given-names>HE</given-names> </name><name name-style="western"><surname>Sella</surname><given-names>R</given-names> </name><name name-style="western"><surname>Afshari</surname><given-names>NA</given-names> </name></person-group><article-title>Cataract grading systems: a review of past and present</article-title><source>Curr Opin Ophthalmol</source><year>2019</year><month>01</month><volume>30</volume><issue>1</issue><fpage>13</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1097/ICU.0000000000000542</pub-id><pub-id pub-id-type="medline">30489359</pub-id></nlm-citation></ref><ref id="ref90"><label>90</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cheung</surname><given-names>R</given-names> </name><name name-style="western"><surname>So</surname><given-names>S</given-names> </name><name name-style="western"><surname>Malvankar-Mehta</surname><given-names>MS</given-names> </name></person-group><article-title>Diagnostic accuracy of machine learning classifiers for cataracts: a systematic review and meta-analysis</article-title><source>Expert Rev Ophthalmol</source><year>2022</year><month>11</month><day>2</day><volume>17</volume><issue>6</issue><fpage>427</fpage><lpage>437</lpage><pub-id pub-id-type="doi">10.1080/17469899.2022.2142120</pub-id></nlm-citation></ref><ref id="ref91"><label>91</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Faes</surname><given-names>L</given-names> 
</name><name name-style="western"><surname>Kale</surname><given-names>AU</given-names> </name><etal/></person-group><article-title>A comparison of deep learning performance against health-care professionals in detecting diseases from medical imaging: a systematic review and meta-analysis</article-title><source>Lancet Digit Health</source><year>2019</year><month>10</month><volume>1</volume><issue>6</issue><fpage>e271</fpage><lpage>e297</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(19)30123-2</pub-id><pub-id pub-id-type="medline">33323251</pub-id></nlm-citation></ref><ref id="ref92"><label>92</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aggarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Martin</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of deep learning in medical imaging: a systematic review and meta-analysis</article-title><source>NPJ Digit Med</source><year>2021</year><month>04</month><day>7</day><volume>4</volume><issue>1</issue><fpage>65</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00438-z</pub-id><pub-id pub-id-type="medline">33828217</pub-id></nlm-citation></ref><ref id="ref93"><label>93</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Islam</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Poly</surname><given-names>TN</given-names> </name><name name-style="western"><surname>Walther</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Li</surname><given-names>YCJ</given-names> </name></person-group><article-title>Artificial intelligence in ophthalmology: a meta-analysis of deep learning models for retinal vessels segmentation</article-title><source>J Clin Med</source><year>2020</year><month>04</month><day>3</day><volume>9</volume><issue>4</issue><fpage>1018</fpage><pub-id pub-id-type="doi">10.3390/jcm9041018</pub-id><pub-id pub-id-type="medline">32260311</pub-id></nlm-citation></ref><ref id="ref94"><label>94</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>G</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Suzuki</surname><given-names>K</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>F</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>P</given-names> </name></person-group><article-title>Machine learning in medical imaging</article-title><source>Comput Med Imaging Graph</source><year>2015</year><month>04</month><volume>41</volume><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1016/j.compmedimag.2015.02.001</pub-id><pub-id pub-id-type="medline">25727143</pub-id></nlm-citation></ref><ref id="ref95"><label>95</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Castiglioni</surname><given-names>I</given-names> </name><name 
name-style="western"><surname>Rundo</surname><given-names>L</given-names> </name><name name-style="western"><surname>Codari</surname><given-names>M</given-names> </name><etal/></person-group><article-title>AI applications to medical images: from machine learning to deep learning</article-title><source>Phys Med</source><year>2021</year><month>03</month><volume>83</volume><fpage>9</fpage><lpage>24</lpage><pub-id pub-id-type="doi">10.1016/j.ejmp.2021.02.006</pub-id><pub-id pub-id-type="medline">33662856</pub-id></nlm-citation></ref><ref id="ref96"><label>96</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Islam</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Poly</surname><given-names>TN</given-names> </name><name name-style="western"><surname>Jian</surname><given-names>WS</given-names> </name><name name-style="western"><surname>Jack Li</surname><given-names>YC</given-names> </name></person-group><article-title>Deep learning algorithms for detection of diabetic retinopathy in retinal fundus photographs: a systematic review and meta-analysis</article-title><source>Comput Methods Programs Biomed</source><year>2020</year><month>07</month><volume>191</volume><fpage>105320</fpage><pub-id pub-id-type="doi">10.1016/j.cmpb.2020.105320</pub-id><pub-id pub-id-type="medline">32088490</pub-id></nlm-citation></ref><ref id="ref97"><label>97</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buisson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Navel</surname><given-names>V</given-names> </name><name name-style="western"><surname>Labb&#x00E9;</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Deep learning versus ophthalmologists for screening for glaucoma on fundus examination: a systematic review and meta-analysis</article-title><source>Clin Exp Ophthalmol</source><year>2021</year><month>12</month><volume>49</volume><issue>9</issue><fpage>1027</fpage><lpage>1038</lpage><pub-id pub-id-type="doi">10.1111/ceo.14000</pub-id><pub-id pub-id-type="medline">34506041</pub-id></nlm-citation></ref><ref id="ref98"><label>98</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rivera</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>AW</given-names> </name><name name-style="western"><surname>Denniston</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Calvert</surname><given-names>MJ</given-names> </name><collab>SPIRIT-AI and CONSORT-AI Working Group</collab></person-group><article-title>Guidelines for clinical trial protocols for interventions involving artificial intelligence: the SPIRIT-AI Extension</article-title><source>BMJ</source><year>2020</year><month>09</month><day>9</day><volume>370</volume><fpage>m3210</fpage><pub-id pub-id-type="doi">10.1136/bmj.m3210</pub-id><pub-id pub-id-type="medline">32907797</pub-id></nlm-citation></ref><ref id="ref99"><label>99</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Rivera</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Calvert</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Denniston</surname><given-names>AK</given-names> </name><collab>SPIRIT-AI and CONSORT-AI Working Group</collab></person-group><article-title>Reporting guidelines for clinical trial reports for interventions involving artificial intelligence: the CONSORT-AI Extension</article-title><source>BMJ</source><year>2020</year><month>09</month><day>9</day><volume>370</volume><fpage>m3164</fpage><pub-id pub-id-type="doi">10.1136/bmj.m3164</pub-id><pub-id pub-id-type="medline">32909959</pub-id></nlm-citation></ref><ref id="ref100"><label>100</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sparrow</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Bron</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>NAP</given-names> </name><name name-style="western"><surname>Ayliffe</surname><given-names>W</given-names> </name><name name-style="western"><surname>Hill</surname><given-names>AR</given-names> </name></person-group><article-title>The Oxford clinical cataract classification and grading system</article-title><source>Int Ophthalmol</source><year>1986</year><month>12</month><volume>9</volume><issue>4</issue><fpage>207</fpage><lpage>225</lpage><pub-id pub-id-type="doi">10.1007/BF00137534</pub-id><pub-id pub-id-type="medline">3793374</pub-id></nlm-citation></ref><ref id="ref101"><label>101</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chylack</surname><given-names>LT</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Tung</surname><given-names>WH</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>HM</given-names> </name></person-group><article-title>Classification of human senile cataractous changes by the American Cooperative Cataract Research Group (CCRG) method. I. 
Instrumentation and technique</article-title><source>Invest Ophthalmol Vis Sci</source><year>1983</year><month>04</month><volume>24</volume><issue>4</issue><fpage>424</fpage><lpage>431</lpage><pub-id pub-id-type="medline">6832915</pub-id></nlm-citation></ref><ref id="ref102"><label>102</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>G</given-names> </name><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name></person-group><article-title>Automatic cataract classification using deep neural network with discrete state transition</article-title><source>IEEE Trans Med Imaging</source><year>2020</year><month>02</month><volume>39</volume><issue>2</issue><fpage>436</fpage><lpage>446</lpage><pub-id pub-id-type="doi">10.1109/TMI.2019.2928229</pub-id><pub-id pub-id-type="medline">31295110</pub-id></nlm-citation></ref><ref id="ref103"><label>103</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ha</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Explaining the rationale of deep learning glaucoma decisions with adversarial examples</article-title><source>Ophthalmology</source><year>2021</year><month>01</month><volume>128</volume><issue>1</issue><fpage>78</fpage><lpage>88</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2020.06.036</pub-id><pub-id pub-id-type="medline">32598951</pub-id></nlm-citation></ref><ref id="ref104"><label>104</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ara&#x00FA;jo</surname><given-names>T</given-names> </name><name name-style="western"><surname>Aresta</surname><given-names>G</given-names> </name><name name-style="western"><surname>Mendon&#x00E7;a</surname><given-names>L</given-names> </name><etal/></person-group><article-title>DR|GRADUATE: uncertainty-aware deep learning-based diabetic retinopathy grading in eye fundus images</article-title><source>Med Image Anal</source><year>2020</year><month>07</month><volume>63</volume><fpage>101715</fpage><pub-id pub-id-type="doi">10.1016/j.media.2020.101715</pub-id><pub-id pub-id-type="medline">32434128</pub-id></nlm-citation></ref><ref id="ref105"><label>105</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van der Velden</surname><given-names>BHM</given-names> </name><name name-style="western"><surname>Kuijf</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Gilhuijs</surname><given-names>KGA</given-names> </name><name name-style="western"><surname>Viergever</surname><given-names>MA</given-names> </name></person-group><article-title>Explainable artificial intelligence (XAI) in deep learning-based medical image analysis</article-title><source>Med Image Anal</source><year>2022</year><month>07</month><volume>79</volume><fpage>102470</fpage><pub-id pub-id-type="doi">10.1016/j.media.2022.102470</pub-id><pub-id pub-id-type="medline">35576821</pub-id></nlm-citation></ref><ref id="ref106"><label>106</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Abr&#x00E0;moff</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Cunningham</surname><given-names>B</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Foundational considerations for artificial intelligence using ophthalmic images</article-title><source>Ophthalmology</source><year>2022</year><month>02</month><volume>129</volume><issue>2</issue><fpage>e14</fpage><lpage>e32</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2021.08.023</pub-id><pub-id pub-id-type="medline">34478784</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Methodological quality summary and risk of bias graph of the studies included in the meta-analysis.</p><media xlink:href="jmir_v28i1e78869_app1.doc" xlink:title="DOC File, 1647 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>PRISMA checklist.</p><media xlink:href="jmir_v28i1e78869_app2.pdf" xlink:title="PDF File, 183 KB"/></supplementary-material></app-group></back></article>