<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e81328</article-id><article-id pub-id-type="doi">10.2196/81328</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Detection and Management of Geographic Atrophy Secondary to Age-Related Macular Degeneration Using Noninvasive Retinal Images and Artificial Intelligence: Systematic Review</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Shi</surname><given-names>Nannan</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Li</surname><given-names>Jiaxian</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shang</surname><given-names>Mengqiu</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Weidao</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Xu</surname><given-names>Kai</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Yamin</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Liang</surname><given-names>Lina</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Eye Function Laboratory, Eye Hospital China Academy of Chinese Medical Sciences</institution><addr-line>No.33 Lugu Road, Shijingshan District</addr-line><addr-line>Beijing</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Ophthalmology, The First Affiliated Hospital of Yunnan University of Chinese Medicine, Yunnan Provincial Hospital of Traditional Chinese Medicine</institution><addr-line>Kunming</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Mavragani</surname><given-names>Amaryllis</given-names></name></contrib><contrib contrib-type="editor"><name 
name-style="western"><surname>Brini</surname><given-names>Stefano</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Zhang</surname><given-names>Jiale</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Liang</surname><given-names>Xiaolong</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Lina Liang, MD, PhD, Department of Eye Function Laboratory, Eye Hospital China Academy of Chinese Medical Sciences, No.33 Lugu Road, Shijingshan District, Beijing, 100040, China, +86 010-68683451; <email>lianglina163@163.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>21</day><month>11</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e81328</elocation-id><history><date date-type="received"><day>26</day><month>07</month><year>2025</year></date><date date-type="rev-recd"><day>11</day><month>10</month><year>2025</year></date><date date-type="accepted"><day>11</day><month>10</month><year>2025</year></date></history><copyright-statement>&#x00A9; Nannan Shi, Jiaxian Li, Mengqiu Shang, Weidao Zhang, Kai Xu, Yamin Li, Lina Liang. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 21.11.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e81328"/><abstract><sec><title>Background</title><p>Geographic atrophy (GA), the endpoint of dry age-related macular degeneration (AMD), is irreversible. The recent approval by the Food and Drug Administration of a complement component 3 inhibitor marks a significant breakthrough, highlighting the critical importance of early detection and management of GA. Consequently, there is an urgent and unmet need for efficient, accurate, and accessible methods to identify and monitor GA. Artificial intelligence (AI), particularly deep learning (DL), applied to noninvasive retinal imaging, offers a promising solution for automating and enhancing GA management.</p></sec><sec><title>Objective</title><p>This systematic review aimed to assess the performance of AI using noninvasive imaging modalities and compare it with clinical expert assessment as the ground truth.</p></sec><sec sec-type="methods"><title>Methods</title><p>Two consecutive searches were conducted on PubMed, Embase, Web of Science, Scopus, Cochrane Library, and CINAHL. 
The last search was performed on October 5, 2025. Studies using AI for GA secondary to dry AMD via noninvasive retinal imaging were included. Two authors worked in pairs to extract the study characteristics independently. A third author adjudicated disagreements. The Quality Assessment of Diagnostic Accuracy Studies-AI (QUADAS-AI) and the Prediction Model Risk of Bias Assessment Tool (PROBAST) were applied to evaluate the risk of bias and applicability.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 979 records were identified: 803 through the initial search and 176 through the updated search. Subsequently, 200 papers were assessed in full text, of which 41 were included in the final analysis: 10 for GA detection, 20 for GA assessment and progression, and 11 for GA lesion prediction. The reviewed studies collectively involved at least 24,592 participants (detection: n=7132, assessment and progression: n=14,064, and prediction: n=6706), with a wide age range of 50 to 94 years. The studies spanned a diverse array of countries, including the United States, the United Kingdom, China, Austria, Australia, France, Israel, Italy, Switzerland, and Germany, as well as a multicenter study encompassing 7 European nations. The studies used a variety of imaging modalities to assess GA, including color fundus photography, fundus autofluorescence, near-infrared reflectance, spectral domain&#x2013;optical coherence tomography (OCT), swept-source (SS)-OCT, and 3D-OCT. DL algorithms (eg, U-Net, ResNet50, EfficientNetB4, Xception, Inception v3, and PSC-UNet) consistently showed remarkable performance in GA detection and management tasks, with several studies achieving performance comparable to clinical experts.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>AI, particularly DL-based algorithms, holds considerable promise for the detection and management of GA secondary to dry AMD with performance comparable to ophthalmologists. This review innovatively consolidates evidence across GA management&#x2014;from initial detection to progression prediction&#x2014;using diverse noninvasive imaging. It has strong potential to augment clinical decision-making. However, to realize this potential in real-world settings, future research is needed to robustly enhance reporting specifications, ensure data diversity across populations and devices, and implement rigorous external validation in prospective, multicenter studies.</p></sec><sec><title>Trial Registration</title><p>PROSPERO CRD420251000963; https://www.crd.york.ac.uk/PROSPERO/view/CRD420251000963</p></sec></abstract><kwd-group><kwd>geographic atrophy</kwd><kwd>dry age-related macular degeneration</kwd><kwd>artificial intelligence</kwd><kwd>noninvasive retinal images</kwd><kwd>systematic review</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Age-related macular degeneration (AMD) is a progressive retinal disorder affecting millions of people worldwide [<xref ref-type="bibr" rid="ref1">1</xref>]. In its advanced stages, characterized by neovascularization and geographic atrophy (GA), it can lead to significant vision loss, although symptoms may be subtle during the early and intermediate phases [<xref ref-type="bibr" rid="ref2">2</xref>]. Based on imaging methods, the Classification of Atrophy Meetings group has defined the stages of atrophy lesion development as incomplete retinal pigment epithelium (RPE) and outer retinal atrophy (iRORA) and complete RPE and outer retinal atrophy (cRORA) [<xref ref-type="bibr" rid="ref3">3</xref>]. 
GA, also known as cRORA, is the endpoint of dry AMD and is characterized by the loss of photoreceptors, RPE, and choriocapillaris [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. The advent of 2 approved therapies for GA secondary to AMD in 2023, namely pegcetacoplan (Syfovre) [<xref ref-type="bibr" rid="ref6">6</xref>] and avacincaptad pegol [<xref ref-type="bibr" rid="ref7">7</xref>], represents a significant breakthrough in the treatment of GA. However, the effectiveness of these therapies relies heavily on early detection and the ability to monitor treatment response&#x2014;a significant unmet need in current clinical practice. The recent approval of complement inhibitors underscores the necessity for precise, reproducible, and practical tools to not only identify GA at its earliest stages but also to objectively track morphological changes over time, thereby evaluating therapeutic efficacy [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Artificial intelligence (AI) is uniquely positioned to address this gap by enabling precise, reproducible, and automated quantification of GA progression and treatment response using noninvasive imaging modalities [<xref ref-type="bibr" rid="ref10">10</xref>]. Unlike conventional methods that rely on subjective and time-consuming manual assessments, AI algorithms can detect subtle subclinical changes in retinal structures&#x2014;such as photoreceptor integrity loss, RPE atrophy, and hyperreflective foci&#x2014;long before they become clinically apparent. Thus, AI-based retinal imaging offers a critical foundation for early detection and timely intervention in GA.</p><p>Various imaging techniques, both invasive and noninvasive, can directly visualize GA lesions. Invasive methods, such as fluorescein angiography, often result in a poor patient experience and entail high costs because of pupil dilation and sodium fluorescein injection. Although fluorescein angiography remains the gold standard for assessing neovascular AMD and offers significant diagnostic insights for retinal vascular diseases, noninvasive fundus images are used for GA diagnosis and management in most cases [<xref ref-type="bibr" rid="ref2">2</xref>]. Color fundus photography (CFP), fundus autofluorescence (FAF), and near-infrared reflectance (NIR) are based on 2D images, which can generally quantify the atrophic area but cannot resolve the retinal structure axially [<xref ref-type="bibr" rid="ref11">11</xref>]. Compared with fundus imaging, optical coherence tomography (OCT) provides high-resolution, noninvasive 3D images of retinal structures for macular assessment. In addition, conventional B-scan (axial direction) OCT images can be integrated with en-face scans, facilitating the identification of atrophy borders similar to FAF [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Nonetheless, manual labeling is tedious, time-consuming, and impractical in a clinical setup [<xref ref-type="bibr" rid="ref13">13</xref>]. There is an urgent and unmet need for early detection and management of GA using retinal image modalities. Recent advancements in AI, especially deep learning (DL), present a promising opportunity for enhancing GA detection, classification, segmentation, quantification, and prediction.</p><p>In the 1950s, the term AI referred to computer systems capable of performing complex tasks that historically only a human could do. So what is AI? How is it used in medicine today? 
And what may it do in the future? AI refers to the theory and development of computer systems capable of performing tasks that historically required human intelligence, such as recognizing speech, making decisions, and identifying patterns. AI is an umbrella term that encompasses a wide variety of technologies, including machine learning (ML) and DL [<xref ref-type="bibr" rid="ref14">14</xref>]. ML is a subfield of AI that uses algorithms trained on datasets to create self-learning models capable of predicting outcomes and classifying information without human intervention [<xref ref-type="bibr" rid="ref15">15</xref>]. More generally, ML refers to the use of algorithms and data to create autonomous or semiautonomous machines. DL, meanwhile, is a subset of ML that layers algorithms into &#x201C;neural networks&#x201D; with 3 or more layers; in this respect, it loosely resembles the human brain and enables machines to perform increasingly complex tasks [<xref ref-type="bibr" rid="ref16">16</xref>]. DL algorithms generally achieve high and clinically acceptable diagnostic accuracy across different areas of medical imaging (eg, ophthalmology, respiratory disease, and breast cancer) [<xref ref-type="bibr" rid="ref17">17</xref>]. Within ophthalmology, DL algorithms showed reliable performance for detecting multiple findings in macular-centered retinal fundus images [<xref ref-type="bibr" rid="ref18">18</xref>]. Therefore, automatic GA segmentation has a vital role to play in the diagnosis and management of advanced AMD in the clinical setting.</p><p>Given the rapid evolution of AI applications in ophthalmology and the growing clinical importance of GA, this study aimed to systematically review the current evidence on AI-based approaches for the detection and management of GA secondary to dry AMD using noninvasive imaging modalities. We aimed to evaluate diagnostic accuracy relative to reference standards and examine methodological challenges to inform the design of future research and clinical implementation.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Protocol and Registration</title><p>Before starting this systematic review, we registered a protocol in the PROSPERO database. This review adhered to the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) and PRISMA-DTA (PRISMA for Diagnostic Test Accuracy) checklists [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>].</p></sec><sec id="s2-2"><title>Eligibility Criteria</title><p>We included studies using AI algorithms to detect, classify, identify, segment, quantify, or predict GA secondary to AMD from CFP, OCT, OCT angiography, FAF, or NIR. The data were from participants, with or without symptoms, who were diagnosed with GA (or cRORA) secondary to nonexudative AMD. Study designs were not restricted; multicenter or single-center, prospective or retrospective, post hoc analysis, clinical study, or model development studies were all accepted. Eyes with neovascular complications or macular atrophy from causes other than AMD, any previous anti-vascular endothelial growth factor treatment, any confounding retinopathy, or poor image quality were excluded.</p></sec><sec id="s2-3"><title>Electronic Search Strategy</title><p>Two consecutive searches were conducted on PubMed, Embase, Web of Science, Scopus, Cochrane Library, and CINAHL. 
Because this review required the extraction of complete baseline data and items, we did not search in-press or print-only sources and excluded conference proceedings and similar materials. The initial search covered the period from database inception to December 1, 2024; the updated search covered December 1, 2024, to October 5, 2025. We used a search strategy for the patient condition (GA) and the index tests (AI and retinal images) that had been used in a previous Cochrane Review; the strategy did not undergo any additional peer review [<xref ref-type="bibr" rid="ref21">21</xref>]. There were no restrictions on the date of publication. The language was limited to English. Detailed search strategies for each database are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. No filters were used. Throughout the search process, we adhered to the PRISMA-S (Preferred Reporting Items for Systematic reviews and Meta-Analyses literature search extension) reporting guidelines [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s2-4"><title>Selection Process</title><p>All relevant literature was imported into EndNote (version 20; Clarivate Analytics) software, and literature screening was conducted independently by 2 researchers (NS and JL) who specialize in ophthalmology. Duplicates were removed within the software, and the titles and abstracts of the literature were reviewed to identify those relevant to the topic. Finally, the full texts were downloaded and examined, leading to the selection of literature that met the inclusion criteria. In cases of inconsistencies in the final inclusion decisions made by the 2 researchers, a third professional (LL) was consulted to resolve the dispute.</p></sec><sec id="s2-5"><title>Data Collection Process</title><p>Using standardized data items, the data were extracted independently from the included studies by 2 researchers (NS and JL). A third review author (LL) confirmed or adjudicated any discrepancies through group discussion. We retrieved the following data items: (1) study characteristics (author, year, study design, region, and theme), (2) dataset characteristics (databases, source of databases, training/validation/testing ratio, patient number, number of images or volumes, scan number, mean age, clinical registration number, and model evaluation method), (3) image and algorithm characteristics (devices, metrics, image modality, image resolution, and AI algorithms), (4) performance metrics (outcomes, performance of models, ground truth, and performance of the ophthalmologists), and (5) main results. All the information was retrieved from the main text and tables of the included studies and is provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>; we therefore did not seek additional data by contacting the authors or experts. In some studies, the authors reported multiple sets of performance data based on subsets of a single dataset. For example, they may have reported results such as sensitivity, specificity, and accuracy obtained on the cross-validation set, the test set, or the development set. We referred to the relevant literature to select the optimal set of test performance results [<xref ref-type="bibr" rid="ref21">21</xref>]. 
When a primary study provided performance results from a single pipeline in which a development dataset was used to train the AI model and an external validation set was ultimately used to determine the performance of the optimal model, we extracted the external validation set performance data [<xref ref-type="bibr" rid="ref23">23</xref>].</p></sec><sec id="s2-6"><title>Risk of Bias and Applicability</title><p>We worked in pairs to assess the risk of bias and the applicability of the studies: those involving detection, classification, identification, segmentation, and quantification were assessed using the Quality Assessment of Diagnostic Accuracy Studies (QUADAS)-AI [<xref ref-type="bibr" rid="ref24">24</xref>] and the modified QUADAS-2 tool [<xref ref-type="bibr" rid="ref25">25</xref>], while predictive studies were assessed using the Prediction Model Risk of Bias Assessment Tool (PROBAST) [<xref ref-type="bibr" rid="ref26">26</xref>].</p><p>At present, QUADAS-AI has not yet established a complete specification of items. Therefore, we referenced the examples provided by QUADAS-AI and the published literature to compile the revised QUADAS-AI items, which included 4 domains and 9 leading questions (Table S4 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The PROBAST tool comprises participants, predictors, outcomes, and analysis, containing 20 signaling questions across 4 domains (Table S5 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). We also evaluated the applicability of each study based on the leading or signaling questions in the first 3 domains. A study with &#x201C;yes&#x201D; answers to all leading or signaling questions was considered to have a low risk of bias. If the answer to any of these questions was &#x201C;no,&#x201D; there was a potential for bias, and we rated the risk of bias as high. &#x201C;Indeterminate&#x201D; grades were applied only when the literature did not provide sufficient detail for the evaluator to reach a judgment. Throughout the process, disagreements between the 2 reviewers (NS and JL) were resolved by consulting the senior reviewer (LL).</p></sec><sec id="s2-7"><title>Data Synthesis</title><p>As very few studies reported the numbers of true positives, true negatives, false positives, and false negatives, we restricted the quantitative analysis to the diagnostic accuracy of AI as a triaging tool for GA secondary to nonexudative AMD. Moreover, a meta-analysis was not performed because of significant methodological heterogeneity across studies, arising from diverse AI architectures, imaging modalities, outcome metrics, and validation protocols. Instead, a systematic review was performed to qualitatively summarize performance trends. This approach allowed for a comprehensive evaluation of AI capabilities in the detection and management of GA via noninvasive images.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Study Selection</title><p>A total of 979 records related to the topic of this systematic review were retrieved from the 6 databases using a combination of subject terms and free-text terms. After duplicates were removed, 335 records remained, and their titles and abstracts were examined. Excluding studies not relevant to the research topic left 200 reports. 
The full texts were then downloaded and reviewed in detail based on the eligibility criteria for the studies. In the final qualitative analysis, 41 studies were included: 10 focused on GA diagnosis, 20 on GA assessment and progression, and 11 on GA prediction. <xref ref-type="fig" rid="figure1">Figure 1</xref> presents the detailed flow diagram of the literature selection.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram for literature selection. GA: geographic atrophy.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e81328_fig01.png"/></fig></sec><sec id="s3-2"><title>AI in Detecting the Presence of GA</title><p>Ten of the 41 included studies focused on AI-based detection of GA using noninvasive retinal images (Table S1 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). As listed in <xref ref-type="table" rid="table1">Table 1</xref>, the studies were published from 2018 to 2025. Four of the studies [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref30">30</xref>] focused on model development, 3 [<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref33">33</xref>] were retrospective studies, and 3 [<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref36">36</xref>] were prospective studies (1 multicenter cohort study, 1 multicenter and low-interventional clinical study, and 1 clinical study). Geographically, half were from the United States, with others from Israel, Italy, Switzerland, Germany, and a multicenter European collaboration. The studies addressed several detection-related tasks: 5 focused solely on GA detection [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>], 2 covered detection and classification [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>], and 3 integrated detection with quantification or segmentation [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref36">36</xref>].</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of studies evaluating artificial intelligence (AI) models for geographic atrophy (GA) detection using noninvasive retinal imaging.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Author</td><td align="left" valign="bottom">Study design</td><td align="left" valign="bottom">Region</td><td align="left" valign="bottom">Purpose of the study</td><td align="left" valign="bottom">Source of datasets</td><td align="left" valign="bottom">Number of patients</td><td align="left" valign="bottom">Number of images or scans</td><td align="left" valign="bottom">Model evaluation method</td><td align="left" valign="bottom">Image modality (image resolution)</td><td align="left" valign="bottom">AI<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> algorithms</td><td align="left" valign="bottom">Outcomes</td><td align="left" valign="bottom">Performance of models</td></tr></thead><tbody><tr><td align="left" valign="top">Fineberg et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Retrospective cohort study</td><td 
align="left" valign="top">Israel (Petah Tikva)</td><td align="left" valign="top">Detection and classification (GA)</td><td align="left" valign="top">Rabin Medical Center</td><td align="char" char="." valign="top">113</td><td align="char" char="." valign="top">659</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">NIR (640*640 pixels)</td><td align="left" valign="top">CNNs:<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup> ResNet50, EfficientNetB0, ViT_B_16, and YOLOv8 variants.</td><td align="left" valign="top">ACC, <italic>P</italic>, SEN, SPE, <italic>F</italic><sub>1</sub>, IoU<sup><xref ref-type="table-fn" rid="table1fn9">i</xref></sup>, and DSC<sup><xref ref-type="table-fn" rid="table1fn7">g</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>GA classification:</p><p>EfficientNetB0: ACC<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup>=0.9148; <italic>P</italic><sup><xref ref-type="table-fn" rid="table1fn13">m</xref></sup>=0.9204; SEN<sup><xref ref-type="table-fn" rid="table1fn16">p</xref></sup>=0.9233; SPE<sup><xref ref-type="table-fn" rid="table1fn17">q</xref></sup>=1.0; <italic>F</italic><sub>1</sub>=0.9147.</p></list-item><list-item><p>ResNet50: ACC=0.8815; <italic>P</italic>=.8933; SEN=0.8917; SPE=0.9833; <italic>F</italic><sub>1</sub>=0.8812.</p></list-item><list-item><p>ViT_B_16: ACC=0.963; <italic>P</italic>=.9632; SEN=0.9667; SPE=1.0; <italic>F</italic><sub>1</sub>=0.9629.</p></list-item><list-item><p>GA detection: YOLOv8-Large: SEN=0.91; <italic>P</italic>=0.91; IoU=0.84; DSC<sup><xref ref-type="table-fn" rid="table1fn7">g</xref></sup>=0.88.</p></list-item></list></td></tr><tr><td align="left" valign="top">Kalra et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">Retrospective clinical study</td><td align="left" valign="top">United States (Cleveland)</td><td align="left" valign="top">Detection, quantification, and segmentation (presence of GA and pixel-wise GA area measurement)</td><td align="left" valign="top">the Cole Eye Institute of the Cleveland Clinic</td><td align="char" char="." valign="top">341</td><td align="char" char="." valign="top">900</td><td align="left" valign="top">triple-fold cross-validation</td><td align="left" valign="top">SD-OCT<sup><xref ref-type="table-fn" rid="table1fn15">o</xref></sup> (256*256 pixels)</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup>: U-Net</td><td align="left" valign="top"><italic>F</italic><sub>1</sub>, ACC, <italic>P,</italic> R<sup><xref ref-type="table-fn" rid="table1fn14">n</xref></sup>, SEN, and SPE</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>GA detection- ACC=0.91, SEN=0.86, SPE=0.94, <italic>F</italic><sub>1</sub>=0.87.</p></list-item><list-item><p>GA segmentation: ACC=0.96, SEN=0.95, SPE=0.93, <italic>F</italic><sub>1</sub>=0.82.</p></list-item></list></td></tr><tr><td align="left" valign="top">Derradji et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Retrospective clinical study</td><td align="left" valign="top">Switzerland (Lausanne)</td><td align="left" valign="top">Detection and segmentation (RORA)</td><td align="left" valign="top">An existing image database of the Medical Retina Department at Jules-Gonin Eye Hospital</td><td align="char" char="." valign="top">57</td><td align="char" char="." 
valign="top">62</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (NR<sup><xref ref-type="table-fn" rid="table1fn10">j</xref></sup>)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">SEN, DSC, <italic>P</italic>, and Kappa</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Grader 1: DSC: mean 0.881 (SD 0.074); Precision: mean 0.928 (SD 0.054); SEN: mean 0.850 (SD 0.119); Kappa: mean 0.846 (SD 0.072).</p></list-item><list-item><p>Grader 2: DSC: mean 0.844 (SD 0.076); Precision: mean 0.799 (SD 0.133); SEN: mean 0.915 (SD 0.064); Kappa: mean 0.800 (SD 0.082).</p></list-item></list></td></tr><tr><td align="left" valign="top">de Vente et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Prospective multicenter and low-interventional clinical study (including cross-sectional and longitudinal study part)</td><td align="left" valign="top">20 sites in 7 European countries</td><td align="left" valign="top">Detection and quantification (cRORA<sup><xref ref-type="table-fn" rid="table1fn6">f</xref></sup>)</td><td align="left" valign="top">The MACUSTAR Study Cohort</td><td align="char" char="." valign="top">168</td><td align="left" valign="top">143 (ZEISS); 167 (Spectrails)</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT (512*650 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">SEN, SPE, PPV<sup><xref ref-type="table-fn" rid="table1fn12">l</xref></sup>, NPV<sup><xref ref-type="table-fn" rid="table1fn20">t</xref></sup>, and Kappa</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ZEISS: SEN=0.6; SPE=0.964; PPV=0.375; NPV=0.985.</p></list-item><list-item><p>Spectralis: SEN=0.625; SPE=0.974; PPV=0.714; NPV=0.961.</p></list-item></list></td></tr><tr><td align="left" valign="top">Sarao et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">Prospective clinical study</td><td align="left" valign="top">Italy (Udine)</td><td align="left" valign="top">Detection (presence of GA)</td><td align="left" valign="top">the Istituto Europeo di Microchirurgia Oculare (IEMO) study</td><td align="char" char="." valign="top">180</td><td align="char" char="." valign="top">540</td><td align="left" valign="top">NR</td><td align="left" valign="top">CFP<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup> (NR)</td><td align="left" valign="top">CNN: Efficientnet_b2</td><td align="left" valign="top">SEN, SPE, ACC, <italic>F</italic><sub>1</sub>, R, AUROC, and AUPRC<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>SEN: 100% (95%CI 83.2%-100%); SPE=97.5% (95% CI 86.8%-99.9%); ACC=98.4%; <italic>F</italic><sub>1</sub>=0.976; R=1; AUROC<sup><xref ref-type="table-fn" rid="table1fn18">r</xref></sup>=0.988 (95% CI 0.918-1); AUPRC<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>=0.952 (95%CI 0.719-0.994).</p></list-item></list></td></tr><tr><td align="left" valign="top">Keenan et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">Multicenter and prospective cohort study</td><td align="left" valign="top">United States (Maryland)</td><td align="left" valign="top">Detection (presence of GA)</td><td align="left" valign="top">Age-Related Eye Disease Study (AREDS) dataset</td><td align="char" char="." valign="top">4582</td><td align="char" char="." 
valign="top">59,812</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">CFP (512 pixels)</td><td align="left" valign="top">CNN: inception v3</td><td align="left" valign="top">ACC, SEN, SPE, <italic>P</italic>, AUC, and Kappa</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC=0.965 (95% CI 0.959-0.971); Kappa=0.611 (95% CI 0.533-0.689); SEN=0.692 (95% CI 0.560-0.825); SPE=0.978 (95% CI 0.970-0.985); Precision=0.584 (95% CI 0.491-0.676).</p></list-item></list></td></tr><tr><td align="left" valign="top">Yao et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">Model development and evaluation</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Detection (presence of nGA)</td><td align="left" valign="top">the Early Stages of AMD<sup><xref ref-type="table-fn" rid="table1fn19">s</xref></sup> (LEAD) study</td><td align="char" char="." valign="top">140</td><td align="char" char="." valign="top">1884</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (512*496 pixels)</td><td align="left" valign="top">CNN: ResNet18</td><td align="left" valign="top">SEN, SPE, ACC, <italic>P</italic>, and <italic>F</italic><sub>1</sub></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>SEN=0.76 (95% CI 0.67-0.84); SPE=0.98 (95% CI 0.96-0.99); PRE=0.73 (95% CI 0.54-0.89); ACC=0.97 (95% CI 0.95-0.98); <italic>F</italic><sub>1</sub>=0.74 (95% CI 0.61-0.84).</p></list-item></list></td></tr><tr><td align="left" valign="top">Chiang et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Detection (complete retinal pigment epithelial and outer retinal atrophy (cRORA) in eyes with AMD)</td><td align="left" valign="top">University of Pennsylvania, University of Miami, and Case Western Reserve University; (2) Doheny Image Reading Research Laboratory, Doheny-UCLA (University of California Los Angeles Eye Centers)</td><td align="left" valign="top">71 (training); 649 (testing #1); 60 (testing #2)</td><td align="left" valign="top">188 (training); 1117 (testing #1)</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (256*256 pixels)</td><td align="left" valign="top">CNN: ResNet18</td><td align="left" valign="top">SEN, SPE, PPV, NPV, AUROC, and AUPRC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>SEN=0.909 (95% CI 0.778-1.000); SPE=0.553 (95% CI 0.394-0.703); PPV=0.541 (95% CI 0.375-0.707); NPV=0.913 (95% CI 0.778-1.000); AUROC=0.84 (95% CI 0.75-0.94); AUPRC=0.82 (95% CI 0.70-0.93).</p></list-item></list></td></tr><tr><td align="left" valign="top">Elsawy et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">United States (Maryland)</td><td align="left" valign="top">Detection (explain decision making and compare methods)</td><td align="left" valign="top">The Age-Related Eye Disease Study 2 (AREDS2) Ancillary SD-OCT<sup><xref ref-type="table-fn" rid="table1fn11">k</xref></sup> study from Devers Eye Institute, Emory Eye Center, Duke Eye Center, and the National Eye Institute</td><td align="char" char="." valign="top">311</td><td align="char" char="." 
valign="top">1284 scans</td><td align="left" valign="top">10-fold cross-validation</td><td align="left" valign="top">SD-OCT (128*128 or 224* pixels)</td><td align="left" valign="top">3D CNN: deep-GA-Net</td><td align="left" valign="top">ACC, <italic>P</italic>, R, <italic>F</italic><sub>1</sub>, Kappa, AUROC, and AUPRC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC=0.93 (95% CI 0.92-0.94); Precision=0.90 (95% CI 0.88-0.91); Recall=0.90 (95% CI 0.89-0.92); <italic>F</italic><sub>1</sub> score=0.90 (95% CI 0.89-0.91); Kappa=0.80 (95% CI 0.77-0.83); AUROC=0.94 (95% CI 0.93-0.95); AUPRC=0.91 (95% CI 0.90-0.93).</p></list-item></list></td></tr><tr><td align="left" valign="top">Treder et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">Germany (Muenster)</td><td align="left" valign="top">Detection and classification (GA)</td><td align="left" valign="top">Public database: ImageNet</td><td align="left" valign="top">400 (training); 60 (test set)</td><td align="left" valign="top">400 (training); 60 (test set)</td><td align="left" valign="top">NR</td><td align="left" valign="top">FAF<sup><xref ref-type="table-fn" rid="table1fn8">h</xref></sup> (NR)</td><td align="left" valign="top">Deep CNN: self-learning algorithm</td><td align="left" valign="top">SEN, SPE, and ACC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Probability score: mean 0.981 (SD 0.048); SEN=100%; SPE=100%; ACC=100%.</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table1fn2"><p><sup>b</sup>ACC: accuracy.</p></fn><fn id="table1fn3"><p><sup>c</sup>AUPRC: area under the precision-recall curve.</p></fn><fn id="table1fn4"><p><sup>d</sup>CNN: convolutional neural network.</p></fn><fn id="table1fn5"><p><sup>e</sup>CFP: color fundus photography.</p></fn><fn id="table1fn6"><p><sup>f</sup>cRORA: complete retinal pigment epithelium and outer retinal atrophy.</p></fn><fn id="table1fn7"><p><sup>g</sup>DSC: dice similarity coefficient.</p></fn><fn id="table1fn8"><p><sup>h</sup>FAF: fundus autofluorescence.</p></fn><fn id="table1fn9"><p><sup>i</sup>IoU: intersection over union.</p></fn><fn id="table1fn10"><p><sup>j</sup>NR: not reported.</p></fn><fn id="table1fn11"><p><sup>k</sup>OCT: optical coherence tomography.</p></fn><fn id="table1fn12"><p><sup>l</sup>PPV: positive predictive value.</p></fn><fn id="table1fn13"><p><sup>m</sup>P: precision.</p></fn><fn id="table1fn14"><p><sup>n</sup>R: recall.</p></fn><fn id="table1fn15"><p><sup>o</sup>SD-OCT: spectral domain OCT.</p></fn><fn id="table1fn16"><p><sup>p</sup>SEN: sensitivity.</p></fn><fn id="table1fn17"><p><sup>q</sup>SPE: specificity. 
</p></fn><fn id="table1fn18"><p><sup>r</sup>AUROC: area under the receiver operating characteristic curve.</p></fn><fn id="table1fn19"><p><sup>s</sup>AMD: age-related macular degeneration.</p></fn><fn id="table1fn20"><p><sup>t</sup>NPV: negative predictive value.</p></fn></table-wrap-foot></table-wrap><p>Dataset configurations varied: 6 studies used training, validation, and test sets [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]; 3 used only training and test sets [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]; and 1 included a tuning set [<xref ref-type="bibr" rid="ref29">29</xref>]. Collectively, these studies involved at least 7132 participants, with ages ranging from 50 to 85 years. Three studies were registered with ClinicalTrials.gov (NCT00734487, NCT01790802, and NCT03349801) [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Cross-validation methods included 5-fold (40% of studies) [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>], 10-fold (20%) [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref33">33</xref>], and triple-fold (10%) [<xref ref-type="bibr" rid="ref31">31</xref>]; 30% did not report validation details.</p><p>Spectral-domain (SD)&#x2013;OCT was the most frequently used imaging modality (6/10 of studies) [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref36">36</xref>], followed by CFP (2/10) [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>], and FAF or NIR (2/10 each) [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. Most studies applied image preprocessing techniques&#x2014;such as size standardization, orientation adjustment, intensity normalization, and noise reduction&#x2014;to improve model performance. DL-based algorithms for GA detection have been developed for multiple image modalities. For example, Derradji et al [<xref ref-type="bibr" rid="ref32">32</xref>] trained a convolutional neural networks (CNNs), U-Net, to predict atrophic signs in the retina, based on the EfficientNet-b3 architecture. Kalra et al [<xref ref-type="bibr" rid="ref31">31</xref>] and de Vente et al [<xref ref-type="bibr" rid="ref36">36</xref>] also trained a DL model based on U-Net. Yao et al [<xref ref-type="bibr" rid="ref29">29</xref>] applied 3D OCT scans with ResNet18 pretrained on the ImageNet dataset, and Chiang et al [<xref ref-type="bibr" rid="ref27">27</xref>] developed CNN (ResNet18) to improve computational efficiency. Elsawy et al [<xref ref-type="bibr" rid="ref28">28</xref>] proposed Deep-GA-Net, a 3D backbone CNN with a 3D loss-based attention layer, and evaluated the effectiveness of using attention layers. Sarao et al [<xref ref-type="bibr" rid="ref35">35</xref>] used a deep CNN, the EfficientNet_b2 model, which was pretrained on the ImageNet dataset and is well-known for its high efficiency and small size. 
<p>A total of 14 performance sets were extracted from the 10 studies. Key metrics included sensitivity, specificity, accuracy, positive predictive value, negative predictive value, intersection over union, area under the receiver operating characteristic curve, area under the precision-recall curve, <italic>F</italic><sub>1</sub>-score, precision, recall, Kappa, and Dice similarity coefficient. Six OCT-based studies showed that DL models could detect GA with high accuracy, comparable to human graders [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Two studies using CFP also reported strong performance [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>], while FAF- and NIR-based approaches demonstrated excellent repeatability and reliability [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>].</p>
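<p>Most of these metrics derive from the 2&#x00D7;2 confusion matrix comparing model output with the ground truth. As a brief illustration (ours, computed from arbitrary counts rather than data from any included study), the following sketch reproduces the definitions behind the values reported in Table 1:</p><preformat># Illustrative sketch only: confusion-matrix metrics used by the
# included detection studies, computed from raw counts.
def detection_metrics(tp, fp, tn, fn):
    n = tp + fp + tn + fn
    sen = tp / (tp + fn)              # sensitivity (recall)
    spe = tn / (tn + fp)              # specificity
    acc = (tp + tn) / n               # accuracy
    pre = tp / (tp + fp)              # precision (PPV)
    npv = tn / (tn + fn)              # negative predictive value
    f1 = 2 * pre * sen / (pre + sen)  # F1-score
    # Cohen kappa: agreement with the ground truth beyond chance
    p_chance = ((tp + fp) * (tp + fn) + (tn + fp) * (tn + fn)) / (n * n)
    kappa = (acc - p_chance) / (1 - p_chance)
    return {"SEN": sen, "SPE": spe, "ACC": acc, "P": pre,
            "NPV": npv, "F1": f1, "Kappa": kappa}

# Arbitrary example counts (not taken from any included study):
print(detection_metrics(tp=86, fp=12, tn=188, fn=14))</preformat>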
Regarding the &#x201C;Index test,&#x201D; only 1 algorithm was externally validated using a different dataset [<xref ref-type="bibr" rid="ref27">27</xref>]; all other items were evaluated as low risk.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Methodological quality and applicability assessment for studies on geographic atrophy (GA) detection using the revised Quality Assessment of Diagnostic Accuracy Studies&#x2013;Artificial Intelligence (QUADAS-AI).</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom" colspan="4">Risk of bias</td><td align="left" valign="bottom" colspan="3">Concerns regarding applicability</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="bottom">Patient selection</td><td align="left" valign="bottom">Index test</td><td align="left" valign="bottom">Reference standard</td><td align="left" valign="bottom">Flow and timing</td><td align="left" valign="bottom">Patient selection</td><td align="left" valign="bottom">Index test</td><td align="left" valign="bottom">Reference standard</td></tr></thead><tbody><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Chiang et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Elsawy et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Kalra et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Keenan et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Sarao et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Yao et al [<xref ref-type="bibr" 
rid="ref29">29</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Treder et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Vente et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Derradji et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">Fineberg et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr></tbody></table></table-wrap></sec><sec id="s3-3"><title>AI in GA Assessment and Progression</title><p>Twenty studies explored AI for GA assessment and progression using noninvasive imaging, published between 2019 and 2025 (Table S2 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). As shown in <xref ref-type="table" rid="table3">Table 3</xref>, these studies covered 11 segmentation [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref45">45</xref>], 2 algorithm optimization [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>], 3 AMD progression classification [<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref50">50</xref>], and 3 combined tasks such as identification, segmentation, and quantification [<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref53">53</xref>]. One study focused solely on GA quantification [<xref ref-type="bibr" rid="ref54">54</xref>]. 
<table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Characteristics of studies evaluating artificial intelligence (AI) models for geographic atrophy (GA) assessment and progression using noninvasive retinal imaging.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Author</td><td align="left" valign="bottom">Study design</td><td align="left" valign="bottom">Region</td><td align="left" valign="bottom">Purpose of the study</td><td align="left" valign="bottom">Source of datasets</td><td align="left" valign="bottom">Number of patients</td><td align="left" valign="bottom">Number of images or scans</td><td align="left" valign="bottom">Model evaluation method</td><td align="left" valign="bottom">Image modality (image resolution)</td><td align="left" valign="bottom">AI algorithms</td><td align="left" valign="bottom">Outcomes</td><td align="left" valign="bottom">Performance of models</td></tr></thead><tbody><tr><td align="left" valign="top">Pramil et al [<xref ref-type="bibr" rid="ref5">5</xref>]</td><td align="left" valign="top">Retrospective review of images</td><td align="left" valign="top">United States (Boston)</td><td align="left" valign="top">Segmentation (GA lesions)</td><td align="left" valign="top">The &#x201C;SWAGGER&#x201D; cohort of nonexudative age-related macular degeneration (from the New England Eye Center at Tufts Medical Center)</td><td align="left" valign="top">90</td><td align="left" valign="top">126</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SS-OCT<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> (500*500 pixels)</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup>: U-Net</td><td align="left" valign="top">SEN<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup>, SPE<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup>, and DSC<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>SEN=0.95; SPE=0.91; DSC (vs G1): mean 0.92 (SD 0.11); DSC (vs G2): mean 0.91 (SD 0.12).</p></list-item></list></td></tr><tr><td align="left" valign="top">Siraz et al [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Retrospective comparative 
study</td><td align="left" valign="top">United States (North Carolina)</td><td align="left" valign="top">Classification (central and noncentral GA)</td><td align="left" valign="top">Atrium Health Wake Forest Baptist</td><td align="left" valign="top">104</td><td align="left" valign="top">355</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table3fn6">f</xref></sup></td><td align="left" valign="top">SD-OCT<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup> (224*224 pixels)</td><td align="left" valign="top">CNNs: ResNet50, MobileNetV2, and ViT-B/16</td><td align="left" valign="top">AUROC<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup>, <italic>F</italic><sub>1</sub>, and ACC<sup><xref ref-type="table-fn" rid="table3fn9">i</xref></sup></td><td align="left" valign="top"><list list-type="simple"><list-item><p>(CGA<sup><xref ref-type="table-fn" rid="table3fn10">j</xref></sup> vs NCGA<sup><xref ref-type="table-fn" rid="table3fn11">k</xref></sup>)</p></list-item></list><list list-type="bullet"><list-item><p>ResNet50: AUROC: mean 0.545 (SD 0.004), <italic>F</italic><sub>1</sub>: mean 0.431 (SD 0.00); ACC: mean 0.756 (SD 0.00).</p></list-item><list-item><p>MobileNetV2: AUROC: mean 0.521 (SD 0.016), <italic>F</italic><sub>1</sub>: mean 0.432 (SD 0.002); ACC: mean 0.756 (SD 0.00).</p></list-item><list-item><p>ViT-B/16: AUROC: mean 0.718 (SD 0.002), <italic>F</italic><sub>1</sub>: mean 0.602 (SD 0.004); ACC: mean 0.780 (SD 0.005).</p></list-item></list></td></tr><tr><td align="left" valign="top">Arslan et al [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Retrospective cohort clinical study</td><td align="left" valign="top">Australia (Victoria)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">Patients diagnosed with GA at the Center for Eye Research Australia or a private ophthalmology practice</td><td align="left" valign="top">51</td><td align="left" valign="top">702</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">FAF<sup><xref ref-type="table-fn" rid="table3fn12">l</xref></sup> (768*768 or 1536*1536 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">DSC, DSC loss, SEN, SPE, MAE<sup><xref ref-type="table-fn" rid="table3fn13">m</xref></sup>, ACC, R, and P</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>DSC: mean 0.9780 (SD 0.0124); DSC loss: mean 0.0220 (SD 0.0041); SEN: mean 0.9903 (SD 0.0041); SPE: mean 0.7498 (SD 0.0955); MAE: mean 0.0376 (SD 0.0184); ACC: mean 0.9774 (SD 0.0090); P: mean 0.9837 (SD 0.0116).</p></list-item></list></td></tr><tr><td align="left" valign="top">Hu et al [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">Retrospective clinical study</td><td align="left" valign="top">China (Shenyang)</td><td align="left" valign="top">Classification (dry AMD<sup><xref ref-type="table-fn" rid="table3fn16">p</xref></sup> progression phases)</td><td align="left" valign="top">Shenyang Aier Eye Hospital</td><td align="left" valign="top">338</td><td align="left" valign="top">3401</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (NR)</td><td align="left" valign="top">CNNs: EfficientNetV2, DenseNet169, Xception, and ResNet50</td><td align="left" valign="top">ACC, SEN, SPE, <italic>F</italic><sub>1</sub>, Macro-f1, and Kappa</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC=97.31%;
SEN=89.25%; SPE=98.80%; <italic>F</italic><sub>1</sub>=91.21%; Macro-f1=92.08%; Kappa=95.45%.</p></list-item></list></td></tr><tr><td align="left" valign="top">Spaide et al [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">Retrospective analysis and model comparison</td><td align="left" valign="top">United States (Washington)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">The SWAGGER cohort from the New England Eye Center at Tufts Medical Center</td><td align="left" valign="top">87</td><td align="left" valign="top">126 scans</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SS-OCT (NR)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">DSC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>UNet-1: 0.82 (95% CI 0.78-0.86).</p></list-item><list-item><p>UNet-Avg: 0.88 (95% CI 0.85-0.91).</p></list-item><list-item><p>UNet-Drop: 0.90 (95% CI 0.87-0.93).</p></list-item></list></td></tr><tr><td align="left" valign="top">Vogl et al [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">Austria (Vienna)</td><td align="left" valign="top">Identification (GA progression after pegcetacoplan treatment)</td><td align="left" valign="top">The FILLY trial</td><td align="left" valign="top">156</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT (512*512 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">LPR<sup><xref ref-type="table-fn" rid="table3fn17">q</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Compared with sham treatment, monthly: &#x2212;28% (&#x2212;42.8 to &#x2212;9.4).</p></list-item><list-item><p>Every other month: &#x2212;23.9% (&#x2212;40.2 to &#x2212;3.0).</p></list-item></list></td></tr><tr><td align="left" valign="top">Szeskin et al [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">Israel (Jerusalem)</td><td align="left" valign="top">Identification, quantification (GA lesion)</td><td align="left" valign="top">Datasets D1 and D2 from the Hadassah University Medical Center</td><td align="left" valign="top">D1: 18; D2: 16</td><td align="left" valign="top">NR</td><td align="left" valign="top">4-fold cross-validation</td><td align="left" valign="top">SD-OCT (496*1024 pixels and 496*1536 pixels)</td><td align="left" valign="top">CNN: the custom column classification CNN</td><td align="left" valign="top">AUROC, <italic>P</italic>, R<sup><xref ref-type="table-fn" rid="table3fn14">n</xref></sup>, and <italic>F</italic><sub>1</sub></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>AUROC=0.970; (Segment) P: mean 0.84 (SD 0.11); R: mean 0.94 (SD 0.03); (Lesion) P: mean 0.72 (SD 0.03); R: mean 0.91 (SD 0.18).</p></list-item></list></td></tr><tr><td align="left" valign="top">Spaide et al [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">Proxima A and B</td><td align="left" valign="top">Proxima A: 154; Proxima B: 183</td><td align="left" valign="top">Proxima A: 497; Proxima B: 940</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">FAF, NIR<sup><xref ref-type="table-fn" rid="table3fn18">r</xref></sup> (768 *768 pixels)</td><td align="left" valign="top">Multimodal DL<sup><xref ref-type="table-fn" rid="table3fn19">s</xref></sup>: U-Net; YNet</td><td align="left" valign="top">DSC and r<sup>2<xref ref-type="table-fn" rid="table3fn20">t</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>(G1-Ynet)DSC: mean 0.92 (SD 0.09).</p></list-item><list-item><p>(G1-Unet)DSC: mean 0.90 (SD 0.09).</p></list-item><list-item><p>(G2-Ynet)DSC: mean 0.91 (SD 0.09).</p></list-item><list-item><p>(G2-Unet)DSC: mean 0.90 (SD 0.09).</p></list-item><list-item><p>(Ynet) r<sup>2</sup>: 0.981.</p></list-item><list-item><p>(Unet) r<sup>2</sup>: 0.959.</p></list-item></list></td></tr><tr><td align="left" valign="top">AI-khersan et al [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">United States (Texas)</td><td align="left" valign="top">Segmentation (GA)</td><td align="left" valign="top">The Retina Consultants of Texas and Retina Vitreous Associates</td><td align="left" valign="top">33; 326</td><td align="left" valign="top">367; 348</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (512*496pixels; 200*1024pixels)</td><td align="left" valign="top">CNN: 3D-to-2D U-Net</td><td align="left" valign="top">DSC and r2</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>For Spectralis data, DSC=0.826; <italic>r</italic><sup>2</sup>=0.906.</p></list-item><list-item><p>For Cirrus data, DSC=0.824; <italic>r</italic><sup>2</sup>=0.883.</p></list-item></list></td></tr><tr><td align="left" valign="top">Chu et al [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Prospective study</td><td align="left" valign="top">United States (Washington)</td><td align="left" valign="top">Identification, segmentation, and quantification (GA)</td><td align="left" valign="top">The University of Miami</td><td align="left" valign="top">70; 20; 25</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">SS-OCT (512*512 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">DSC, SEN, and SPE</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>DSC: mean 0.940 (SD 0.032). 
SEN=100%; SPE: 100%.</p></list-item></list></td></tr><tr><td align="left" valign="top">Merle et al [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">Prospective observational study</td><td align="left" valign="top">Australia (Victoria)</td><td align="left" valign="top">Quantification (GA)</td><td align="left" valign="top">The Center for Eye Research Australia</td><td align="left" valign="top">50</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT; FAF (NR)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">Spearman correlation coefficient and SEN</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>(OCT<sup><xref ref-type="table-fn" rid="table3fn21">u</xref></sup>-automatically) Spearman correlation coefficient=0.85 (95% CI 0.71-0.91); SEN=0.59.</p></list-item></list></td></tr><tr><td align="left" valign="top">Yang et al [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Shenyang)</td><td align="left" valign="top">Classification (stage of dry AMD progression)</td><td align="left" valign="top">Shenyang Aier Excellence Eye Hospital</td><td align="left" valign="top">1310</td><td align="left" valign="top">16,384</td><td align="left" valign="top">3-fold cross-validation</td><td align="left" valign="top">SD-OCT (NR)</td><td align="left" valign="top">CNNs: ResNet50, EfficientNetB4, MobileNetV3, Xception</td><td align="left" valign="top">ACC, SEN, SPE, and <italic>F</italic><sub>1</sub></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC(GA): ResNet50=92.35%; EfficientNetB4=93.85%; MobileNetV3=89.64%; Xception=91.16%.</p></list-item><list-item><p>ACC (nascent GA): ResNet50=91.56%; EfficientNetB4=89.66%; MobileNetV3=89.43%; Xception=85.22%.</p></list-item></list></td></tr><tr><td align="left" valign="top">Ji et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Nanjing)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">Dataset1 and dataset2</td><td align="left" valign="top">8; 54</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT (224*224 pixels)</td><td align="left" valign="top">Weakly supervised multitask learning: Mirrored X-Net</td><td align="left" valign="top">DSC, IoU<sup><xref ref-type="table-fn" rid="table3fn22">v</xref></sup>, AAD<sup><xref ref-type="table-fn" rid="table3fn23">w</xref></sup>, and CC<sup><xref ref-type="table-fn" rid="table3fn24">x</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>DSC: mean 0.862 (SD 0.080); IoU: mean 0.765 (SD 0.119); AAD: mean 0.090 (SD 0.090); CC: 0.992.</p></list-item></list></td></tr><tr><td align="left" valign="top">Ma et al [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Jinan)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">Dataset1 and dataset2</td><td align="left" valign="top">62</td><td align="left" valign="top">NR</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (224*224 pixels)</td><td align="left" valign="top">Weakly supervised model: VGG16</td><td align="left" 
valign="top">DSC, OR<sup><xref ref-type="table-fn" rid="table3fn25">y</xref></sup>, AAD, CC, and AUROC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>DSC: mean 0.847 (SD 0.087); OR: mean 0.744 (SD 0.126); AAD: mean 0.150 (SD 0.149); CC: 0.969; AUROC: 0.933.</p></list-item></list></td></tr><tr><td align="left" valign="top">Royer et al [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">France (Issy-Les-Moulineaux)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">the Clinical Imaging Center of the Quinze-Vingts Hospital</td><td align="left" valign="top">18</td><td align="left" valign="top">328</td><td align="left" valign="top">8 different random combinations of 12 series to train the model and 6 for the tests</td><td align="left" valign="top">NIR (256*256 pixels)</td><td align="left" valign="top">Unsupervised neural networks: W-net</td><td align="left" valign="top"><italic>F</italic>1, <italic>P</italic>, and R</td><td align="left" valign="top"><list list-type="bullet"><list-item><p><italic>F</italic><sub>1</sub>: mean 0.87 (SD 0.07); <italic>P</italic>: mean 0.90 (SD 0.07); R: mean 0.85 (SD 0.11).</p></list-item></list></td></tr><tr><td align="left" valign="top">Xu et al [<xref ref-type="bibr" rid="ref11">11</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Jinan)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">dataset1 and dataset2</td><td align="left" valign="top">8 (test I); 56 (test II)</td><td align="left" valign="top">55 (dataset1); 56 (dataset2)</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT (1024*512*128pixels; 1024*200*200pixels)</td><td align="left" valign="top">Self-learning algorithm</td><td align="left" valign="top">OR, AAD, and CC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>OR: mean 84.48% (SD 11.98%); AAD: mean 11.09% (SD 13.61%); CC: 0.9948.</p></list-item></list></td></tr><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">United Kingdom (London)</td><td align="left" valign="top">Segmentation and quantification (GA)</td><td align="left" valign="top">The FILLY study</td><td align="left" valign="top">200</td><td align="left" valign="top">984</td><td align="left" valign="top">NR</td><td align="left" valign="top">SD-OCT (NR)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">DSC, ICC<sup><xref ref-type="table-fn" rid="table3fn26">z</xref></sup>, ACC, SEN, SPE, and <italic>F</italic><sub>1</sub></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Approach 1: ACC=0.91 (95% CI 0.89-0.93); <italic>F</italic><sub>1</sub>=0.94 (95% CI 0.92-0.96); SEN=0.99 (95% CI 0.97-1.00); SPE=0.54 (95% CI 0.47-0.61); DSC: mean 0.92 (SD 0.14); ICC=0.94.</p></list-item><list-item><p>Approach 2: ACC=0.94 (95% CI 0.92-0.96); <italic>F</italic><sub>1</sub>=0.96 (95% CI 0.94-0.98); SEN=0.98 (95% CI 0.96-1.00); SPE=0.76 (95% CI 0.70-0.82); DSC: mean 0.89 (SD 0.18); ICC: 0.91.</p></list-item></list></td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Wuhan)</td><td align="left" 
valign="top">Segmentation (GA)</td><td align="left" valign="top">Wuhan Aier Eye Hospital; the public dataset OCTA500</td><td align="left" valign="top">300</td><td align="left" valign="top">2923</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (512*512 pixels)</td><td align="left" valign="top">Self-learning algorithm (dual-branch image projection network)</td><td align="left" valign="top">Jaccard index, DSC, ACC, <italic>P</italic><sup><xref ref-type="table-fn" rid="table3fn15">o</xref></sup>, and R</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>DSC: mean 7.03 (SD 2.73); Jaccard index: mean 80.96 (SD 4.29); ACC: mean 91.84 (SD 2.13); P: mean 87.12 (SD 2.34); R: mean 86.56 (SD 2.92).</p></list-item></list></td></tr><tr><td align="left" valign="top">Williamson et al [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Cross-sectional study</td><td align="left" valign="top">United Kingdom (London)</td><td align="left" valign="top">Segmentation (GA lesion area)</td><td align="left" valign="top">INSIGHT Health Data Research Hub at Moorfields Eye Hospital</td><td align="left" valign="top">9875 (OCT); 81 (FAF)</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">3D-OCT; FAF (512*512 pixels)</td><td align="left" valign="top">Self-learning algorithm</td><td align="left" valign="top">PPV<sup><xref ref-type="table-fn" rid="table3fn27">aa</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>0.86 (95% CI 0.79-0.92).</p></list-item></list></td></tr><tr><td align="left" valign="top">Safai et al [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Comparative analysis</td><td align="left" valign="top">United States (Wisconsin)</td><td align="left" valign="top">Identification (the best AI framework for segmentation of GA)</td><td align="left" valign="top">AREDS2<sup><xref ref-type="table-fn" rid="table3fn28">ab</xref></sup> study; the GlaxoSmithKline (GSK) study</td><td align="left" valign="top">271(AREDS2); 100(GSK)</td><td align="left" valign="top">601 (AREDS2); 156 (GSK)</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">FAF (512*512 pixels)</td><td align="left" valign="top">CNNs: UNet, FPN<sup><xref ref-type="table-fn" rid="table3fn29">ac</xref></sup>, PSPNet, EfficientNet, ResNet, VGG<sup><xref ref-type="table-fn" rid="table3fn30">ad</xref></sup>, mViT<sup><xref ref-type="table-fn" rid="table3fn31">ae</xref></sup></td><td align="left" valign="top">CC and DSC</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>FPN_EfficientNet: CC=0.98, DSC=0.931.</p></list-item><list-item><p>FPN_CCesNet: CC=0.98, DSC=0.902.</p></list-item><list-item><p>FPN_VGG: CC=0.98, DSC=0.934.</p></list-item><list-item><p>FPN_mViT: CC=0.99, DSC=0.939.</p></list-item></list><list list-type="bullet"><list-item><p>UNet_EfficientNet: CC=0.98, DSC=0.924.</p></list-item><list-item><p>UNet_CCesNet: CC=0.97, DSC=0.930.</p></list-item><list-item><p>UNet_VGG: CC=0.97, DSC=0.896; UNet_mViT: CC=0.99, DSC=0.938.</p></list-item><list-item><p>PSPNet_EfficientNet: CC=0.93, DSC=0.890.</p></list-item><list-item><p>PSPNet_CCesNet: CC=0.87, DSC=0.877.</p></list-item><list-item><p>PSPNet_VGG: CC=0.95, DSC=0.900.</p></list-item><list-item><p>PSPNet_mViT: CC=0.98, DSC=0.889.</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>SS-OCT: 
swept-source OCT.</p></fn><fn id="table3fn2"><p><sup>b</sup>CNN: convolutional neural network.</p></fn><fn id="table3fn3"><p><sup>c</sup>SEN: sensitivity.</p></fn><fn id="table3fn4"><p><sup>d</sup>SPE: specificity.</p></fn><fn id="table3fn5"><p><sup>e</sup>DSC: dice similarity coefficient.</p></fn><fn id="table3fn6"><p><sup>f</sup>NR: not reported.</p></fn><fn id="table3fn7"><p><sup>g</sup>SD-OCT: spectral domain OCT.</p></fn><fn id="table3fn8"><p><sup>h</sup>AUROC: area under the receiver operating characteristic curve.</p></fn><fn id="table3fn9"><p><sup>i</sup>ACC: accuracy.</p></fn><fn id="table3fn10"><p><sup>j</sup>CGA: central geographic atrophy.</p></fn><fn id="table3fn11"><p><sup>k</sup>NCGA: noncentral geographic atrophy.</p></fn><fn id="table3fn12"><p><sup>l</sup>FAF: fundus autofluorescence.</p></fn><fn id="table3fn13"><p><sup>m</sup>MAE: mean absolute error.</p></fn><fn id="table3fn14"><p><sup>n</sup>R: recall.</p></fn><fn id="table3fn15"><p><sup>o</sup>P: precision.</p></fn><fn id="table3fn16"><p><sup>p</sup>AMD: age-related macular degeneration.</p></fn><fn id="table3fn17"><p><sup>q</sup>LPR: local progression rate.</p></fn><fn id="table3fn18"><p><sup>r</sup>NIR: near-infrared reflectance.</p></fn><fn id="table3fn19"><p><sup>s</sup>DL: deep learning.</p></fn><fn id="table3fn20"><p><sup>t</sup><italic>r</italic><sup>2</sup>: Pearson correlation coefficient.</p></fn><fn id="table3fn21"><p><sup>u</sup>OCT: optical coherence tomography.</p></fn><fn id="table3fn22"><p><sup>v</sup>IoU: intersection over union.</p></fn><fn id="table3fn23"><p><sup>w</sup>AAD: absolute area difference.</p></fn><fn id="table3fn24"><p><sup>x</sup>CC: correlation coefficient.</p></fn><fn id="table3fn25"><p><sup>y</sup>OR: overlap ratio.</p></fn><fn id="table3fn26"><p><sup>z</sup>ICC: intraclass correlation coefficient.</p></fn><fn id="table3fn27"><p><sup>aa</sup>PPV: positive predictive value.</p></fn><fn id="table3fn28"><p><sup>ab</sup>AREDS2: Age-Related Eye Disease Study 2.</p></fn><fn id="table3fn29"><p><sup>ac</sup>FPN: Feature Pyramid Network.</p></fn><fn id="table3fn30"><p><sup>ad</sup>VGG: Visual Geometry Group.</p></fn><fn id="table3fn31"><p><sup>ae</sup>mViT: Mix Vision Transformer.</p></fn></table-wrap-foot></table-wrap><p>Dataset configurations varied: 9 out of 20 studies used training, validation, and test sets [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]; 6 studies used training and test sets [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]; 2 studies used training and validation sets [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]; 1 study comprised training, tuning, and internal validation sets [<xref ref-type="bibr" rid="ref53">53</xref>]; and 2 studies did not specify [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. Across studies, at least 14,064 participants provided image data for analysis. Fewer than half of the studies (9/20, 45%) provided demographic information, with the average age of participants ranging from 55 to 94 years.
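To make the dataset configurations enumerated at the start of this paragraph concrete, a minimal Python sketch of a generic training-validation-test split follows (an illustration with synthetic data and an assumed 70/15/15 ratio; it is not code from, and does not reproduce, any included study): <preformat>
# Minimal sketch of a training-validation-test configuration.
# The synthetic arrays and the 70/15/15 ratio are illustrative
# assumptions, not values drawn from the reviewed studies.
import numpy as np
from sklearn.model_selection import train_test_split

scans = np.random.rand(200, 224, 224)        # stand-in for retinal scans
labels = np.random.randint(0, 2, size=200)   # stand-in for GA labels

# Hold out 30% of the data, then split that holdout into equal
# validation and test halves, yielding a 70/15/15 configuration.
X_train, X_rest, y_train, y_rest = train_test_split(
    scans, labels, test_size=0.30, random_state=42, stratify=labels)
X_val, X_test, y_val, y_test = train_test_split(
    X_rest, y_rest, test_size=0.50, random_state=42, stratify=y_rest)
</preformat>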
Six studies [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>] were registered with ClinicalTrials.gov (NCT01342926, NCT02503332, NCT02479386, NCT02399072, and NCT04469140). To assess the generalization ability of the DL models, cross-validation methods included 5-fold (8/20 studies [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]), 4-fold (1/20 study [<xref ref-type="bibr" rid="ref51">51</xref>]), 3-fold (1/20 study [<xref ref-type="bibr" rid="ref49">49</xref>]), and other approaches (1/20 study [<xref ref-type="bibr" rid="ref39">39</xref>]). Nine studies did not report validation specifics.</p><p>Multiple imaging modalities supported GA assessment: spectral domain optical coherence tomography (SD-OCT) was most common, followed by swept-source OCT (SS-OCT), 3D-OCT, FAF, and NIR. Preprocessing techniques were widely applied to standardize images and improve model performance. Algorithm architectures varied, with U-Net being the most frequently used. Other approaches included custom CNNs, self-learning algorithms, weakly supervised models, and multimodal networks. For example, Hu et al [<xref ref-type="bibr" rid="ref48">48</xref>] trained 4 DL models (ResNet-50, Xception, DenseNet169, and EfficientNetV2), evaluating them on a single fold of the validation dataset, with all <italic>F</italic><sub>1</sub>-scores exceeding 90%. Yang et al [<xref ref-type="bibr" rid="ref49">49</xref>] proposed an ensemble DL architecture that integrated 4 different CNNs, including ResNet50, EfficientNetB4, MobileNetV3, and Xception, to classify dry AMD progression stages. GA lesions on FAF were automatically segmented using multimodal DL networks (U-Net and Y-Net) fed with FAF and NIR images [<xref ref-type="bibr" rid="ref40">40</xref>]. In contrast to the multimodal algorithms mentioned above, Safai et al [<xref ref-type="bibr" rid="ref46">46</xref>] investigated 3 distinct segmentation architectures along with 4 commonly used encoders, resulting in 12 different AI model combinations, to determine the optimal AI framework for GA segmentation on FAF images.</p><p>From 20 studies, 42 performance sets were collected. Common metrics included correlation coefficient, mean absolute error, Spearman correlation coefficient, intraclass correlation coefficient, overlap ratio, Pearson correlation coefficient (<italic>r</italic><sup>2</sup>), Kappa, specificity (SPE), sensitivity (SEN), accuracy, positive predictive value (PPV), <italic>F</italic><sub>1</sub>-score, P, R, intersection over union, and dice similarity coefficient (DSC). Regarding the segmentation, classification, identification, and quantification of GA in SD-OCT, 12 studies demonstrated performance comparable to that of clinical experts [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>].
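To illustrate how the most frequently reported of these metrics are computed from a predicted lesion mask and an expert-annotated reference mask, a minimal Python sketch follows (synthetic masks and hypothetical variable names; the formulas are the standard definitions rather than code from any included study): <preformat>
# Minimal sketch: DSC, sensitivity, and specificity for binary masks.
# The masks here are synthetic; the reviewed studies compare model
# output with expert annotations on OCT or FAF images.
import numpy as np

rng = np.random.default_rng(0)
pred = rng.integers(0, 2, size=(512, 512)).astype(bool)  # model output
ref = rng.integers(0, 2, size=(512, 512)).astype(bool)   # expert annotation

tp = np.logical_and(pred, ref).sum()    # true positive pixels
tn = np.logical_and(~pred, ~ref).sum()  # true negative pixels
fp = np.logical_and(pred, ~ref).sum()   # false positive pixels
fn = np.logical_and(~pred, ref).sum()   # false negative pixels

dsc = 2 * tp / (2 * tp + fp + fn)  # dice similarity coefficient
sen = tp / (tp + fn)               # sensitivity
spe = tn / (tn + fp)               # specificity
print(f"DSC={dsc:.3f}, SEN={sen:.3f}, SPE={spe:.3f}")
</preformat>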
AI was also capable of efficiently detecting, segmenting, and measuring GA in SS-OCT, 3D-OCT, and FAF images, according to 4 studies [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. AI also achieved good performance for GA segmentation in FAF and NIR images in clinical datasets [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>].</p><p>We performed a comprehensive assessment of the methodological quality of the 20 GA assessment and progression studies encompassing 4 domains (<xref ref-type="table" rid="table4">Table 4</xref>). Only 8 studies detailed the eligibility criteria in the &#x201C;patient selection&#x201D; category, while the others did not report them. Three of the studies [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>] lacked complete datasets, and 3 others [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref54">54</xref>] had small datasets or limited volumes of data. In addition, 3 studies [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref54">54</xref>] failed to provide information on image formats or resolutions. Two studies [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>] were ranked as high risk regarding patient selection since the participants included other types of dry AMD (drusen, nascent GA). In terms of applicability, 18 studies were classified as low risk, while 2 were deemed high risk concerning patient selection. Concerning the &#x201C;Index test,&#x201D; only 3 algorithms underwent external validation with a different dataset [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref53">53</xref>].
All other items were evaluated as low risk.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Methodological quality and applicability summary of geographic atrophy (GA) assessment and progression studies using the revised Quality Assessment of Diagnostic Accuracy Studies&#x2013;Artificial Intelligence (QUADAS-AI).</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="middle">Study</td><td align="left" valign="middle" colspan="4">Risk of bias</td><td align="left" valign="middle" colspan="3">Concerns regarding applicability</td></tr></thead><tbody><tr><td align="left" valign="middle"/><td align="left" valign="middle">Patient selection</td><td align="left" valign="middle">Index test</td><td align="left" valign="middle">Reference standard</td><td align="left" valign="middle">Flow and timing</td><td align="left" valign="middle">Patient selection</td><td align="left" valign="middle">Index test</td><td align="left" valign="middle">Reference standard</td></tr><tr><td align="left" valign="top">M Hu [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">JK Yang [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">A Safai [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">WD Vogl [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">A Szeskin [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">ZD Chu [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low
risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">ZX Ji [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">X Ma [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">C Royer [<xref ref-type="bibr" rid="ref39">39</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">T Spaide [<xref ref-type="bibr" rid="ref40">40</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">T Spaide [<xref ref-type="bibr" rid="ref41">41</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">DJ Williamson [<xref ref-type="bibr" rid="ref42">42</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">RB Xu [<xref ref-type="bibr" rid="ref11">11</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">J Arslan [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">V Pramil [<xref ref-type="bibr" rid="ref5">5</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low 
risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">GY Zhang [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">DA Merle [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">H Al-khersan [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">S Siraz [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr><tr><td align="left" valign="top">XM Liu [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">High risk</td><td align="left" valign="top">High risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td><td align="left" valign="top">Low risk</td></tr></tbody></table></table-wrap></sec><sec id="s3-4"><title>AI in Predicting GA Lesion Area and Progression</title><p>Eleven studies applied AI to predict GA lesion growth and progression from noninvasive imaging (Table S3 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). These studies were published between 2021 and 2025, with their key characteristics summarized in <xref ref-type="table" rid="table5">Table 5</xref>. The study designs consisted of 6 retrospective studies [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref60">60</xref>], 2 model development studies [<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref62">62</xref>], 2 post hoc analyses [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref64">64</xref>], and 1 clinical evaluation of a DL algorithm [<xref ref-type="bibr" rid="ref65">65</xref>].
Participants or images came from various regions: 6 studies were based in the United States [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>], 3 in Austria [<xref ref-type="bibr" rid="ref63">63</xref>-<xref ref-type="bibr" rid="ref65">65</xref>], 1 in Switzerland [<xref ref-type="bibr" rid="ref56">56</xref>], and another involving multiple centers in China and the United States [<xref ref-type="bibr" rid="ref61">61</xref>]. Research aims focused on GA growth prediction [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref59">59</xref>-<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>], combined prediction and evaluation of lesion features [<xref ref-type="bibr" rid="ref57">57</xref>], treatment response assessment [<xref ref-type="bibr" rid="ref58">58</xref>], and integrated segmentation-prediction tasks [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>].</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Characteristics of studies evaluating artificial intelligence (AI) models for geographic atrophy (GA) prediction using noninvasive retinal imaging.</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Author</td><td align="left" valign="bottom">Study design</td><td align="left" valign="bottom">Region</td><td align="left" valign="bottom">Purpose of the study</td><td align="left" valign="bottom">Source of datasets</td><td align="left" valign="bottom">Number of patients</td><td align="left" valign="bottom">Number of images or scans or cubes</td><td align="left" valign="bottom">Model evaluation method</td><td align="left" valign="bottom">Image modality (resolution)</td><td align="left" valign="bottom">AI algorithms</td><td align="left" valign="bottom">Outcomes</td><td align="left" valign="bottom">Performance of models</td></tr></thead><tbody><tr><td align="left" valign="top">Gigon et al [<xref ref-type="bibr" rid="ref56">56</xref>]</td><td align="left" valign="top">Retrospective monocentric study</td><td align="left" valign="top">Switzerland (Lausanne)</td><td align="left" valign="top">Prediction (RORA<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup> progression)</td><td align="left" valign="top">Jules Gonin Eye Hospital</td><td align="left" valign="top">119</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table5fn2">b</xref></sup></td><td align="left" valign="top">SD-OCT<sup><xref ref-type="table-fn" rid="table5fn3">c</xref></sup> (384*384 pixels)</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table5fn4">d</xref></sup>: EfficientNet-b3</td><td align="left" valign="top">DSC<sup><xref ref-type="table-fn" rid="table5fn5">e</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>0-6 months: 0.84</p></list-item><list-item><p>6-12 months: 0.84</p></list-item><list-item><p>&#x003E;12 months: 0.89</p></list-item></list></td></tr><tr><td align="left" valign="top">Dow et al [<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Retrospective cohort study</td><td align="left" valign="top">United States (Atlanta, Georgia, Portland, Oregon, North Carolina; Maryland,
Raleigh, Morrisville, Cary); United Kingdom (Durham, South Durham)</td><td align="left" valign="top">Prediction (iAMD<sup><xref ref-type="table-fn" rid="table5fn6">f</xref></sup> to GA within 1 year)</td><td align="left" valign="top">3 independent datasets from AREDS2<sup><xref ref-type="table-fn" rid="table5fn7">g</xref></sup> and a tertiary referral center and associated satellites</td><td align="left" valign="top">316; 53; 48</td><td align="left" valign="top">1085; 53; 48</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT (512*1000 pixels)</td><td align="left" valign="top">CNN: Inception v3</td><td align="left" valign="top">SEN<sup><xref ref-type="table-fn" rid="table5fn8">h</xref></sup>, SPE<sup><xref ref-type="table-fn" rid="table5fn9">i</xref></sup>, PPV<sup><xref ref-type="table-fn" rid="table5fn10">j</xref></sup>, NPV<sup><xref ref-type="table-fn" rid="table5fn11">k</xref></sup>, ACC<sup><xref ref-type="table-fn" rid="table5fn12">l</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>SEN: 0.91 (95% CI 0.74-0.98); SPE: 0.80 (95% CI 0.63-0.91); PPV: 0.78 (95% CI 0.70-0.85); NPV: 0.92 (95% CI 0.90-0.95); ACC: 0.85 (95% CI 0.87-0.91)</p></list-item></list></td></tr><tr><td align="left" valign="top">Cluceru et al [<xref ref-type="bibr" rid="ref57">57</xref>]</td><td align="left" valign="top">Retrospective clinical study; observational study</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Prediction and evaluation (GA growth rate and GA features related to shape and size)</td><td align="left" valign="top">The lampalizumab phase 3 clinical trials and an accompanying observational study</td><td align="left" valign="top">1041; 255</td><td align="left" valign="top">NR</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">FAF<sup><xref ref-type="table-fn" rid="table5fn13">m</xref></sup> (384*384 pixels)</td><td align="left" valign="top">CNN: VGG16</td><td align="left" valign="top"><italic>r</italic><sup>2</sup><sup><xref ref-type="table-fn" rid="table5fn14">n</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Full FAF images: 0.44 (95% CI 0.36-0.49)</p></list-item><list-item><p>Rim only: 0.37 (95% CI 0.35-0.4)</p></list-item><list-item><p>Lesion only: 0.34 (95% CI 0.31-0.36)</p></list-item><list-item><p>Background only: 0.3 (95% CI 0.27-0.33)</p></list-item><list-item><p>Mask only: 0.27 (95% CI 0.24-0.29)</p></list-item></list></td></tr><tr><td align="left" valign="top">Anegondi et al [<xref ref-type="bibr" rid="ref58">58</xref>]</td><td align="left" valign="top">Retrospective clinical study; observational study</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Prediction and prognosis (GA lesion area and GA growth rate after lampalizumab treatment)</td><td align="left" valign="top">The lampalizumab phase 3 clinical trials and an accompanying observational study</td><td align="left" valign="top">1279; 443; 106; 169</td><td align="left" valign="top">NR</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT, FAF (512*512 pixels)</td><td align="left" valign="top">CNN: Inception v3</td><td align="left" valign="top"><italic>r</italic><sup>2</sup></td><td align="left" valign="top">GA prediction:<list list-type="bullet"><list-item><p>FAF-only: 0.98 (95% CI 0.97&#x2010;0.99)</p></list-item><list-item><p>OCT-only:
0.91 (95% CI 0.87&#x2010;0.95),</p></list-item><list-item><p>Multimodal: 0.94 (95% CI 0.92&#x2010;0.96).</p></list-item></list><break/>GA growth rate:<list list-type="bullet"><list-item><p>FAF-only: 0.65 (95% CI 0.52&#x2010;0.75),</p></list-item><list-item><p>OCT-only: 0.36 (95% CI 0.29&#x2010;0.43),</p></list-item><list-item><p>Multimodal: 0.47 (95% CI 0.40&#x2010;0.54)</p></list-item></list></td></tr><tr><td align="left" valign="top">Salvi et al [<xref ref-type="bibr" rid="ref59">59</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Prediction (the 1-year region of growth of GA lesions)</td><td align="left" valign="top">Lampalizumab clinical trials and prospective observational studies</td><td align="left" valign="top">597</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">FAF (768*768 pixels or 1536*1536 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">P<sup><xref ref-type="table-fn" rid="table5fn15">o</xref></sup>, R<sup><xref ref-type="table-fn" rid="table5fn16">p</xref></sup>, DSC, and <italic>r</italic><sup>2</sup></td><td align="left" valign="top">Whole lesion:<list list-type="bullet"><list-item><p>P: mean 0.70 (SD 0.12); R: mean 0.73 (SD 0.12); DSC: mean 0.70 (SD 0.09); <italic>r</italic><sup>2</sup>: 0.79</p></list-item></list></td></tr><tr><td align="left" valign="top">Yoshida [<xref ref-type="bibr" rid="ref60">60</xref>]</td><td align="left" valign="top">Retrospective analysis</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Prediction (GA progression)</td><td align="left" valign="top">Three prospective clinical trials</td><td align="left" valign="top">1219; 442</td><td align="left" valign="top">NR</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">3D OCT (496*1024*49 voxels)</td><td align="left" valign="top">CNNs: (1) en-face intensity maps; (2) SLIVER-net; (3) a 3D CNN; and (4) en-face layer thickness and between-layer intensity maps from a segmentation model</td><td align="left" valign="top"><italic>r</italic><sup>2</sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>GA lesion area: En-face intensity map: 0.91; SLIVER-net: 0.83; 3D DenseNet: 0.90; OCT EZ<sup><xref ref-type="table-fn" rid="table5fn17">q</xref></sup> and RPE<sup><xref ref-type="table-fn" rid="table5fn18">r</xref></sup> thickness map: 0.90;</p></list-item><list-item><p>GA growth rate: En-face intensity map: 0.33; SLIVER-net: 0.33; 3D DenseNet: 0.35; OCT EZ and RPE thickness map: 0.35.</p></list-item></list></td></tr><tr><td align="left" valign="top">GS Reiter [<xref ref-type="bibr" rid="ref63">63</xref>]</td><td align="left" valign="top">Post hoc analysis</td><td align="left" valign="top">Austria (Vienna)</td><td align="left" valign="top">Prediction (GA lesions progression)</td><td align="left" valign="top">The phase II randomized controlled trial FILLY</td><td align="left" valign="top">134</td><td align="left" valign="top">268 scans</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">FAF, NIR<sup><xref ref-type="table-fn" rid="table5fn19">s</xref></sup>, SD-OCT (NR)</td><td align="left" valign="top">CNN: PSC-UNet</td><td align="left" valign="top">ACC, Kappa, concordance index</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC: 0.48; Kappa: 0.23; concordance index:
0.69</p></list-item></list></td></tr><tr><td align="left" valign="top">J Mai [<xref ref-type="bibr" rid="ref64">64</xref>]</td><td align="left" valign="top">Post hoc analysis</td><td align="left" valign="top">Austria (Vienna)</td><td align="left" valign="top">Segmentation, quantification, and prediction (GA lesion and progression)</td><td align="left" valign="top">The phase 2 FILLY clinical trial and the Medical University of Vienna (MUV)</td><td align="left" valign="top">113; 100</td><td align="left" valign="top">226; 967</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT, FAF (768*768 and 1536*1536 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">DSC, Hausdorff distance, ICC<sup><xref ref-type="table-fn" rid="table5fn20">t</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>MUV: DSC: mean 0.86 (SD 0.12); Hausdorff distance: mean 0.54 (SD 0.45);</p></list-item><list-item><p>FILLY: DSC: mean 0.91 (SD 0.05); Hausdorff distance: mean 0.38 (SD 0.40)</p></list-item></list></td></tr><tr><td align="left" valign="top">YH Zhang [<xref ref-type="bibr" rid="ref61">61</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">China (Nanjing); United States (California)</td><td align="left" valign="top">Prediction (GA growth)</td><td align="left" valign="top">The Byers Eye Institute of Stanford University; the Jiangsu Provincial People&#x2019;s Hospital</td><td align="left" valign="top">22; 3</td><td align="left" valign="top">86 cubes; 33 cubes</td><td align="left" valign="top">Leave-one-out cross-validation</td><td align="left" valign="top">SD-OCT (178*270 pixels)</td><td align="left" valign="top">Recurrent neural network: a bidirectional long short-term memory network; CNN: 3D-UNet</td><td align="left" valign="top">DSC, CC<sup><xref ref-type="table-fn" rid="table5fn21">u</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Scenario I: DSC: 0.86; CC: 0.83;</p></list-item><list-item><p>Scenario II: DSC: 0.89; CC: 0.84;</p></list-item><list-item><p>Scenario III: DSC: 0.89; CC: 0.86;</p></list-item><list-item><p>Scenario IV: DSC: 0.92; CC: 0.88;</p></list-item><list-item><p>Scenario V: DSC: 0.88; CC: 0.85;</p></list-item><list-item><p>Scenario VI: DSC: 0.90; CC: 0.86</p></list-item></list></td></tr><tr><td align="left" valign="top">SX Wang [<xref ref-type="bibr" rid="ref62">62</xref>]</td><td align="left" valign="top">Model development</td><td align="left" valign="top">United States (California)</td><td align="left" valign="top">Segmentation and prediction (GA lesion area and GA progression)</td><td align="left" valign="top">The University of California, Los Angeles</td><td align="left" valign="top">147</td><td align="left" valign="top">NR</td><td align="left" valign="top">8-fold cross-validation</td><td align="left" valign="top">SD-OCT, FAF (512*512 pixels)</td><td align="left" valign="top">CNN: U-Net</td><td align="left" valign="top">SEN, SPE, ACC, OR<sup><xref ref-type="table-fn" rid="table5fn22">v</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>ACC: 0.95; SEN: 0.60; SPE: 0.96; OR: 0.65</p></list-item></list></td></tr><tr><td align="left" valign="top">J Mai [<xref ref-type="bibr" rid="ref65">65</xref>]</td><td align="left" valign="top">Clinical evaluation of a DL-based algorithm</td><td align="left" valign="top">Austria (Vienna)</td><td align="left"
valign="top">Prediction (GA lesions progression)</td><td align="left" valign="top">The Medical University of Vienna</td><td align="left" valign="top">100</td><td align="left" valign="top">967</td><td align="left" valign="top">5-fold cross-validation</td><td align="left" valign="top">SD-OCT, FAF (NR)</td><td align="left" valign="top">CNN: PSC-UNet</td><td align="left" valign="top">DSC, MAE<sup><xref ref-type="table-fn" rid="table5fn23">w</xref></sup>, and r2</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>0-1 year: DSC: mean 0.25 (SD 0.16); MAE: mean 0.13 (SD 0.11)</p></list-item><list-item><p>1-2 years: DSC: mean 0.38 (SD 0.20); MAE: mean 0.25 (SD 0.24);</p></list-item><list-item><p>2-3 years: DSC: mean 0.38 (SD 0.21); MAE: mean 0.35 (SD 0.34);</p></list-item><list-item><p>&#x003E;3 years: DSC: mean 0.37 (SD 0.23); MAE: mean 0.72 (SD 0.48)</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>RORA: retinal pigment epithelial and outer retinal atrophy.</p></fn><fn id="table5fn2"><p><sup>b</sup>NR: not reported.</p></fn><fn id="table5fn3"><p><sup>c</sup>OCT: optical coherence tomography.</p></fn><fn id="table5fn4"><p><sup>d</sup>CNN: convolutional neural network.</p></fn><fn id="table5fn5"><p><sup>e</sup>DSC: dice similarity coefficient.</p></fn><fn id="table5fn6"><p><sup>f</sup>AMD: age-related macular degeneration.</p></fn><fn id="table5fn7"><p><sup>g</sup>AREDS2: Age-Related Eye Disease Study 2.</p></fn><fn id="table5fn8"><p><sup>h</sup>SEN: sensitivity.</p></fn><fn id="table5fn9"><p><sup>i</sup>SPE: specificity.</p></fn><fn id="table5fn10"><p><sup>j</sup>PPV: positive predictive value.</p></fn><fn id="table5fn11"><p><sup>k</sup>NPV: negative predictive value.</p></fn><fn id="table5fn12"><p><sup>l</sup>ACC: accuracy.</p></fn><fn id="table5fn13"><p><sup>m</sup>FAF: fundus autofluorescence.</p></fn><fn id="table5fn14"><p><sup>n</sup><italic>r</italic><sup>2</sup>: Pearson correlation coefficient.</p></fn><fn id="table5fn15"><p><sup>o</sup>P: precision.</p></fn><fn id="table5fn16"><p><sup>p</sup>R: recall.</p></fn><fn id="table5fn17"><p><sup>q</sup>EZ: ellipsoid zone.</p></fn><fn id="table5fn18"><p><sup>r</sup>RPE: retinal pigment epithelium.</p></fn><fn id="table5fn19"><p><sup>s</sup>NIR: near-infrared reflectance.</p></fn><fn id="table5fn20"><p><sup>t</sup>ICC: intraclass coefficient.</p></fn><fn id="table5fn21"><p><sup>u</sup>CC: correlation coefficient.</p></fn><fn id="table5fn22"><p><sup>v</sup>OR: overlap ratio.</p></fn><fn id="table5fn23"><p><sup>w</sup>MAE: mean absolute error.</p></fn></table-wrap-foot></table-wrap><p>Dataset structures varied: 3 out of 11 studies used training-validation-test splits [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>]; 2 out of 11 studies used training-test sets [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref61">61</xref>]; 3 out of 11 studies used training-validation sets [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>]; and the rest adopted development&#x2013;holdout [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref60">60</xref>] or development-holdout-independent test configurations [<xref ref-type="bibr" rid="ref58">58</xref>]. In total, 6706 participants were included across studies. 
Fewer than half of the studies (4/11, 36.4%) reported demographic information, with mean ages ranging from 74 to 83 years [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref65">65</xref>]. Six studies [<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref64">64</xref>] were ethically approved and registered on ClinicalTrials.gov under the following identifiers: NCT02503332, NCT02247479, NCT02247531, NCT02479386, NCT01229215, and NCT02399072. The generalizability of the DL models was assessed using leave-one-out cross-validation in 1 study [<xref ref-type="bibr" rid="ref61">61</xref>], 5-fold cross-validation in 7 studies [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref63">63</xref>-<xref ref-type="bibr" rid="ref65">65</xref>], and 8-fold cross-validation in 1 study [<xref ref-type="bibr" rid="ref62">62</xref>]. The remaining 2 studies [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref59">59</xref>] did not specify the cross-validation methodology.</p><p>Studies used 3D-OCT, SD-OCT, NIR, and FAF images, primarily sourced from Heidelberg, Zeiss, and Bioptigen devices. While most studies reported image parameters, 2 did not specify resolution details [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>]. Commonly used DL architectures included Inception v3 [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>], PSC-UNet [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>], U-Net [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>], EfficientNet-b3 [<xref ref-type="bibr" rid="ref56">56</xref>], and VGG16 [<xref ref-type="bibr" rid="ref57">57</xref>]. In addition, some studies introduced novel approaches, such as en-face intensity maps, SLIVER-net, 3D CNN, and a recurrent neural network, for improved GA progression forecasting.</p><p>Across the various image modalities, datasets, and follow-up durations, we gathered 31 sets of performance data from the 11 studies. The performance metrics included the Hausdorff distance, concordance index, overlap, SEN, SPE, accuracy, mean absolute error, Kappa, DSC, P, PPV, R, <italic>r</italic><sup>2</sup>, and negative predictive value. The findings for single image modalities (3D-OCT, SD-OCT, or FAF) demonstrated that DL algorithms can predict GA growth rate and progression with excellent performance, comparable to that of trained experts [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref59">59</xref>-<xref ref-type="bibr" rid="ref61">61</xref>]. 
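</p><p>Because DSC and the Hausdorff distance were the most frequently reported segmentation metrics, the following minimal sketch (illustrative only; the binary masks are hypothetical, and the Hausdorff distance is computed here in pixel units) shows how both are typically derived from a model segmentation and an expert annotation.</p><preformat>
# Minimal sketch: dice similarity coefficient (DSC) and symmetric
# Hausdorff distance between two binary GA masks (illustrative only).
import numpy as np
from scipy.spatial.distance import directed_hausdorff

def dice(pred, truth):
    # DSC = 2 x |intersection| / (|pred| + |truth|); 1.0 = perfect overlap.
    intersection = np.logical_and(pred, truth).sum()
    return 2.0 * intersection / (pred.sum() + truth.sum())

def hausdorff(pred, truth):
    # Largest distance (in pixels) from a point of one mask to the
    # nearest point of the other, taken in both directions.
    a, b = np.argwhere(pred), np.argwhere(truth)
    return max(directed_hausdorff(a, b)[0], directed_hausdorff(b, a)[0])

pred = np.zeros((64, 64), dtype=bool)   # hypothetical model output
truth = np.zeros((64, 64), dtype=bool)  # hypothetical expert annotation
pred[20:40, 20:40] = True
truth[22:42, 22:42] = True
print(round(dice(pred, truth), 3), round(hausdorff(pred, truth), 3))
</preformat><p>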
Multimodal approaches combining FAF, NIR, and SD-OCT further showed feasibility for individualized lesion growth prediction and localization [<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref65">65</xref>].</p><p>In this systematic review, we used the PROBAST tool to rigorously evaluate prediction models across 4 domains, addressing 20 signaling questions for each paper reviewed. Within the &#x201C;participants&#x201D; domain, all studies used appropriate data sources; however, only 6 studies [<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref65">65</xref>] clearly outlined their inclusion and exclusion criteria for participants, leaving the others unclear. In terms of &#x201C;predictors,&#x201D; these were defined and evaluated similarly for all participants, had no connection to outcome data, and were available at baseline. All studies were rated &#x201C;yes&#x201D; on the questions concerning outcome measurement methods, definitions, interference factors, and measurement time intervals. Concerning &#x201C;analysis,&#x201D; Dow [<xref ref-type="bibr" rid="ref55">55</xref>] and Zhang [<xref ref-type="bibr" rid="ref61">61</xref>] used small datasets with insufficient numbers of participants. Although Zhang performed internal validation, the model, constructed with bidirectional long short-term memory and CNN frameworks, lacked external validation, which notably limits its generalizability. Two studies, by Salvi [<xref ref-type="bibr" rid="ref59">59</xref>] and Yoshida [<xref ref-type="bibr" rid="ref60">60</xref>], lacked independent and external validation. Gigon [<xref ref-type="bibr" rid="ref56">56</xref>] did not explicitly describe the handling of missing data, complexities in the data, or model overfitting. Conversely, all other items were evaluated as low risk, and the applications of the studies were universally ranked as low risk (Table S1 in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This systematic review evaluated the performance of AI, particularly DL algorithms, in detecting and managing GA secondary to dry AMD using noninvasive imaging modalities. Our findings demonstrate that AI models exhibit strong capabilities in accurately detecting, segmenting, and quantifying GA and in predicting its progression from OCT, FAF, CFP, and NIR imaging, achieving diagnostic accuracy comparable to that of human experts. However, this review also identified several methodological challenges, such as limited sample sizes, inconsistent annotation standards, and a general lack of external validation, which may hinder the clinical generalizability and practical application of these models. Despite these limitations, AI-based tools show significant potential for future use by both specialists and nonspecialists in primary and specialty care settings.</p></sec><sec id="s4-2"><title>AI in Detecting GA With OCT, FAF, NIR, and CFP Images</title><p>Ten studies published between 2018 and 2025 were included, involving at least 7132 participants aged 50 to 85 years. Half of the studies were conducted in the United States, while others originated from European countries. 
SD-OCT was the most frequently used imaging modality (6/10 studies), followed by CFP (2/10 studies), NIR (1/10 studies), and FAF (1/10 studies). Image preprocessing techniques, such as standardization of size, orientation, and intensity, as well as noise reduction, were consistently applied to enhance model stability and training efficiency. However, 3 studies did not report critical image parameters, such as resolution, potentially limiting reproducibility. DL-based algorithms, including CNNs, were the primary methodologies used for GA detection. Cross-validation techniques, such as 5-fold and 10-fold methods, were used in half of the studies to assess model robustness, though 3 studies did not report validation strategies. AI, particularly DL algorithms, holds significant promise for the detection of GA using noninvasive imaging modalities. OCT, CFP, NIR, and FAF each demonstrated robust diagnostic potential, with performance metrics rivaling or exceeding human expertise.</p></sec><sec id="s4-3"><title>AI for GA Management With OCT, FAF, and NIR Images</title><p>A total of 20 studies (14,064 participants) were published between 2019 and 2025, focusing on themes such as GA segmentation, classification, quantification, and progression prediction. The research designs and geographic regions were diverse. The studies included retrospective analysis (9/20), model development (7/20), and prospective, comparative, or cross-sectional studies (4/20). Significant contributions came from China (6/20) and the United States (7/20), with additional studies from the United Kingdom (2/20), Australia (2/20), France (1/20), Israel (1/20), and Austria (1/20). The studies used a variety of imaging modalities to assess GA, including SD-OCT, FAF, NIR, SS-OCT, and 3D-OCT. DL algorithms demonstrated remarkable performance in GA management tasks. U-Net was the most commonly used architecture. Multimodal approaches combined FAF and NIR images with DL networks to improve segmentation accuracy. Performance metrics, such as DSC, Kappa, SEN, SPE, and accuracy, consistently indicated strong diagnostic accuracy, with several studies achieving performance comparable to that of clinical experts.</p><p>Eleven studies with 6706 participants, published between 2021 and 2025, concentrated on the application of AI for predicting and segmenting GA lesions, as well as their growth and progression. The methodologies were diverse, including retrospective studies, model development studies, post hoc analyses, and clinical algorithm assessment. Participants or images were gathered from Australia, Switzerland, and various centers in China and the United States, ensuring broad geographic representation. Demographic information was reported in fewer than half of the studies, with a mean age ranging from 74 to 83 years. Imaging modalities, such as 3D-OCT, SD-OCT, NIR, and FAF, were obtained from devices including Bioptigen, Heidelberg Spectralis HRA+OCT, and Cirrus OCT. While the image preprocessing parameters were consistent across most studies, some did not specify image resolution. Multiview CNN architectures and advanced frameworks, such as bidirectional long short-term memory networks, were used. 
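</p><p>To illustrate the encoder-decoder family that dominates these segmentation studies, the sketch below outlines a deliberately small U-Net-style network accepting a 2-channel multimodal input (eg, coregistered FAF and NIR images). It is a schematic of the general architecture only; the name TinyUNet, the layer sizes, and the input resolution are illustrative assumptions, and the models in the included studies are considerably deeper.</p><preformat>
# Minimal sketch of a U-Net-style network for GA segmentation with a
# 2-channel multimodal input (eg, FAF + NIR). Illustrative only.
import torch
import torch.nn as nn

def conv_block(c_in, c_out):
    return nn.Sequential(
        nn.Conv2d(c_in, c_out, kernel_size=3, padding=1), nn.ReLU(),
        nn.Conv2d(c_out, c_out, kernel_size=3, padding=1), nn.ReLU())

class TinyUNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.enc1 = conv_block(2, 16)    # 2 input channels: FAF, NIR
        self.enc2 = conv_block(16, 32)
        self.pool = nn.MaxPool2d(2)
        self.up = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2)
        self.dec = conv_block(32, 16)    # 32 = upsampled + skip features
        self.head = nn.Conv2d(16, 1, kernel_size=1)  # per-pixel GA logit

    def forward(self, x):
        s1 = self.enc1(x)                        # skip connection source
        bottleneck = self.enc2(self.pool(s1))
        u = self.up(bottleneck)
        d = self.dec(torch.cat([u, s1], dim=1))  # fuse skip features
        return self.head(d)                      # (N, 1, H, W) lesion logits

faf_nir = torch.randn(1, 2, 128, 128)  # hypothetical coregistered pair
print(TinyUNet()(faf_nir).shape)       # torch.Size([1, 1, 128, 128])
</preformat><p>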
DL algorithms exhibited excellent predictive capabilities, with multimodal approaches enabling individualized GA lesion growth prediction.</p></sec><sec id="s4-4"><title>Noninvasive Image Analysis Techniques for GA</title><p>GA, a late-stage form of dry AMD, is marked by the irreversible loss of photoreceptors, RPE, and choriocapillaris [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. The application of noninvasive imaging modalities has revolutionized the detection and management of GA. A comparative summary of AI performance across these modalities is provided in Table S2 in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. CFP serves as a standard initial assessment tool, useful for screening and early detection. On CFP, GA lesions appear as well-defined regions of RPE hypopigmentation with visible underlying choroidal vessels [<xref ref-type="bibr" rid="ref66">66</xref>]. FAF imaging, which uses a blue excitation wavelength (488 nm), visualizes metabolic changes at the level of the photoreceptor-RPE complex and is practical for assessing GA lesion size and progression, with lesions appearing hypoautofluorescent [<xref ref-type="bibr" rid="ref67">67</xref>]. On NIR (787-820 nm, a longer wavelength than FAF that is less harmful to the eye), GA lesions typically appear brighter than nonatrophic areas [<xref ref-type="bibr" rid="ref68">68</xref>]. In addition, NIR can help detect the boundaries of foveal lesions, where image contrast is lower on FAF [<xref ref-type="bibr" rid="ref68">68</xref>]. Recently, the Classification of Atrophy Meeting group recommended that atrophy in both patients with and those without neovascular AMD be defined based on specific drusen characteristics and other anatomical features, noting that atrophy is most easily characterized by OCT [<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref70">70</xref>]. OCT stands out as the gold standard for GA detection and classification, providing high-resolution, cross-sectional, and en face images of the retina and choroid. SD-OCT is widely used in research and clinical trials, offering precise measurement of GA area and growth rates, while SS-OCT and 3D-OCT offer superior structural insights and potential for AI-driven automation [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref72">72</xref>]. Despite the higher cost and technical complexity of advanced OCT technologies, their detailed GA assessment capabilities make them indispensable tools in both clinical practice and research. Furthermore, OCT provides volumetric (3D) structural data, unlike the 2D en face projections of FAF, CFP, and NIR. This allows AI to learn not just the surface appearance of atrophy but also the cross-sectional structural alterations that define and precede GA [<xref ref-type="bibr" rid="ref3">3</xref>]. As technology advances, the integration of AI and further developments in imaging techniques are expected to enhance the utility of these modalities, overcoming current limitations and expanding their applications in ophthalmology.</p></sec><sec id="s4-5"><title>Advantages and Challenges of AI Architectures in Clinical Workflow</title><p>AI addresses critical limitations of traditional GA monitoring, such as labor-intensive manual grading and intergrader variability [<xref ref-type="bibr" rid="ref73">73</xref>]. 
Specifically, automated algorithms enable rapid, standardized analysis of large fundus image datasets, reducing clinician workload and enhancing reproducibility [<xref ref-type="bibr" rid="ref74">74</xref>]. Furthermore, our review revealed a clear trend in the choice of model architectures tailored to specific clinical tasks. A critical analysis of these architectures is provided in Table S3 in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. Interestingly, with the advancement of AI algorithm architectures, numerous studies have emerged that use these technologies to identify atrophy caused by various retinal diseases and to evaluate treatment outcomes through image analysis. Miere et al [<xref ref-type="bibr" rid="ref75">75</xref>] trained a DL-based classifier to automatically distinguish GA from atrophy secondary to inherited retinal diseases on FAF according to etiology, using 2 approaches (a standard training-validation approach and a 10-fold cross-validation approach), achieving good accuracy and excellent area under the receiver operating characteristic curve (AUROC) values. In addition, a post hoc analysis examined the association between treatment and changes in photoreceptor lamina thickness in patients with GA secondary to AMD, supporting an effect of pegcetacoplan on photoreceptors on OCT by demonstrating that treatment with the drug was associated with reduced outer retinal thinning [<xref ref-type="bibr" rid="ref76">76</xref>]. Similarly, DL-based OCT image analysis assessed the therapeutic effectiveness of complement component 3 inhibition in delaying GA progression, with findings indicating decreased photoreceptor thinning and loss [<xref ref-type="bibr" rid="ref77">77</xref>]. Recent studies demonstrating the application of AI algorithms in imaging further validate their potential as reliable supplements to human expertise in the diagnosis and management of GA.</p></sec><sec id="s4-6"><title>Technical Challenges and Limitations</title><p>Despite the promising advancements in AI for GA detection and management, several technical challenges and limitations persist. A significant limitation of OCT-based AI models is their difficulty in distinguishing GA secondary to AMD from other forms of retinal atrophy; thus, the findings may not generalize to broader AMD cases or other retinal diseases, which limits their clinical applicability. In addition, images from different OCT devices show substantial variability and imprecision, which compromises consistent data acquisition [<xref ref-type="bibr" rid="ref74">74</xref>]. Another major challenge is the variability in algorithm performance caused by differences in training data, image acquisition protocols, and disease definitions. These differences reduce reproducibility and limit practical deployment. For instance, the absence of standardized reporting in AI studies can result in discrepancies when interpreting results and hinder comparisons between different models. Moreover, despite the high performance metrics (eg, SEN, SPE, DSC&#x003E;0.85, and AUROC&#x003E;0.95) reported by many studies, methodological limitations remain. 
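</p><p>For orientation, such AUROC, SEN, and SPE values are typically computed from per-image expert labels and model output probabilities, as in the minimal sketch below (all values are invented for illustration and do not come from any included study).</p><preformat>
# Minimal sketch: AUROC, SEN, and SPE from hypothetical per-image
# expert labels and model probabilities (all values invented).
import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([1, 1, 1, 0, 0, 0, 1, 0])        # expert GA labels
y_prob = np.array([0.92, 0.85, 0.25, 0.10,         # model probabilities
                   0.05, 0.30, 0.77, 0.22])

print("AUROC:", roc_auc_score(y_true, y_prob))     # 0.9375 here

y_pred = (y_prob >= 0.5).astype(int)               # operating threshold
tp = np.sum((y_pred == 1) * (y_true == 1))         # true positives
tn = np.sum((y_pred == 0) * (y_true == 0))         # true negatives
print("SEN:", tp / y_true.sum())                   # 0.75: 3 of 4 GA eyes
print("SPE:", tn / (len(y_true) - y_true.sum()))   # 1.0: all non-GA eyes
</preformat><p>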
All diagnostic studies included in this review were assessed as high risk in at least 1 domain (10/10), only 1 GA assessment study (1/20) was evaluated as low risk across all domains, and several prediction studies (7/11) were ranked as high or unclear risk in at least 1 domain, primarily due to small or nonrepresentative datasets and a lack of detailed reporting on image preprocessing and external validation. These methodological shortcomings may lead to an overestimation of AI model performance and reduced overall robustness, thereby decreasing the generalizability of the findings and limiting confidence in their real-world applicability. Future studies should prioritize larger, more diverse datasets; implement rigorous validation frameworks to enhance performance (including detection, segmentation, quantification, and prediction accuracy); and conduct prospective, multicenter validation studies to improve clinical applicability and generalizability. Furthermore, adherence to established reporting guidelines for AI studies (such as the Standards for Reporting Diagnostic Accuracy-AI and the Checklist for Artificial Intelligence in Medical Imaging [<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]) would improve comprehension and transparency, allow for more meaningful comparisons between systems, and facilitate meta-analyses.</p></sec><sec id="s4-7"><title>Real-World Implications and Research Contributions</title><p>Overall, despite some limitations, AI is constantly evolving and holds great potential for transformation in the health care sector [<xref ref-type="bibr" rid="ref80">80</xref>]. AI has the potential to accelerate existing forms of medical analysis; however, its algorithms require further testing to be fully trusted. Clinically, AI-based automated tools show strong potential to facilitate early detection, precise quantification, and progression prediction of GA, thereby reducing the burden on retinal specialists and improving diagnostic consistency. Furthermore, DL algorithms have demonstrated effectiveness in identifying retinal image features associated with cognitive decline, dementia, Parkinson disease, and cardiovascular risk factors [<xref ref-type="bibr" rid="ref81">81</xref>]. These findings indicate that AI-based retinal image analysis holds promise for transforming primary care and systemic disease management. Although most AI applications remain in the validation phase, the integration of AI with multimodal imaging, novel biomarkers, and emerging therapeutics could transform clinical management paradigms in GA and advance personalized medicine. Future efforts should focus on developing standardized datasets, improving algorithmic generalizability, and conducting real-world validation studies to fully integrate AI into routine ophthalmic practice.</p></sec><sec id="s4-8"><title>Conclusion</title><p>AI, especially DL-based algorithms, holds considerable promise for the detection and management of GA secondary to dry AMD, with performance comparable to trained experts. This systematic review synthesizes and critically appraises the current evidence, highlighting that AI&#x2019;s capabilities extend across GA management&#x2014;from initial detection and precise segmentation to the forecasting of lesion progression, which informs future research directions. 
Meanwhile, with the development of C5 inhibitors, AI-based noninvasive fundus image analysis is expected to detect, identify, and monitor GA at an early stage, thereby expanding the window of opportunity for treatment. AI has strong potential to augment and streamline clinical workflows by offering automated, reproducible analysis that can assist clinicians in managing large volumes of imaging data; however, more studies are needed to further validate its effectiveness, repeatability, and accuracy.</p></sec></sec></body><back><ack><p>The authors declared that artificial intelligence (AI) or AI-assisted technologies were not used in the writing process of this manuscript.</p></ack><notes><sec><title>Funding</title><p>This research was funded by the Central High-Level Traditional Chinese Medicine Hospital Project of the Eye Hospital, China Academy of Chinese Medical Sciences (grant no GSP5-82); the National Natural Science Foundation of China (grant no 82274589); the Science and Technology Innovation Project, China Academy of Chinese Medical Sciences (grant no CI2023C008YG); the Institute-level Research Launch Fund of the Eye Hospital, China Academy of Chinese Medical Sciences (grant no kxy-202402); and the Special Project for the Director of the Business Research Office (grant no 2020YJSZX-2).</p></sec><sec><title>Data Availability</title><p>All data generated or analyzed during this study are included in this published article and its multimedia appendix files.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AMD</term><def><p>age-related macular degeneration</p></def></def-item><def-item><term id="abb3">AUROC</term><def><p>area under the receiver operating characteristic curve</p></def></def-item><def-item><term id="abb4">CFP</term><def><p>color fundus photography</p></def></def-item><def-item><term id="abb5">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb6">cRORA</term><def><p>complete retinal pigment epithelium and outer retinal atrophy</p></def></def-item><def-item><term id="abb7">DL</term><def><p>deep learning</p></def></def-item><def-item><term id="abb8">DSC</term><def><p>dice similarity coefficient</p></def></def-item><def-item><term id="abb9">FAF</term><def><p>fundus autofluorescence</p></def></def-item><def-item><term id="abb10">GA</term><def><p>geographic atrophy</p></def></def-item><def-item><term id="abb11">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb12">NIR</term><def><p>near-infrared reflectance</p></def></def-item><def-item><term id="abb13">OCT</term><def><p>optical coherence tomography</p></def></def-item><def-item><term id="abb14">PPV</term><def><p>positive predictive value</p></def></def-item><def-item><term id="abb15">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb16">PRISMA-DTA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analysis of Diagnostic Test Accuracy</p></def></def-item><def-item><term id="abb17">PRISMA-S</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses literature search extension</p></def></def-item><def-item><term id="abb18">PROBAST</term><def><p>Prediction Model Risk of Bias Assessment Tool</p></def></def-item><def-item><term 
id="abb19">QUADAS</term><def><p>Quality Assessment of Diagnostic Accuracy Studies</p></def></def-item><def-item><term id="abb20">RPE</term><def><p>retinal pigment epithelium</p></def></def-item><def-item><term id="abb21">SD-OCT</term><def><p>spectral domain optical coherence tomography</p></def></def-item><def-item><term id="abb22">SEN</term><def><p>sensitivity</p></def></def-item><def-item><term id="abb23">SPE</term><def><p>specificity</p></def></def-item><def-item><term id="abb24">SS-OCT</term><def><p>swept-source OCT</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thomas</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Mirza</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Gill</surname><given-names>MK</given-names> </name></person-group><article-title>Age-related macular degeneration</article-title><source>Med Clin North Am</source><year>2021</year><month>05</month><volume>105</volume><issue>3</issue><fpage>473</fpage><lpage>491</lpage><pub-id pub-id-type="doi">10.1016/j.mcna.2021.01.003</pub-id><pub-id pub-id-type="medline">33926642</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fleckenstein</surname><given-names>M</given-names> </name><name name-style="western"><surname>Schmitz-Valckenberg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chakravarthy</surname><given-names>U</given-names> </name></person-group><article-title>Age-related macular degeneration: a review</article-title><source>JAMA</source><year>2024</year><month>01</month><day>9</day><volume>331</volume><issue>2</issue><fpage>147</fpage><lpage>157</lpage><pub-id pub-id-type="doi">10.1001/jama.2023.26074</pub-id><pub-id pub-id-type="medline">38193957</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sadda</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Guymer</surname><given-names>R</given-names> </name><name name-style="western"><surname>Holz</surname><given-names>FG</given-names> </name><etal/></person-group><article-title>Consensus definition for atrophy associated with age-related macular degeneration on OCT: classification of atrophy report 3</article-title><source>Ophthalmology</source><year>2018</year><month>04</month><volume>125</volume><issue>4</issue><fpage>537</fpage><lpage>548</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2017.09.028</pub-id><pub-id pub-id-type="medline">29103793</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liakopoulos</surname><given-names>S</given-names> </name><name name-style="western"><surname>von der Emde</surname><given-names>L</given-names> </name><name name-style="western"><surname>Biller</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Ach</surname><given-names>T</given-names> </name><name name-style="western"><surname>Holz</surname><given-names>FG</given-names> </name></person-group><article-title>Geographic atrophy in age-related macular 
degeneration</article-title><source>Dtsch Arztebl Int</source><year>2025</year><month>02</month><day>7</day><volume>122</volume><issue>3</issue><fpage>82</fpage><lpage>88</lpage><pub-id pub-id-type="doi">10.3238/arztebl.m2025.0003</pub-id><pub-id pub-id-type="medline">39836449</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pramil</surname><given-names>V</given-names> </name><name name-style="western"><surname>de Sisternes</surname><given-names>L</given-names> </name><name name-style="western"><surname>Omlor</surname><given-names>L</given-names> </name><etal/></person-group><article-title>A deep learning model for automated segmentation of geographic atrophy imaged using swept-source OCT</article-title><source>Ophthalmol Retina</source><year>2023</year><month>02</month><volume>7</volume><issue>2</issue><fpage>127</fpage><lpage>141</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.08.007</pub-id><pub-id pub-id-type="medline">35970318</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Heier</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Lad</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Holz</surname><given-names>FG</given-names> </name><etal/></person-group><article-title>Pegcetacoplan for the treatment of geographic atrophy secondary to age-related macular degeneration (OAKS and DERBY): two multicentre, randomised, double-masked, sham-controlled, phase 3 trials</article-title><source>Lancet</source><year>2023</year><month>10</month><day>21</day><volume>402</volume><issue>10411</issue><fpage>1434</fpage><lpage>1448</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(23)01520-9</pub-id><pub-id pub-id-type="medline">37865470</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khanani</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Staurenghi</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Efficacy and safety of avacincaptad pegol in patients with geographic atrophy (GATHER2): 12-month results from a randomised, double-masked, phase 3 trial</article-title><source>Lancet</source><year>2023</year><month>10</month><day>21</day><volume>402</volume><issue>10411</issue><fpage>1449</fpage><lpage>1458</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(23)01583-0</pub-id><pub-id pub-id-type="medline">37696275</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jaffe</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Westby</surname><given-names>K</given-names> </name><name name-style="western"><surname>Csaky</surname><given-names>KG</given-names> </name><etal/></person-group><article-title>C5 inhibitor avacincaptad pegol for geographic atrophy due to age-related macular degeneration: a randomized pivotal phase 2/3 
trial</article-title><source>Ophthalmology</source><year>2021</year><month>04</month><volume>128</volume><issue>4</issue><fpage>576</fpage><lpage>586</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2020.08.027</pub-id><pub-id pub-id-type="medline">32882310</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schmidt-Erfurth</surname><given-names>U</given-names> </name><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Reiter</surname><given-names>GS</given-names> </name><etal/></person-group><article-title>Disease activity and therapeutic response to pegcetacoplan for geographic atrophy identified by deep learning-based analysis of OCT</article-title><source>Ophthalmology</source><year>2025</year><month>02</month><volume>132</volume><issue>2</issue><fpage>181</fpage><lpage>193</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2024.08.017</pub-id><pub-id pub-id-type="medline">39151755</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reiter</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Riedl</surname><given-names>S</given-names> </name><etal/></person-group><article-title>AI in the clinical management of GA: a novel therapeutic universe requires novel tools</article-title><source>Prog Retin Eye Res</source><year>2024</year><month>11</month><volume>103</volume><fpage>101305</fpage><pub-id pub-id-type="doi">10.1016/j.preteyeres.2024.101305</pub-id><pub-id pub-id-type="medline">39343193</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>R</given-names> </name><name name-style="western"><surname>Niu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Ji</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Rubin</surname><given-names>D</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Y</given-names> </name></person-group><article-title>Automated geographic atrophy segmentation for SD-OCT images based on two-stage learning model</article-title><source>Comput Biol Med</source><year>2019</year><month>02</month><volume>105</volume><fpage>102</fpage><lpage>111</lpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2018.12.013</pub-id><pub-id pub-id-type="medline">30605812</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kanagasingam</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Bhuiyan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Abr&#x00E0;moff</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>RT</given-names> </name><name name-style="western"><surname>Goldschmidt</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wong</surname><given-names>TY</given-names> 
</name></person-group><article-title>Progress on retinal image analysis for age related macular degeneration</article-title><source>Prog Retin Eye Res</source><year>2014</year><month>01</month><volume>38</volume><fpage>20</fpage><lpage>42</lpage><pub-id pub-id-type="doi">10.1016/j.preteyeres.2013.10.002</pub-id><pub-id pub-id-type="medline">24211245</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>X</given-names> </name><name name-style="western"><surname>Keenan</surname><given-names>TDL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Improving interpretability in machine diagnosis: detection of geographic atrophy in OCT scans</article-title><source>Ophthalmol Sci</source><year>2021</year><month>09</month><volume>1</volume><issue>3</issue><fpage>100038</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2021.100038</pub-id><pub-id pub-id-type="medline">36247813</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="web"><article-title>What is artificial intelligence? definition, uses, and types</article-title><source>Coursera</source><access-date>2025-11-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.coursera.org/articles/what-is-artificial-intelligence">https://www.coursera.org/articles/what-is-artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="web"><article-title>What is machine learning? definition, types, and examples</article-title><source>Coursera</source><access-date>2025-11-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.coursera.org/articles/what-is-machine-learning">https://www.coursera.org/articles/what-is-machine-learning</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><article-title>What is deep learning? 
definition, examples, and careers</article-title><source>Coursera</source><access-date>2025-11-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.coursera.org/articles/what-is-deep-learning">https://www.coursera.org/articles/what-is-deep-learning</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aggarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Martin</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of deep learning in medical imaging: a systematic review and meta-analysis</article-title><source>NPJ Digit Med</source><year>2021</year><month>04</month><day>7</day><volume>4</volume><issue>1</issue><fpage>65</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00438-z</pub-id><pub-id pub-id-type="medline">33828217</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Son</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>HD</given-names> </name><name name-style="western"><surname>Jung</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Park</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Park</surname><given-names>SJ</given-names> </name></person-group><article-title>Development and validation of deep learning models for screening multiple abnormal findings in retinal fundus images</article-title><source>Ophthalmology</source><year>2020</year><month>01</month><volume>127</volume><issue>1</issue><fpage>85</fpage><lpage>94</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2019.05.029</pub-id><pub-id pub-id-type="medline">31281057</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>MDF</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thombs</surname><given-names>BD</given-names> </name><etal/></person-group><article-title>Preferred Reporting Items for a Systematic Review and Meta-analysis of Diagnostic Test Accuracy Studies: the PRISMA-DTA statement</article-title><source>JAMA</source><year>2018</year><month>01</month><day>23</day><volume>319</volume><issue>4</issue><fpage>388</fpage><lpage>396</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id><pub-id pub-id-type="medline">29362800</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic 
reviews</article-title><source>BMJ</source><year>2021</year><month>03</month><day>29</day><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id><pub-id pub-id-type="medline">33782057</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lo</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Artificial intelligence for diagnosing exudative age-related macular degeneration</article-title><source>Cochrane Database Syst Rev</source><year>2024</year><month>10</month><day>17</day><volume>10</volume><issue>10</issue><fpage>CD015522</fpage><pub-id pub-id-type="doi">10.1002/14651858.CD015522.pub2</pub-id><pub-id pub-id-type="medline">39417312</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rethlefsen</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Kirtley</surname><given-names>S</given-names> </name><name name-style="western"><surname>Waffenschmidt</surname><given-names>S</given-names> </name><etal/></person-group><article-title>PRISMA-S: an extension to the PRISMA statement for Reporting Literature Searches in Systematic Reviews</article-title><source>Syst Rev</source><year>2021</year><month>01</month><day>26</day><volume>10</volume><issue>1</issue><fpage>39</fpage><pub-id pub-id-type="doi">10.1186/s13643-020-01542-z</pub-id><pub-id pub-id-type="medline">33499930</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>NN</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>GH</given-names> </name><name name-style="western"><surname>Cao</surname><given-names>MF</given-names> </name></person-group><article-title>Artificial intelligence for the detection of glaucoma with SD-OCT images: a systematic review and Meta-analysis</article-title><source>Int J Ophthalmol</source><year>2024</year><volume>17</volume><issue>3</issue><fpage>408</fpage><lpage>419</lpage><pub-id pub-id-type="doi">10.18240/ijo.2024.03.02</pub-id><pub-id pub-id-type="medline">38721504</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Ashrafian</surname><given-names>H</given-names> </name><name name-style="western"><surname>Rose</surname><given-names>S</given-names> </name><etal/></person-group><article-title>A quality assessment tool for artificial intelligence-centered diagnostic test accuracy studies: QUADAS-AI</article-title><source>Nat Med</source><year>2021</year><month>10</month><volume>27</volume><issue>10</issue><fpage>1663</fpage><lpage>1665</lpage><pub-id pub-id-type="doi">10.1038/s41591-021-01517-0</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Mohammad-Rahimi</surname><given-names>H</given-names> </name><name name-style="western"><surname>Motamedian</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Rohban</surname><given-names>MH</given-names> </name><etal/></person-group><article-title>Deep learning for caries detection: a systematic review</article-title><source>J Dent (Shiraz)</source><year>2022</year><month>07</month><volume>122</volume><fpage>104115</fpage><pub-id pub-id-type="doi">10.1016/j.jdent.2022.104115</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name><name name-style="western"><surname>Wolff</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Riley</surname><given-names>RD</given-names> </name><etal/></person-group><article-title>PROBAST: a tool to assess risk of bias and applicability of prediction model studies: explanation and elaboration</article-title><source>Ann Intern Med</source><year>2019</year><month>01</month><day>1</day><volume>170</volume><issue>1</issue><fpage>W1</fpage><lpage>W33</lpage><pub-id pub-id-type="doi">10.7326/M18-1377</pub-id><pub-id pub-id-type="medline">30596876</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chiang</surname><given-names>JN</given-names> </name><name name-style="western"><surname>Corradetti</surname><given-names>G</given-names> </name><name name-style="western"><surname>Nittala</surname><given-names>MG</given-names> </name><etal/></person-group><article-title>Automated identification of incomplete and complete retinal epithelial pigment and outer retinal atrophy using machine learning</article-title><source>Ophthalmol Retina</source><year>2023</year><month>02</month><volume>7</volume><issue>2</issue><fpage>118</fpage><lpage>126</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.08.016</pub-id><pub-id pub-id-type="medline">35995411</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elsawy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Keenan</surname><given-names>TDL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Deep-GA-Net for accurate and explainable detection of geographic atrophy on OCT scans</article-title><source>Ophthalmol Sci</source><year>2023</year><month>12</month><volume>3</volume><issue>4</issue><fpage>100311</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2023.100311</pub-id><pub-id pub-id-type="medline">37304045</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yao</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>Deep learning approaches for detecting of nascent geographic atrophy in age-related macular degeneration</article-title><source>Ophthalmol 
Sci</source><year>2024</year><volume>4</volume><issue>3</issue><fpage>100428</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2023.100428</pub-id><pub-id pub-id-type="medline">38284101</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Treder</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lauermann</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Eter</surname><given-names>N</given-names> </name></person-group><article-title>Deep learning-based detection and classification of geographic atrophy using a deep convolutional neural network classifier</article-title><source>Graefes Arch Clin Exp Ophthalmol</source><year>2018</year><month>11</month><volume>256</volume><issue>11</issue><fpage>2053</fpage><lpage>2060</lpage><pub-id pub-id-type="doi">10.1007/s00417-018-4098-2</pub-id><pub-id pub-id-type="medline">30091055</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kalra</surname><given-names>G</given-names> </name><name name-style="western"><surname>Cetin</surname><given-names>H</given-names> </name><name name-style="western"><surname>Whitney</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Machine learning-based automated detection and quantification of geographic atrophy and hypertransmission defects using spectral domain optical coherence tomography</article-title><source>J Pers Med</source><year>2022</year><month>12</month><day>24</day><volume>13</volume><issue>1</issue><fpage>37</fpage><pub-id pub-id-type="doi">10.3390/jpm13010037</pub-id><pub-id pub-id-type="medline">36675697</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Derradji</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Mosinska</surname><given-names>A</given-names> </name><name name-style="western"><surname>Apostolopoulos</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ciller</surname><given-names>C</given-names> </name><name name-style="western"><surname>De Zanet</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mantel</surname><given-names>I</given-names> </name></person-group><article-title>Fully-automated atrophy segmentation in dry age-related macular degeneration in optical coherence tomography</article-title><source>Sci Rep</source><year>2021</year><month>11</month><day>8</day><volume>11</volume><issue>1</issue><fpage>21893</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-01227-0</pub-id><pub-id pub-id-type="medline">34751189</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fineberg</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tiosano</surname><given-names>A</given-names> </name><name name-style="western"><surname>Golan</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Near infrared reflectance imaging for the assessment of geographic atrophy using deep learning</article-title><source>Retina</source><year>2025</year><month>07</month><day>15</day><pub-id 
pub-id-type="doi">10.1097/IAE.0000000000004614</pub-id><pub-id pub-id-type="medline">40694826</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Keenan</surname><given-names>TD</given-names> </name><name name-style="western"><surname>Dharssi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>A deep learning approach for automated detection of geographic atrophy from color fundus photographs</article-title><source>Ophthalmology</source><year>2019</year><month>11</month><volume>126</volume><issue>11</issue><fpage>1533</fpage><lpage>1540</lpage><pub-id pub-id-type="doi">10.1016/j.ophtha.2019.06.005</pub-id><pub-id pub-id-type="medline">31358385</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sarao</surname><given-names>V</given-names> </name><name name-style="western"><surname>Veritti</surname><given-names>D</given-names> </name><name name-style="western"><surname>De Nardin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Misciagna</surname><given-names>M</given-names> </name><name name-style="western"><surname>Foresti</surname><given-names>G</given-names> </name><name name-style="western"><surname>Lanzetta</surname><given-names>P</given-names> </name></person-group><article-title>Explainable artificial intelligence model for the detection of geographic atrophy using colour retinal photographs</article-title><source>BMJ Open Ophthalmol</source><year>2023</year><month>12</month><day>6</day><volume>8</volume><issue>1</issue><fpage>e001411</fpage><pub-id pub-id-type="doi">10.1136/bmjophth-2023-001411</pub-id><pub-id pub-id-type="medline">38057106</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>de Vente</surname><given-names>C</given-names> </name><name name-style="western"><surname>Valmaggia</surname><given-names>P</given-names> </name><name name-style="western"><surname>Hoyng</surname><given-names>CB</given-names> </name><etal/></person-group><article-title>Generalizable deep learning for the detection of incomplete and complete retinal pigment epithelium and outer retinal atrophy: a MACUSTAR report</article-title><source>Transl Vis Sci Technol</source><year>2024</year><month>09</month><day>3</day><volume>13</volume><issue>9</issue><fpage>11</fpage><pub-id pub-id-type="doi">10.1167/tvst.13.9.11</pub-id><pub-id pub-id-type="medline">39235402</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ji</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>X</given-names> </name><name name-style="western"><surname>Leng</surname><given-names>T</given-names> </name><name name-style="western"><surname>Rubin</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name></person-group><article-title>Mirrored X-Net: Joint classification and contrastive learning for weakly supervised GA segmentation in SD-OCT</article-title><source>Pattern 
Recognit DAGM</source><year>2024</year><month>09</month><volume>153</volume><fpage>110507</fpage><pub-id pub-id-type="doi">10.1016/j.patcog.2024.110507</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ji</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Niu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Leng</surname><given-names>T</given-names> </name><name name-style="western"><surname>Rubin</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Q</given-names> </name></person-group><article-title>MS-CAM: Multi-Scale Class Activation Maps for weakly-supervised segmentation of geographic atrophy lesions in SD-OCT images</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>12</month><volume>24</volume><issue>12</issue><fpage>3443</fpage><lpage>3455</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2020.2999588</pub-id><pub-id pub-id-type="medline">32750923</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Royer</surname><given-names>C</given-names> </name><name name-style="western"><surname>Sublime</surname><given-names>J</given-names> </name><name name-style="western"><surname>Rossant</surname><given-names>F</given-names> </name><name name-style="western"><surname>Paques</surname><given-names>M</given-names> </name></person-group><article-title>Unsupervised approaches for the segmentation of dry ARMD lesions in eye fundus cSLO images</article-title><source>J Imaging</source><year>2021</year><month>08</month><day>11</day><volume>7</volume><issue>8</issue><fpage>143</fpage><pub-id pub-id-type="doi">10.3390/jimaging7080143</pub-id><pub-id pub-id-type="medline">34460779</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Spaide</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Patil</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Geographic atrophy segmentation using multimodal deep learning</article-title><source>Trans Vis Sci Tech</source><year>2023</year><month>07</month><day>3</day><volume>12</volume><issue>7</issue><fpage>10</fpage><pub-id pub-id-type="doi">10.1167/tvst.12.7.10</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Spaide</surname><given-names>T</given-names> </name><name name-style="western"><surname>Rajesh</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Gim</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Estimating uncertainty of geographic atrophy segmentations with Bayesian deep learning</article-title><source>Ophthalmol Sci</source><year>2025</year><volume>5</volume><issue>1</issue><fpage>100587</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2024.100587</pub-id><pub-id pub-id-type="medline">39380882</pub-id></nlm-citation></ref><ref 
id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Williamson</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Struyven</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Antaki</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Artificial intelligence to facilitate clinical trial recruitment in age-related macular degeneration</article-title><source>Ophthalmol Sci</source><year>2024</year><volume>4</volume><issue>6</issue><fpage>100566</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2024.100566</pub-id><pub-id pub-id-type="medline">39139546</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arslan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Samarasinghe</surname><given-names>G</given-names> </name><name name-style="western"><surname>Sowmya</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Deep learning applied to automated segmentation of geographic atrophy in fundus autofluorescence images</article-title><source>Trans Vis Sci Tech</source><year>2021</year><month>07</month><day>1</day><volume>10</volume><issue>8</issue><fpage>2</fpage><pub-id pub-id-type="doi">10.1167/tvst.10.8.2</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Al-khersan</surname><given-names>H</given-names> </name><name name-style="western"><surname>Oakley</surname><given-names>J</given-names> </name><name name-style="western"><surname>Russakoff</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Automated deep learning-based 3D-to-2D segmentation of geographic atrophy in optical coherence tomography data</article-title><source>medRxiv</source><comment>Preprint posted online on 2025</comment><pub-id pub-id-type="doi">10.1101/2025.07.07.25330902</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>J</given-names> </name></person-group><article-title>Dual-branch image projection network for geographic atrophy segmentation in retinal OCT images</article-title><source>Sci Rep</source><year>2025</year><volume>15</volume><issue>1</issue><pub-id pub-id-type="doi">10.1038/s41598-025-90709-6</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Safai</surname><given-names>A</given-names> </name><name name-style="western"><surname>Froines</surname><given-names>C</given-names> </name><name name-style="western"><surname>Slater</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Quantifying geographic atrophy in age-related macular degeneration: a comparative analysis across 12 deep learning models</article-title><source>Invest Ophthalmol Vis 
Sci</source><year>2024</year><month>07</month><day>1</day><volume>65</volume><issue>8</issue><fpage>42</fpage><pub-id pub-id-type="doi">10.1167/iovs.65.8.42</pub-id><pub-id pub-id-type="medline">39046755</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vogl</surname><given-names>WD</given-names> </name><name name-style="western"><surname>Riedl</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Predicting topographic disease progression and treatment response of pegcetacoplan in geographic atrophy quantified by deep learning</article-title><source>Ophthalmol Retina</source><year>2023</year><month>01</month><volume>7</volume><issue>1</issue><fpage>4</fpage><lpage>13</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.08.003</pub-id><pub-id pub-id-type="medline">35948209</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Two-step hierarchical neural network for classification of dry age-related macular degeneration using optical coherence tomography images</article-title><source>Front Med</source><year>2023</year><volume>10</volume><fpage>1221453</fpage><pub-id pub-id-type="doi">10.3389/fmed.2023.1221453</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>B</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Dry age-related macular degeneration classification from optical coherence tomography images based on ensemble deep learning architecture</article-title><source>Front Med</source><year>2024</year><volume>11</volume><fpage>1438768</fpage><pub-id pub-id-type="doi">10.3389/fmed.2024.1438768</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Siraz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kamanda</surname><given-names>H</given-names> </name><name name-style="western"><surname>Gholami</surname><given-names>S</given-names> </name><name name-style="western"><surname>Nabil</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Yee Ong</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Alam</surname><given-names>MN</given-names> </name></person-group><article-title>Multi-class classification of central and non-central geographic atrophy using optical coherence tomography</article-title><source>medRxiv</source><comment>Preprint posted online on May 28, 2025</comment><pub-id pub-id-type="doi">10.1101/2025.05.27.25328446</pub-id><pub-id pub-id-type="medline">40492092</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Szeskin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Yehuda</surname><given-names>R</given-names> </name><name name-style="western"><surname>Shmueli</surname><given-names>O</given-names> </name><name name-style="western"><surname>Levy</surname><given-names>J</given-names> </name><name name-style="western"><surname>Joskowicz</surname><given-names>L</given-names> </name></person-group><article-title>A column-based deep learning method for the detection and quantification of atrophy associated with AMD in OCT scans</article-title><source>Med Image Anal</source><year>2021</year><month>08</month><volume>72</volume><fpage>102130</fpage><pub-id pub-id-type="doi">10.1016/j.media.2021.102130</pub-id><pub-id pub-id-type="medline">34198041</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Automatic geographic atrophy segmentation using optical attenuation in OCT scans with deep learning</article-title><source>Biomed Opt Express</source><year>2022</year><month>03</month><day>1</day><volume>13</volume><issue>3</issue><fpage>1328</fpage><lpage>1343</lpage><pub-id pub-id-type="doi">10.1364/BOE.449314</pub-id><pub-id pub-id-type="medline">35414972</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>G</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Liefers</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Clinically relevant deep learning for detection and quantification of geographic atrophy from optical coherence tomography: a model development and external validation study</article-title><source>Lancet Digit Health</source><year>2021</year><month>10</month><volume>3</volume><issue>10</issue><fpage>e665</fpage><lpage>e675</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(21)00134-5</pub-id><pub-id pub-id-type="medline">34509423</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Merle</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Guymer</surname><given-names>RH</given-names> </name><name name-style="western"><surname>Chia</surname><given-names>MA</given-names> </name><etal/></person-group><article-title>Mapping the impact: AI-driven quantification of geographic atrophy on OCT scans and its association with visual sensitivity loss</article-title><source>Br J Ophthalmol</source><year>2025</year><month>09</month><day>23</day><volume>109</volume><issue>10</issue><fpage>1187</fpage><lpage>1193</lpage><pub-id pub-id-type="doi">10.1136/bjo-2024-326603</pub-id><pub-id pub-id-type="medline">40490296</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Dow</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Jeong</surname><given-names>HK</given-names> </name><name name-style="western"><surname>Katz</surname><given-names>EA</given-names> </name><etal/></person-group><article-title>A deep-learning algorithm to predict short-term progression to geographic atrophy on spectral-domain optical coherence tomography</article-title><source>JAMA Ophthalmol</source><year>2023</year><month>11</month><day>1</day><volume>141</volume><issue>11</issue><fpage>1052</fpage><lpage>1061</lpage><pub-id pub-id-type="doi">10.1001/jamaophthalmol.2023.4659</pub-id><pub-id pub-id-type="medline">37856139</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gigon</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mosinska</surname><given-names>A</given-names> </name><name name-style="western"><surname>Montesel</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Personalized atrophy risk mapping in age-related macular degeneration</article-title><source>Transl Vis Sci Technol</source><year>2021</year><month>11</month><day>1</day><volume>10</volume><issue>13</issue><fpage>18</fpage><pub-id pub-id-type="doi">10.1167/tvst.10.13.18</pub-id><pub-id pub-id-type="medline">34767623</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cluceru</surname><given-names>J</given-names> </name><name name-style="western"><surname>Anegondi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>Topographic clinical insights from deep learning-based geographic atrophy progression prediction</article-title><source>Transl Vis Sci Technol</source><year>2024</year><month>08</month><day>1</day><volume>13</volume><issue>8</issue><fpage>6</fpage><pub-id pub-id-type="doi">10.1167/tvst.13.8.6</pub-id><pub-id pub-id-type="medline">39102242</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Anegondi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Steffen</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Deep learning to predict geographic atrophy area and growth rate from multimodal imaging</article-title><source>Ophthalmol Retina</source><year>2023</year><month>03</month><volume>7</volume><issue>3</issue><fpage>243</fpage><lpage>252</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.08.018</pub-id><pub-id pub-id-type="medline">36038116</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Salvi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cluceru</surname><given-names>J</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>Deep learning to predict the future growth of geographic atrophy from fundus 
autofluorescence</article-title><source>Ophthalmol Sci</source><year>2025</year><volume>5</volume><issue>2</issue><fpage>100635</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2024.100635</pub-id><pub-id pub-id-type="medline">39758130</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yoshida</surname><given-names>K</given-names> </name><name name-style="western"><surname>Anegondi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Pely</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Deep learning approaches to predict geographic atrophy progression using three-dimensional OCT imaging</article-title><source>Transl Vis Sci Technol</source><year>2025</year><month>02</month><day>3</day><volume>14</volume><issue>2</issue><fpage>11</fpage><pub-id pub-id-type="doi">10.1167/tvst.14.2.11</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ji</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>An integrated time adaptive geographic atrophy prediction model for SD-OCT images</article-title><source>Med Image Anal</source><year>2021</year><month>02</month><volume>68</volume><fpage>101893</fpage><pub-id pub-id-type="doi">10.1016/j.media.2020.101893</pub-id><pub-id pub-id-type="medline">33260118</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Vejalla</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Reverse engineering for reconstructing baseline features of dry age-related macular degeneration in optical coherence tomography</article-title><source>Sci Rep</source><year>2022</year><volume>12</volume><issue>1</issue><fpage>22620</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-27140-8</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reiter</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Lachinov</surname><given-names>D</given-names> </name><name name-style="western"><surname>B&#x00FC;hl</surname><given-names>W</given-names> </name><etal/></person-group><article-title>A novel management challenge in age-related macular degeneration: artificial intelligence and expert prediction of geographic atrophy</article-title><source>Ophthalmol Retina</source><year>2025</year><month>05</month><volume>9</volume><issue>5</issue><fpage>421</fpage><lpage>430</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2024.10.029</pub-id><pub-id pub-id-type="medline">39522752</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><name
name-style="western"><surname>Lachinov</surname><given-names>D</given-names> </name><name name-style="western"><surname>Riedl</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Clinical validation for automated geographic atrophy monitoring on OCT under complement inhibitory treatment</article-title><source>Sci Rep</source><year>2023</year><month>04</month><day>29</day><volume>13</volume><issue>1</issue><fpage>7028</fpage><pub-id pub-id-type="doi">10.1038/s41598-023-34139-2</pub-id><pub-id pub-id-type="medline">37120456</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lachinov</surname><given-names>D</given-names> </name><name name-style="western"><surname>Reiter</surname><given-names>GS</given-names> </name><etal/></person-group><article-title>Deep learning-based prediction of individual geographic atrophy progression from a single baseline OCT</article-title><source>Ophthalmol Sci</source><year>2024</year><volume>4</volume><issue>4</issue><fpage>100466</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2024.100466</pub-id><pub-id pub-id-type="medline">38591046</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Li</surname><given-names>F</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>H</given-names> </name><etal/></person-group><article-title>ADAM challenge: detecting age-related macular degeneration from fundus images</article-title><source>IEEE Trans Med Imaging</source><year>2022</year><month>10</month><volume>41</volume><issue>10</issue><fpage>2828</fpage><lpage>2847</lpage><pub-id pub-id-type="doi">10.1109/TMI.2022.3172773</pub-id><pub-id pub-id-type="medline">35507621</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mukherjee</surname><given-names>S</given-names> </name><name name-style="western"><surname>Arunachalam</surname><given-names>T</given-names> </name><name name-style="western"><surname>Duic</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Structure-function relationships in geographic atrophy based on mesopic microperimetry, fundus autofluorescence, and optical coherence tomography</article-title><source>Transl Vis Sci Technol</source><year>2025</year><month>02</month><day>3</day><volume>14</volume><issue>2</issue><fpage>7</fpage><pub-id pub-id-type="doi">10.1167/tvst.14.2.7</pub-id><pub-id pub-id-type="medline">39908134</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Keilhauer</surname><given-names>CN</given-names> </name><name name-style="western"><surname>Delori</surname><given-names>FC</given-names> </name></person-group><article-title>Near-infrared autofluorescence imaging of the fundus: visualization of ocular melanin</article-title><source>Invest Ophthalmol Vis Sci</source><year>2006</year><month>08</month><volume>47</volume><issue>8</issue><fpage>3556</fpage><lpage>3564</lpage><pub-id 
pub-id-type="doi">10.1167/iovs.06-0122</pub-id><pub-id pub-id-type="medline">16877429</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jaffe</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Chakravarthy</surname><given-names>U</given-names> </name><name name-style="western"><surname>Freund</surname><given-names>KB</given-names> </name><etal/></person-group><article-title>Imaging features associated with progression to geographic atrophy in age-related macular degeneration: Classification of Atrophy Meeting Report 5</article-title><source>Ophthalmol Retina</source><year>2021</year><month>09</month><volume>5</volume><issue>9</issue><fpage>855</fpage><lpage>867</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2020.12.009</pub-id><pub-id pub-id-type="medline">33348085</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Clevenger</surname><given-names>L</given-names> </name><name name-style="western"><surname>Rachitskaya</surname><given-names>A</given-names> </name></person-group><article-title>Identifying geographic atrophy</article-title><source>Curr Opin Ophthalmol</source><year>2023</year><month>05</month><day>1</day><volume>34</volume><issue>3</issue><fpage>195</fpage><lpage>202</lpage><pub-id pub-id-type="doi">10.1097/ICU.0000000000000952</pub-id><pub-id pub-id-type="medline">36943458</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Velaga</surname><given-names>SB</given-names> </name><name name-style="western"><surname>Nittala</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Hariri</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sadda</surname><given-names>SR</given-names> </name></person-group><article-title>Correlation between fundus autofluorescence and en face OCT measurements of geographic atrophy</article-title><source>Ophthalmol Retina</source><year>2022</year><month>08</month><volume>6</volume><issue>8</issue><fpage>676</fpage><lpage>683</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.03.017</pub-id><pub-id pub-id-type="medline">35338026</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Correlations between choriocapillaris and choroidal measurements and the growth of geographic atrophy using swept source OCT imaging</article-title><source>Am J Ophthalmol</source><year>2021</year><month>04</month><volume>224</volume><fpage>321</fpage><lpage>331</lpage><pub-id pub-id-type="doi">10.1016/j.ajo.2020.12.015</pub-id><pub-id pub-id-type="medline">33359715</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wei</surname><given-names>W</given-names> </name><name 
name-style="western"><surname>Anantharanjit</surname><given-names>R</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>RP</given-names> </name><name name-style="western"><surname>Cordeiro</surname><given-names>MF</given-names> </name></person-group><article-title>Detection of macular atrophy in age-related macular degeneration aided by artificial intelligence</article-title><source>Expert Rev Mol Diagn</source><year>2023</year><month>06</month><volume>23</volume><issue>6</issue><fpage>485</fpage><lpage>494</lpage><pub-id pub-id-type="doi">10.1080/14737159.2023.2208751</pub-id><pub-id pub-id-type="medline">37144908</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Enzendorfer</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Schmidt-Erfurth</surname><given-names>U</given-names> </name></person-group><article-title>Artificial intelligence for geographic atrophy: pearls and pitfalls</article-title><source>Curr Opin Ophthalmol</source><year>2024</year><month>11</month><day>1</day><volume>35</volume><issue>6</issue><fpage>455</fpage><lpage>462</lpage><pub-id pub-id-type="doi">10.1097/ICU.0000000000001085</pub-id><pub-id pub-id-type="medline">39259599</pub-id></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Miere</surname><given-names>A</given-names> </name><name name-style="western"><surname>Capuano</surname><given-names>V</given-names> </name><name name-style="western"><surname>Kessler</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Deep learning-based classification of retinal atrophy using fundus autofluorescence imaging</article-title><source>Comput Biol Med</source><year>2021</year><month>03</month><volume>130</volume><fpage>104198</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.104198</pub-id><pub-id pub-id-type="medline">33383315</pub-id></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pfau</surname><given-names>M</given-names> </name><name name-style="western"><surname>Schmitz-Valckenberg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ribeiro</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Association of complement C3 inhibitor pegcetacoplan with reduced photoreceptor degeneration beyond areas of geographic atrophy</article-title><source>Sci Rep</source><year>2022</year><month>10</month><day>25</day><volume>12</volume><issue>1</issue><fpage>17870</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-22404-9</pub-id><pub-id pub-id-type="medline">36284220</pub-id></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Riedl</surname><given-names>S</given-names> </name><name name-style="western"><surname>Vogl</surname><given-names>WD</given-names> </name><name name-style="western"><surname>Mai</surname><given-names>J</given-names> </name><etal/></person-group><article-title>The effect of pegcetacoplan treatment on photoreceptor maintenance in geographic atrophy monitored by artificial intelligence-based OCT 
analysis</article-title><source>Ophthalmol Retina</source><year>2022</year><month>11</month><volume>6</volume><issue>11</issue><fpage>1009</fpage><lpage>1018</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.05.030</pub-id><pub-id pub-id-type="medline">35667569</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Guni</surname><given-names>A</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><etal/></person-group><article-title>The STARD-AI reporting guideline for diagnostic accuracy studies using artificial intelligence</article-title><source>Nat Med</source><year>2025</year><month>10</month><volume>31</volume><issue>10</issue><fpage>3283</fpage><lpage>3289</lpage><pub-id pub-id-type="doi">10.1038/s41591-025-03953-8</pub-id><pub-id pub-id-type="medline">40954311</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mongan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Moy</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kahn</surname><given-names>CE</given-names> </name></person-group><article-title>Checklist for Artificial Intelligence in Medical Imaging (CLAIM): a guide for authors and reviewers</article-title><source>Radiol Artif Intell</source><year>2020</year><month>03</month><volume>2</volume><issue>2</issue><fpage>e200029</fpage><pub-id pub-id-type="doi">10.1148/ryai.2020200029</pub-id><pub-id pub-id-type="medline">33937821</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>YX</given-names> </name><name name-style="western"><surname>Zeng</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Artificial intelligence-enhanced retinal imaging as a biomarker for systemic diseases</article-title><source>Theranostics</source><year>2025</year><volume>15</volume><issue>8</issue><fpage>3223</fpage><lpage>3233</lpage><pub-id pub-id-type="doi">10.7150/thno.100786</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tan</surname><given-names>YY</given-names> </name><name name-style="western"><surname>Kang</surname><given-names>HG</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>CJ</given-names> </name><etal/></person-group><article-title>Correction: prognostic potentials of AI in ophthalmology: systemic disease forecasting via retinal imaging</article-title><source>Eye Vis (Lond)</source><year>2024</year><volume>11</volume><issue>1</issue><pub-id pub-id-type="doi">10.1186/s40662-024-00399-w</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search strategies in all databases.</p><media xlink:href="jmir_v27i1e81328_app1.docx" xlink:title="DOCX File, 14 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix
2</label><p>Summary of studies on geographic atrophy (GA) detection, GA assessment, GA prediction, quality assessment using Quality Assessment of Diagnostic Accuracy Studies&#x2013;Artificial Intelligence (QUADAS-AI), and risk of bias evaluation using the Prediction Model Risk of Bias Assessment Tool (PROBAST).</p><media xlink:href="jmir_v27i1e81328_app2.xlsx" xlink:title="XLSX File, 50 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Risk of bias and applicability assessment for geographic atrophy (GA) prediction studies, comparative performance of artificial intelligence (AI) models and features across imaging modalities for GA detection and management, and comparison of AI model architectures in GA detection and management.</p><media xlink:href="jmir_v27i1e81328_app3.xlsx" xlink:title="XLSX File, 15 KB"/></supplementary-material><supplementary-material id="app4"><label>Checklist 1</label><p>PRISMA 2020 checklist.</p><media xlink:href="jmir_v27i1e81328_app4.docx" xlink:title="DOCX File, 29 KB"/></supplementary-material><supplementary-material id="app5"><label>Checklist 2</label><p>PRISMA-S checklist.</p><media xlink:href="jmir_v27i1e81328_app5.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material></app-group></back></article>