<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v23i9e27414</article-id>
      <article-id pub-id-type="pmid">34236992</article-id>
      <article-id pub-id-type="doi">10.2196/27414</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Accuracy of Using Generative Adversarial Networks for Glaucoma Detection: Systematic Review and Bibliometric Analysis</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kukafka</surname>
            <given-names>Rita</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Ahmed Kamal</surname>
            <given-names>Manar</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mahmoud</surname>
            <given-names>Randa Salah</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lim</surname>
            <given-names>Gilbert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Naser</surname>
            <given-names>Ahmed</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Gabashvili</surname>
            <given-names>Irene</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Saeed</surname>
            <given-names>Ali Q</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Center for Cyber Security, Faculty of Information Science &#38; Technology</institution>
            <institution>Universiti Kebangsaan Malaysia</institution>
            <addr-line>Bangi Street, Bangi</addr-line>
            <addr-line>Selangor, 43600</addr-line>
            <country>Malaysia</country>
            <phone>60 7740870504</phone>
            <email>ali.qasim@ntu.edu.iq</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2276-3776</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Sheikh Abdullah</surname>
            <given-names>Siti Norul Huda</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2602-7805</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Che-Hamzah</surname>
            <given-names>Jemaima</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3571-2281</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Abdul Ghani</surname>
            <given-names>Ahmad Tarmizi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9765-1835</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Center for Cyber Security, Faculty of Information Science &#38; Technology</institution>
        <institution>Universiti Kebangsaan Malaysia</institution>
        <addr-line>Selangor</addr-line>
        <country>Malaysia</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Computer Center</institution>
        <institution>Northern Technical University</institution>
        <addr-line>Ninevah</addr-line>
        <country>Iraq</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Ophthalmology</institution>
        <institution>Faculty of Medicine</institution>
        <institution>Universiti Kebangsaan Malaysia</institution>
        <addr-line>Cheras, Kuala Lumpur</addr-line>
        <country>Malaysia</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ali Q Saeed <email>ali.qasim@ntu.edu.iq</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>9</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>21</day>
        <month>9</month>
        <year>2021</year>
      </pub-date>
      <volume>23</volume>
      <issue>9</issue>
      <elocation-id>e27414</elocation-id>
      <history>
        <date date-type="received">
          <day>26</day>
          <month>1</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>6</day>
          <month>4</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>11</day>
          <month>5</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>5</day>
          <month>7</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Ali Q Saeed, Siti Norul Huda Sheikh Abdullah, Jemaima Che-Hamzah, Ahmad Tarmizi Abdul Ghani. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 21.09.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2021/9/e27414" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Glaucoma leads to irreversible blindness. Globally, it is the second most common retinal disease that leads to blindness, slightly less common than cataracts. Therefore, there is a great need to avoid the silent growth of this disease using recently developed generative adversarial networks (GANs).</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This paper aims to introduce a GAN technology for the diagnosis of eye disorders, particularly glaucoma. This paper illustrates deep adversarial learning as a potential diagnostic tool and the challenges involved in its implementation. This study describes and analyzes many of the pitfalls and problems that researchers will need to overcome to implement this kind of technology.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>To organize this review comprehensively, articles and reviews were collected using the following keywords: (“Glaucoma,” “optic disc,” “blood vessels”) and (“receptive field,” “loss function,” “GAN,” “Generative Adversarial Network,” “Deep learning,” “CNN,” “convolutional neural network” OR encoder). The records were identified from 5 highly reputed databases: IEEE Xplore, Web of Science, Scopus, ScienceDirect, and PubMed. These libraries broadly cover the technical and medical literature. Publications within the last 5 years, specifically 2015-2020, were included because the target GAN technique was invented only in 2014 and the publishing date of the collected papers was not earlier than 2016. Duplicate records were removed, and irrelevant titles and abstracts were excluded. In addition, we excluded papers that used optical coherence tomography and visual field images, except for those with 2D images. A large-scale systematic analysis was performed, and then a summarized taxonomy was generated. Furthermore, the results of the collected articles were summarized and a visual representation of the results was presented on a T-shaped matrix diagram. This study was conducted between March 2020 and November 2020.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We found 59 articles after conducting a comprehensive survey of the literature. Among the 59 articles, 30 present actual attempts to synthesize images and provide accurate segmentation/classification using single/multiple landmarks or share certain experiences. The other 29 articles discuss the recent advances in GANs, do practical experiments, and contain analytical studies of retinal disease.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Recent deep learning techniques, namely GANs, have shown encouraging performance in retinal disease detection. Although this methodology involves an extensive computing budget and optimization process, it saturates the greedy nature of deep learning techniques by synthesizing images and solves major medical issues. This paper contributes to this research field by offering a thorough analysis of existing works, highlighting current limitations, and suggesting alternatives to support other researchers and participants in further improving and strengthening future work. Finally, new directions for this research have been identified.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>glaucoma</kwd>
        <kwd>generative adversarial network</kwd>
        <kwd>deep learning</kwd>
        <kwd>systematic literature review</kwd>
        <kwd>retinal disease</kwd>
        <kwd>blood vessels</kwd>
        <kwd>optic disc</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Medical and Statistical Overview</title>
        <p>Blindness and visual impairments often result from cataracts, age-related macular degeneration, and glaucoma [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Glaucoma is a neurodegenerative disease that damages the optic nerve and causes visual field loss [<xref ref-type="bibr" rid="ref3">3</xref>]. As it is an asymptomatic disease, it is known as the silent thief of sight [<xref ref-type="bibr" rid="ref4">4</xref>], and patients are unaware of the condition until their vision is irreversibly impaired. Among affected individuals, 50% are ignorant of the disorder [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. Early phases of glaucoma have no symptoms or visual field changes [<xref ref-type="bibr" rid="ref8">8</xref>]. As the disease progresses, a slow narrowing of the visual field can occur. If left untreated, glaucoma may contribute to total blindness [<xref ref-type="bibr" rid="ref9">9</xref>]. Loss of vision usually begins at the periphery of the visual field and then approaches the center.</p>
        <p>Statistically, glaucoma affects millions of people globally, with more than 64 million cases recorded in 2013, and other studies have estimated that 76 million people will be affected by 2020 and 111.5 million by 2040 [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Glaucoma is the second leading cause of blindness worldwide, preceded by cataracts [<xref ref-type="bibr" rid="ref11">11</xref>], and it impacts 4.5 million individuals [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref12">12</xref>], more than 10% of the gross population [<xref ref-type="bibr" rid="ref10">10</xref>]. Owing to the asymptomatic nature of glaucoma, approximately 70% of individuals with glaucoma are unaware of the illness’s existence [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>] in the early stage. Thus, we need to provide an early detection and evaluation method [<xref ref-type="bibr" rid="ref15">15</xref>]. Once glaucoma is detected, a more effective follow-up takes place as a cure can slow down the progression of the disease [<xref ref-type="bibr" rid="ref8">8</xref>].</p>
        <p>Cataracts may be reversed by surgery, while glaucoma causes lifelong blindness. Elevated intraocular pressure (IOP) is the most common cause of glaucoma. The tonometer measures IOP. However, IOP is not always an accurate and adequate indicator of glaucoma, because glaucoma does not always cause a rise in IOP [<xref ref-type="bibr" rid="ref16">16</xref>] but rather a deterioration of the optic nerve head (ONH). Visual information flows through the ONH to the brain. The ONH consists of a bright spherical area called the optic disc (OD) and a smaller, brighter circle-like area within it called the optic cup (OC). <xref rid="figure1" ref-type="fig">Figure 1</xref> shows these structures in ocular images.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Fundus image structure.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>ONH assessment is a widely used glaucoma screening tool that utilizes differential division to distinguish between glaucomatous and normal images [<xref ref-type="bibr" rid="ref17">17</xref>]. Manual calculations of ONH geometric structures, such as the cup-to-disc ratio (CDR); inferior, superior, nasal, and temporal (ISNT) rule; disc diameter; and rim area, are recommended as diagnostic features for glaucoma screening [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. Among them, the CDR is a reliable therapeutic feature for early glaucoma screening and diagnosis [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. Each of the derived CDR parameters (diameter or area) is the ratio between the OC and the OD. CDR values rise when the illness progresses and become higher than approximately 0.6-0.7 when the patient has a stronger chance of developing glaucoma [<xref ref-type="bibr" rid="ref23">23</xref>]. Based on an earlier study [<xref ref-type="bibr" rid="ref24">24</xref>], a CDR of at least 0.65 is deemed glaucomatous in clinical practice. The CDR score tracks the development of glaucoma over time, effectively screening the condition early [<xref ref-type="bibr" rid="ref25">25</xref>]. Currently, to check for retinal diseases, specialists tend to manually extract the blood vessel (BV), OD, or OC from retinal images. Accurate segmentation of the retinal structure is very important during the diagnostic process. However, doing this process manually is very labor intensive, time consuming, and risky in terms of human mistakes. Furthermore, the analysis results may lack objectivity, as different experts may produce different results. Therefore, it is important to automate retinal image segmentation/classification while minimizing expert interference.</p>
      </sec>
      <sec>
        <title>Research Background</title>
        <p>The development of medical imaging technology has helped to accelerate the detection of diseases. Additionally, several studies have been conducted using image processing techniques to automatically process medical images without the intervention of experts [<xref ref-type="bibr" rid="ref26">26</xref>]. Several studies [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>] have examined vascular tracking and OD and OC segmentation using fundoscopic images. The main segmentation techniques depend on visual features such as color and contrast thresholding, region segmentation, and boundary recognition. Such methods use a learned classifier to classify pixels as foreground pixels (eg, OD, OC, or BV) or as background pixels (regions out of the area of interest) [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. However, most of these methods are based on hand-crafted features (eg, texture, red green blue [RGB] color, gradient and Gabor filter), which are susceptible to low image contrast, pathological regions, and have a lack of deep feature extraction.</p>
        <p>In recent years, automatic learning has been significantly improved with the assistance of machine learning (ML) techniques [<xref ref-type="bibr" rid="ref31">31</xref>]. According to several studies [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>], ML and deep learning (DL) algorithms have evolved to the point that they can compete with and sometimes even outperform humans on certain tasks, such as object detection [<xref ref-type="bibr" rid="ref34">34</xref>] and image classification on ImageNet [<xref ref-type="bibr" rid="ref35">35</xref>]. Currently, deep learning methods (DLMs) are an active research field because they can automatically generate and learn extremely complex features from input data. In particular, DLMs with deeper and complicated perceptron layers [eg, convolutional neural networks (CNNs)] have shown better performance in object detection than other methods [<xref ref-type="bibr" rid="ref33">33</xref>]. Researchers have attempted to use various types of architectures, such as GoogLeNet [<xref ref-type="bibr" rid="ref36">36</xref>], AlexNet [<xref ref-type="bibr" rid="ref33">33</xref>], and DenseNet [<xref ref-type="bibr" rid="ref37">37</xref>], for glaucoma diagnosis with the introduction of deep neural networks. Such research mainly focuses on 2 aspects: using DL for complex and deep feature extraction and utilizing medical features and spatial domain knowledge in the detection process. However, the use of deep fully connected networks is susceptible to imbalanced learning problems such as high false-negative or false-positive rates, leading to more fake or skinny branches than those of the ground truth [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. In other words, retinal BV segmentation still has issues such as false pathological information segmentation and low microvascular segmentation [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
        <p>For addressing complex learning issues, deep architectures often have advantages over shallow architectures; for example, deep CNNs have demonstrated significant efficiency improvements over conventional vision-based models [<xref ref-type="bibr" rid="ref41">41</xref>]. A fully connected convolutional network has been used to address insufficient public data. Such methods, however, create very fuzzy vessels with false positives along with tiny and weak roots. This error primarily occurs because the CNNs used in current methods depend solely on pixel-level objective feature to equate the standard image to the image created by the model and are incapable of adapting actively to the fundus image of the natural vascular structure [<xref ref-type="bibr" rid="ref42">42</xref>]. Empirical studies have proven that deep CNNs can learn invariant representations and attain human-level success if sufficient training data are provided. However, one of the leading shortfalls of DLMs is the lack of available data. Medical data annotation often requires specific domain experts. This shortage leads to the need for CNN training approaches with a limited number of annotated data. However, this can easily lead to underfitting, and as a result, high error rates on both training and testing data are recorded. Lahiri et al [<xref ref-type="bibr" rid="ref43">43</xref>] demonstrated the effectiveness of using generative adversarial networks (GANs) [<xref ref-type="bibr" rid="ref44">44</xref>] to perform some discriminative task with only 0.8%-1.6% of the amount of annotation data used by other methods.</p>
        <p>GANs belong to the family of unsupervised learning algorithms that have proven their merits in generating synthetic images close to real images and solving image-to-image translation problems in the natural domain [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. GANs have gradually shown their extraordinary ability and have started to shine brilliantly in various application fields [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. Inspired by the prevailing learning capability of GANs, Wu et al [<xref ref-type="bibr" rid="ref49">49</xref>] proposed the generative adversarial network with U-net, referred as (U-GAN), which includes an attention gate model in the generator and a densely connected convolutional network to segment the BVs automatically. Lahiri et al [<xref ref-type="bibr" rid="ref50">50</xref>] proposed deep convolutional GANs (DCGANs) for retinal segmentation to segment the region of interest (ROI) from a given image. In addition to segmentation tasks, the synthesis of retinal images is a large part of the literature. Haoqi and Ogawara [<xref ref-type="bibr" rid="ref51">51</xref>] trained a GAN model to learn the mappings of vessels from retinal images to segmented images for training a model to generate a synthesized image close to a given real image.</p>
        <p>To date, several review articles summarizing the technology of DL in ophthalmology have been published [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref52">52</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Nevertheless, none of them have particularly focused on the emerging breakthrough GAN techniques using fundus photographs. Moreover, despite the rapid development of telecommunication technology, only a few study groups have examined the possibility of integrating artificial intelligence (AI) technologies with teleophthalmology [<xref ref-type="bibr" rid="ref56">56</xref>]. To the best of our knowledge, no researchers have adopted telescreening for glaucoma using DL techniques, particularly the GAN.</p>
        <p>Shedding light on the importance of telecommunication technology in DL techniques is a current and very urgent need. Alongside the emergence of newer low-cost handheld devices, glaucoma screening will become more available, even to distant and poor communities. In addition, maintaining social distance is very important for mitigating the spread of the coronavirus pandemic. This paper summarizes the work in the literature on glaucoma diagnosis and highlights the challenges and gaps of current studies to uncover the possibilities of filling these gaps with the recommended suggestions. We aim to elucidate all research efforts, such as the GAN architectures mentioned earlier, that have been developed in response to the new and disruptive technology, mapping the research landscape from the literature onto a coherent taxonomy of the key features that characterize such an emerging line of research. Finally, the future work of this research will be proposed and described in detail.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Basic Theory of GANs</title>
        <p>We start by reviewing the concept of GANs [<xref ref-type="bibr" rid="ref44">44</xref>]. GANs consist of 2 separate neural networks, a generation network (G) and a discriminator network (D), plus a noise vector (z) sampled from a known distribution (eg, a Gaussian distribution), which is used to generate data points (fake samples; see <xref rid="figure2" ref-type="fig">Figure 2</xref>). A 2-player min-max game inspires the basic idea of this technique. The goal is to train the generator G to learn to capture the potential distribution in the real data sample and generate a new sample close to the real data to deceive the discriminator. The discriminator D is a binary classifier that attempts to discriminate whether the input data are real or fake [<xref ref-type="bibr" rid="ref44">44</xref>]. To win the game, both G and D need to continuously improve their generation and discrimination capabilities, respectively. The training process lasts until both G and D reach a convergence point (Nash equilibrium), where G generates an output distribution very close to the real data distribution [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref58">58</xref>].</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>GAN architecture. GAN: generative adversarial network.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Mathematically, let G be parameterized by θ, which takes random noise z as input and produces synthetic images G(z) as output. The generated G(z) is mapped from a distribution G(z; θ) ∼ <italic>p<sub>g</sub></italic>. Additionally, the training data set x is sampled from the real data distribution <italic>p</italic><sub>data</sub>, and the objective function of the generator network is used to train G to approximate <italic>p</italic><sub>data</sub> using <italic>p</italic><sub>g</sub>. By contrast, the discriminator (D) takes either the original image x or G(z) as input and indicates whether the input is from a true data distribution (x) or a synthetic data distribution G(z) by outputting a probability of D(x) or D(G[z]). This can be seen in the following equation, where <italic>p</italic><sub>data</sub>(x) is the true data distribution and <italic>p</italic><sub>z</sub>(z) is the noise distribution.</p>
        <disp-formula>
          <graphic xlink:href="jmir_v23i9e27414_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>However, the training mechanism of such a model is critical. Unbalanced training between the G and D networks leads to model collapse. This happens when D is trained much better than G. In this case, D is able to easily discriminate between the real and synthetic images generated by G and reject all its outputs; thus, the loss log{1 − D(G[z])} saturates, and G learns nothing from the zero gradient. To avoid the model collapse issue, the loss function of G should be trained to maximize logD(G[z]) instead of minimizing log{1 − D(G[z])}. This can avoid the saturation of the gradient and provides the same gradient direction as that yielded by the old loss function.</p>
      </sec>
      <sec>
        <title>Extension Models of GANs</title>
        <p>The first GAN [<xref ref-type="bibr" rid="ref44">44</xref>] was composed of fully connected layers. Later, the DCGAN [<xref ref-type="bibr" rid="ref59">59</xref>] introduced the use of fully CNNs to increase training stability and improve efficiency. Since then, many GAN models have followed this set up as the main components of GAN architecture. Unlike the DCGAN, the Wasserstein GAN (WGAN) [<xref ref-type="bibr" rid="ref60">60</xref>] increases the permutation in the fully connected layer. In this model, the Wasserstein distance metric is used instead of the Jensen–Shannon divergence to measure the distance between the generated data distribution and the real data distribution. Therefore, the problems of model collapse and training instability were partially solved in this model. Subsequently, an improved version of the WGAN called the WGAN-GP (gradient penalty) [<xref ref-type="bibr" rid="ref61">61</xref>] was proposed. The WGAN-GP depends on gradient penalty replacement so that it can solve slow training problems encountered by the WGAN. Moreover, inspired by the WGAN, Mao et al [<xref ref-type="bibr" rid="ref62">62</xref>] proposed the least-squares GAN (LSGAN) to improve the quality of the generated images. The main idea of the LSGAN is to use a new loss function in the D network for smooth and unsaturated gradients.</p>
        <p>The original GAN randomly generated a data distribution that is beyond our control, as the output depends on random noise. Therefore, a conditional GAN (cGAN) was invented to add a vector c as a conditional input to the noise vector z so that the generator could generate the required data. Hence, the generator output of the cGAN was defined by G(c,z).</p>
        <p>Since the cGAN was proposed, many articles have used the cGAN applications, for example, Pix2Pix [<xref ref-type="bibr" rid="ref45">45</xref>], a cGAN-based technique that uses a PatchGAN discriminator to map a set of images to another image using N × N pixels. It classifies each N × N patch of the image and averages all the scores of patches to obtain the final score for the image. The main limitation of Pix2Pix is that it requires images x1 and y1 that are paired with each other in the training stage. By contrast, CycleGAN [<xref ref-type="bibr" rid="ref47">47</xref>], which is also a cGAN-based technique, utilizes an image translation method that does not need paired data, even though Pix2Pix still outperforms CycleGAN by a remarkable margin.</p>
        <p>Another variation of the GAN combines a variational autoencoder (VAE) and a GAN in a single model named VAE-GAN [<xref ref-type="bibr" rid="ref63">63</xref>]. The idea behind this technique is to exploit the strength of both the GAN and VAE, as the GAN can generate sharp images but misses some modes while the VAE produces blurry images but with a large variety. Studies have demonstrated that VAE-GAN images are better than those produced by the VAE or GAN alone.</p>
      </sec>
      <sec>
        <title>Information Sources</title>
        <p>Guided by [<xref ref-type="bibr" rid="ref64">64</xref>], we conducted a comprehensive search to find all GANs-based articles related to glaucoma by searching the best and most reliable libraries: (1) Scopus, (2) ScienceDirect, (3) IEEE Xplore, (4) Web of Science, and (5) PubMed Central. This collection includes technical and medical literature, perfectly reflecting all research activities in this discipline.</p>
      </sec>
      <sec>
        <title>Study Selection Procedure</title>
        <p>The method for choosing appropriate studies was based on 2 stages: screening and filtering. Both stages successively applied the same inclusion and exclusion criteria. During the first stage, duplicates and studies found to be unrelated by title and abstract skimming were omitted. Then, the resulting set of papers was entirely read, analyzed, and summarized in the filtration stage.</p>
      </sec>
      <sec>
        <title>Search</title>
        <p>This work was carried out between March 2020 and November 2020. Various keyword combinations were used in the search of highly reputable libraries (IEEE Xplore, Science Direct, PubMed, Scopus, and Web of Science). Our search query consists of 2 parts that are connected with each other using the operator “and.” The following set of keywords (“glaucoma,” “optic disk,” “blood vessels”) and (“receptive field,” “loss function,” “GAN,” “generative adversarial network,” “deep learning,” “convolutional neural network,” “CNN,” “encoder”) belong to the first and second parts, respectively. The operator “or” is used to connect keywords within the same part. Based on this, our study scope is formulated.</p>
        <p>The quest focused on different journals and conferences and omitted books and all other forms of literature. Therefore, we mainly concentrated on up-to-date and applicable scientific studies related to the use of GANs in retinal disease, especially glaucoma. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows the research query and inclusion criteria used in this work.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Flowchart of the study selection with the research query and inclusion criteria. GAN: generative adversarial network; WOS: Web of Science.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Validity of the Collected Papers (Scope Validation)</title>
        <p>The total number of keywords in the collected papers was 115. To validate our research scope, we analyzed these keywords and categorized them according to their co-occurrences. Then, we set a threshold indicating the co-occurrences of each keyword across all papers. Let k≥3, where k is a threshold. As a result, we obtained 15 keywords out of 115 that met the threshold. That is, each of these 15 keywords occurred at least three times in all the collected papers.</p>
        <p><xref rid="figure4" ref-type="fig">Figure 4</xref> illustrates the connections of these 15 keywords to each other. The size of each circle indicates how frequently a single corresponding keyword occurred. The more frequently a keyword occurred, the larger its circle size; for example, the keyword “deep learning” has the biggest circle size in the diagram, which means it is the keyword that appeared most frequently in the collected papers. The second factor is the color, which indicates how often a single keyword occurred per year. The last factor is the total link strength, which indicates the total connection of a keyword to other keywords. The more frequently 2 keywords appeared in the same article, the thicker is the line drawn between them. For example, the keywords “deep learning” and “glaucoma” were linked by a thicker line than the line between the keywords “generative adversarial network” and “glaucoma,” which means that both “deep learning” and “glaucoma” appeared together in the collected articles more than the keywords “generative adversarial network” and “glaucoma” did. This indication reveals that GANs have been used less than other DL techniques in glaucoma detection.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Scope validation diagram.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p><xref ref-type="table" rid="table1">Table 1</xref> shows the occurrences and the total link strength of the 15 keywords that met our threshold (k≥3). Deep learning is the most frequently occurring keyword, which has uniquely appeared 20 times in the collected articles and 27 times with other different keywords. GANs occurred 17 times, with 18 connections to other keywords, while the keyword glaucoma occurred 8 times, with 12 connections to other keywords. In conclusion, these highest scores for the aforementioned keywords empirically demonstrated the validity of our search query that is used to collect literature publications. Furthermore, it proves that our research scope revolves around 3 main keywords, namely, deep learning, generative adversarial network, and glaucoma, as they have the biggest circle sizes with the thickest connection among them.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Keywords occurrence.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Keywords</td>
                <td>Occurrences</td>
                <td>Total link strength</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Techniques</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Deep learning</td>
                <td>20</td>
                <td>27</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generative adversarial network(s)/GAN</td>
                <td>17</td>
                <td>18</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Artificial intelligence</td>
                <td>3</td>
                <td>8</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Machine learning</td>
                <td>3</td>
                <td>8</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Diseases</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Glaucoma</td>
                <td>8</td>
                <td>12</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Diabetic retinopathy</td>
                <td>3</td>
                <td>6</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Imaging</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fundus image</td>
                <td>3</td>
                <td>6</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Medical imaging</td>
                <td>4</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Papers’ contribution</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Adversarial learning</td>
                <td>3</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Optic disc segmentation</td>
                <td>3</td>
                <td>6</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Retinal vessel segmentation</td>
                <td>3</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generative models</td>
                <td>3</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Retinal image synthesis</td>
                <td>3</td>
                <td>3</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Inclusion and Exclusion Criteria</title>
        <p>In this section, papers that met the criteria in <xref rid="figure3" ref-type="fig">Figure 3</xref> were included. We taxonomized the included papers on a general and in-depth diagram consisting of 2 paper groups, namely, the development studies group and the reviews and surveys group. The papers in the first group were classified according to 8 consecutive layers. In the literature, researchers classified GANs into 2-4 categories; these categories were separately used by different researchers, as referenced accordingly in the points below. However, in our taxonomy, we combined them all. Furthermore, we added 4 more classification criteria as follows: (1) method architecture (direct, hierarchical, iterative) [<xref ref-type="bibr" rid="ref32">32</xref>]; (2) model structure (2 players, multiple players) [<xref ref-type="bibr" rid="ref65">65</xref>]; (3) GAN category (optimization function, structure, and conditional) [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref68">68</xref>]; and (4) generator backbone (U-Net based or CNN based) [<xref ref-type="bibr" rid="ref69">69</xref>].</p>
        <p>Further, we added 4 additional categories as follows: (1) type of GAN used in a paper (eg, variational autoencoder with GAN [VAEGAN], DCGAN, cGAN, CycleGAN); (2) discriminator’s receptive field (PixelGAN, PatchGAN, ImageGAN); (3) landmarks used during the segmentation/classification process (single, multiple); (4) paper contributions (segmentation, classification, image synthesis, mixed).</p>
        <p>The exclusion criteria followed in this paper were as follows: (1) ML approaches, (2) 3D-based imaging methods (optical coherence tomography), (3) between-class papers, and (4) out-of-scope papers.</p>
      </sec>
      <sec>
        <title>Data Collection Process</title>
        <p>All papers from different sources were summarized and saved in a single spreadsheet file for simplicity and a quick review. Significant remarks and comments were illustrated by full-text reading in our analysis scope and classification stage, which further refined our taxonomy. Finally, our results were summarized on an Excel sheet (Microsoft) and listed in a tabular format. The additional data set includes a list of articles, publishing source, articles’ abstracts and contributions, the tools used in papers, audiences, objectives, architecture-based categorization table, and a list of relevant figures.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>The cumulative number of articles in the original search process was 455. Eighty percent (364/455) of the findings released in 2018-2021 and 20% (91/455) in 2015-2017 were distributed as follows: 15 papers from IEEE Xplore, 86 from Web of Science, 138 from Scopus, 147 from PubMed, and 69 from ScienceDirect. Approximately 62 papers were duplicates across the 5 databases.</p>
        <p>Later, 318 papers (not GAN based) were omitted after skimming through the articles’ titles and abstracts, leaving only 75 papers. Further screening via full-text reading was carried out on these 75 papers, which resulted in excluding 16 nonrelevant papers. A comprehensive reading was performed on the final 59 papers to create a general map to study this newly emerging methodology.</p>
        <p>Of these 59 papers, 51% (n=30) focused on the development and training of various GAN models and real attempts to improve the efficiency of the network architecture to improve segmentation/classification precision, especially at an early stage of the disease with fewer false positives/negatives. Nearly 49% (29/59) of publications included general reviews and surveys relating to GAN technique and its variants; recent GAN applications, limitations, and potential future prospects; reviews of retinal diseases; various DL detection methods; general analytical knowledge such as the most frequently used data sets; and the countries contributing to the current research area. From all these observations, we got a thorough view on the literature, determined the general categories of the study scope, and boosted the taxonomic classification of the literature. <xref rid="figure5" ref-type="fig">Figure 5</xref> presents the groupings of the GAN-based approaches used in the literature according to their structures or optimization functions.</p>
        <p>Kumar and Dhawan [<xref ref-type="bibr" rid="ref70">70</xref>] classified GANs based on their architectures or the loss functions used to train their generators. It is worth noting that the first 4 layers of our taxonomy have been separately used in other papers; therefore, inspired by those studies, we used these categories together as a baseline for our taxonomy. We added other categories to classify brief literature works in depth according to (1) their level of feature discrimination (PixelGAN, PatchGAN, or ImageGAN), (2) the numbers of landmarks used in the segmentation or classification process (a single landmark or multiple landmarks), (3) the backbones of the GANs used in the articles (eg, DCGAN [<xref ref-type="bibr" rid="ref59">59</xref>], Info-GAN [<xref ref-type="bibr" rid="ref71">71</xref>], WGAN [<xref ref-type="bibr" rid="ref60">60</xref>], CGAN [<xref ref-type="bibr" rid="ref45">45</xref>], Pix2Pix [<xref ref-type="bibr" rid="ref45">45</xref>], and Cycle-GAN [<xref ref-type="bibr" rid="ref47">47</xref>]), and (4) the contribution of each paper (eg, segmentation [s], classification [c], or synthesis [y]). In the following sections, we describe each category and provide some accompanying statistics.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Taxonomy of the literature on glaucoma screening based GANs technique. cGAN: conditional GAN; CNN: convolutional neural network; DCGAN: deep convolutional generative adversarial network; DL: deep learning; GAN: generative adversarial network; LSGAN: least-square GAN; WGAN-GP: Wasserstein GAN-gradient penalty.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Development Studies Category</title>
        <p>GANs were first developed by Goodfellow et al [<xref ref-type="bibr" rid="ref44">44</xref>] in 2014. Although researchers have continuously attempted to improve the performance of GANs in various ways, such as weight regularization, new loss functions, weight pruning, and Nash equilibrium, it is still a new research field among deep learning techniques [<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref72">72</xref>]. Only recently did this technique start to be adopted by researchers in the field of retinal disease, particularly glaucoma (roughly at the beginning of 2018). Therefore, the total set of papers that described various experiments and tools used for the detection or segmentation of retinal images included 30/59 (51%) articles.</p>
        <p>Among these categories, it is notable in <xref rid="figure5" ref-type="fig">Figure 5</xref> that the first 4 layers classified articles based on the method used (direct, hierarchical, or iterative) [<xref ref-type="bibr" rid="ref32">32</xref>], the model structure [<xref ref-type="bibr" rid="ref65">65</xref>], the architecture category (optimization function or structure and conditional based) [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref68">68</xref>], and the generator’s backbone (CNN based or U-Net based) [<xref ref-type="bibr" rid="ref69">69</xref>] consecutively.</p>
        <p>In the first layer, all the literature work followed the direct methods. This means that all these methods follow the philosophy of using 1 generator and 1 discriminator, and the structures of the G and D are straightforward without any branches. None of the articles used hierarchical or iterative methods; this reveals a new opportunity to apply GANs in the field of retinal disease.</p>
        <p>The second layer classified articles based on the number of players. Nearly 25/30 (83%) articles used 2 players, and only 5/30 (17%) articles utilized multiple players. In the latter case, some studies used 3 player-based methods [<xref ref-type="bibr" rid="ref73">73</xref>-<xref ref-type="bibr" rid="ref75">75</xref>], with the frameworks of [<xref ref-type="bibr" rid="ref74">74</xref>] and [<xref ref-type="bibr" rid="ref75">75</xref>] comprising segmentation, generator, and discriminator networks. In the study by Liu et al [<xref ref-type="bibr" rid="ref74">74</xref>], the segmentation network and generator enlarged the training data set to improve the segmentation performance, while the discriminator solely focused on identifying fake image–label pairs to ensure compatible utilities. However, in Yu et al [<xref ref-type="bibr" rid="ref75">75</xref>], the same architecture was used to synthesize images after performing traditional annotation-free methods to obtain coarse segmentations.</p>
        <p>A slight difference was observed in Wang et al [<xref ref-type="bibr" rid="ref73">73</xref>], where a pathology-aware visualization network was used instead of the segmentation network, with both pathology-aware visualization and the generator used to enhance the synthesized glaucoma images in specific pathological areas. The synthesized image was re-enforced to provide a heatmap close to that of the input reference image. The Patho-GAN can thus generate images of glaucoma fundus with clearer pathologies. In Yang et al [<xref ref-type="bibr" rid="ref76">76</xref>], the VGG19 network was incorporated with the 3 players to find the topology structure loss, which was combined with the other 3 losses (adversarial loss, weighted cross-entropy loss, and total variation loss) to be used by the generator. However, in [<xref ref-type="bibr" rid="ref77">77</xref>], the authors used 2 encoders, namely, E<sub>s</sub> and E<sub>t</sub>, where (s) is the source domain and (t) is the target domain; these encoders were trained to impede the classification performance of the discriminators (D+, D–). In turn, D+ and D– were trained to distinguish between positive/negative source images and positive/negative target images, and finally, a classifier (C) tried to classify source/target images.</p>
        <p>Following [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref68">68</xref>], we added a third layer to our taxonomy to classify papers as either structure-based or optimization-based methods. The majority of studies (27/30, 90%) at this level were structure- and conditional-based methods, while only 3/30 (10%) of the studies, namely, those in [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref79">79</xref>], were optimization-based methods with 2-player structures; none of these methods have been recorded as multiplayer-based structures.</p>
        <p>Some researchers tend to use objective function–based methods by updating specific loss functions or using a combination of losses to overcome the model collapse of GANs. This occurs when the generator continuously generates images with the same distribution or generates images with the same texture themes or color as the original image but with marginal differences in human understanding [<xref ref-type="bibr" rid="ref65">65</xref>]; for example, Ma et al [<xref ref-type="bibr" rid="ref42">42</xref>] used a least-squares loss function instead of sigmoid cross-entropy. Therefore, their experiment greatly improved the segmentation accuracy of the utilized model on both the digital retinal image for vessels extraction (DRIVE) and structured analysis of the retina (STARE) data sets by forcing the generator to generate images with distributions close to those of the real images. In Tu et al [<xref ref-type="bibr" rid="ref78">78</xref>], the authors used the WGAN-GP method to overcome the training instability of the traditional GAN and generate accurate probability maps of BVs. The WGAN-GP is an extension of the WGAN; it uses a gradient penalty instead of weight clipping to enforce the Lipschitz constraint. This type of GAN can be trained faster and generates higher-quality samples than those produced by WGANs [<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]. Last, Kadambi et al [<xref ref-type="bibr" rid="ref79">79</xref>] proposed a framework for domain adaptation guided by the Wasserstein distance metric instead of typical adversarial methods for more stable training and better convergence.</p>
        <p>The subsequent layer in our taxonomy was to classify methods according to the generator’s backbone (eg, U-Net based or CNN based) [<xref ref-type="bibr" rid="ref69">69</xref>]. Papers [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref80">80</xref>-<xref ref-type="bibr" rid="ref87">87</xref>] represented about 50% of the studies (n=16) and were U-Net-based architectures. However, the other 50% of the papers [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref77">77</xref>-<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref88">88</xref>-<xref ref-type="bibr" rid="ref94">94</xref>] were CNN-based generators (n=16).</p>
        <p>The study by Yu et al [<xref ref-type="bibr" rid="ref46">46</xref>] was very intensive; the authors proposed multiple-channels-multiple-landmarks as a new preprocessing framework. They used a combination of landmarks (vessel trees, ODs, and OC images) to synthesize colored images with 2 types of GANs (Pix2Pix and CycleGAN). Additionally, they used a Pix2Pix architecture with 2 different generator structures (eg, U-Net-based and CNN-based). They empirically demonstrated that the Pix2Pix network with a ResU-Net generator using high-resolution paired images and multiple-channels-multiple-landmarks outperforms every single landmark-based GAN method regardless of their architectures. Furthermore, they were able to generate significant and realistic images.</p>
        <p>The next distinguishing level in our taxonomy addressed the landmarks used in the papers. As <xref rid="figure5" ref-type="fig">Figure 5</xref> shows, references containing “N” letters refer to a single landmark (eg, the BV, OD, OC, retinal nerve fiber layer [RNFL], or rim loss [RL]). These references contributed to 20/30 (67%) of the total papers. Seventeen of them were BV-based methods [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]. Only 2 studies [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref81">81</xref>] were OD-based detection approaches, and 1 [<xref ref-type="bibr" rid="ref82">82</xref>] utilized RNFL-based detection (<xref rid="figure6" ref-type="fig">Figure 6</xref>).</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Distribution of papers per landmark(s). BG: background; BV, blood vessel; OC: optic cup; OD: optic disc; RL: rim loss; RNFL: retinal nerve fiber layer.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Another set of articles used multiple landmarks and was represented with an “M” letter in <xref rid="figure5" ref-type="fig">Figure 5</xref>. These articles contributed to 33% (10/30) of total papers. Some studies [<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>] used the BV and OD, while [<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref89">89</xref>] used the OD and OC to classify the disease. In addition, Wang et al [<xref ref-type="bibr" rid="ref73">73</xref>] used RL and RNFL, and Yu et al [<xref ref-type="bibr" rid="ref46">46</xref>] used BV, OD, and OC.</p>
        <p>The rest of the researchers used multiple landmarks, such as [<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref89">89</xref>], which involved OD and OC segmentation. Studies [<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref93">93</xref>] worked on BV and OD segmentation, and only Wang et al [<xref ref-type="bibr" rid="ref73">73</xref>] used RNFL and RL. The rest of the papers used triple landmarks in their work, such as [<xref ref-type="bibr" rid="ref58">58</xref>] and [<xref ref-type="bibr" rid="ref90">90</xref>], which involved work on BV, OD, and background, and Yu et al [<xref ref-type="bibr" rid="ref46">46</xref>] used BV, OD, and OC.</p>
        <p>In the next layer of our taxonomy, articles were classified according to the discriminator’s receptive field. As illustrated in <xref rid="figure5" ref-type="fig">Figure 5</xref>, references with P, H, or G letters refer to PixelGAN, PatchGAN, or ImageGAN, respectively. ImageGAN papers were [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref94">94</xref>], while PixelGAN papers were [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>-<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref92">92</xref>]. In addition, PatchGAN papers were [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref83">83</xref>-<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>].</p>
        <p>Isola et al [<xref ref-type="bibr" rid="ref45">45</xref>] proposed a Pix2Pix-based conditional adversarial network (cGAN) as a general-purpose solution to image-to-image translation problems, and demonstrated that a 70 × 70 PatchGAN alleviates artifacts and achieves the best scores. Scaling beyond 70 × 70 to a full 286 × 286 ImageGAN did not appear to improve the quality of the results and, in fact, the latter model obtained a considerably lower fully connected network (FCN) score. This scaling mechanism may have been effective because there are more parameters in ImageGAN than PatchGAN and greater depth, which made it harder to train. By contrast, 3 studies [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref89">89</xref>] proved that the 64 × 64 PatchGAN is the best, while one [<xref ref-type="bibr" rid="ref84">84</xref>] concluded that a 120 × 120 patch is better than a 64 × 64 patch size. Studies [<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref88">88</xref>] concluded that ImageGAN is better than PatchGAN. Last, pixel-level annotation [<xref ref-type="bibr" rid="ref50">50</xref>] is much more tedious than image-level annotation.</p>
        <p>Each reference in <xref rid="figure5" ref-type="fig">Figure 5</xref> is denoted with a letter indicating the contribution of the relevant paper. Nearly 57% (17/30) of papers worked on the segmentation task and were denoted by (s), 17% (5/30) worked on image synthesis and were denoted by (y), and only 2 papers worked on the classification task and were denoted by (c). The remaining 6/30 (20%) papers worked on multiple tasks (eg, sc, sy, ysc). <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref>-<xref ref-type="supplementary-material" rid="app7">7</xref> summarize the literature results reported in the papers.</p>
      </sec>
      <sec>
        <title>Reviews and Surveys Category</title>
        <p>In this category, 2 sets of reviews were identified. In the first set, detailed discussion is presented about recent breakthrough techniques of GANs, their development, variations, and medical field applications. The second set shows the impact of deep learning on ophthalmology. In total, this category includes 29/59 (49%) papers.</p>
        <p>For the first set, studies [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref68">68</xref>-<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref95">95</xref>-<xref ref-type="bibr" rid="ref98">98</xref>] provided detailed reviews about GANs including their basic background, theory, and implementations. Also, they present current research hotspots and proposed GANs in different applications. They provided the reader with a clear insight into GANs’ advantages and disadvantages, its different evaluation metrics, and proposed a bright prospect of this technique. Studies [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref95">95</xref>] focused on the importance of GANs, especially in medical field applications, and their capability to generate data through image synthesis technique without explicitly modeling the probability of density function. Wang et al [<xref ref-type="bibr" rid="ref96">96</xref>] provided a further investigation of GAN in parallel intelligence. Another study, [<xref ref-type="bibr" rid="ref99">99</xref>], discussed incorporating GANs in the signal processing community, showing different training methods, constructing GANs, and highlighting current challenges to their theories and applications. References [<xref ref-type="bibr" rid="ref100">100</xref>,<xref ref-type="bibr" rid="ref101">101</xref>] are practical prospective studies, and in [<xref ref-type="bibr" rid="ref100">100</xref>], the authors tried to assess GAN algorithms and find the best architecture among all. However, they concluded that most of the models could achieve similar scores with enough hyperparameter optimization and random restarts. Additionally, they tried to overcome the limitation of evaluation metrics by computing precision and recall on several proposed data sets. 
Also, in [<xref ref-type="bibr" rid="ref101">101</xref>], the authors reproduced the current state-of-the-art GANs, aiming to explore their landscape, discussing their pitfalls, and reproducibility issues. Turhan and Bilge [<xref ref-type="bibr" rid="ref102">102</xref>] presented a comprehensive study about generative models such as GANs and autoencoders (AEs) and identified the relationship among them for better understanding and emphasizing the importance of generative models. Oussidi and Elhassouny [<xref ref-type="bibr" rid="ref103">103</xref>] proposed a starting point survey for those who have interests in deep generative models such as deep belief networks (DBNs), deep Boltzmann machine (DBM), restricted Boltzmann machines (RBMs), VAE, and GAN. They explained their building blocks, learning procedures, and limitations.
        <p>In the second set of articles, [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref104">104</xref>] presented an overview of DL applications in ophthalmic disorder using digital fundus images. They summarized the publicly available data sets used for different retinal diseases such as cataracts, retinopathy, glaucoma, and age-related macular degeneration. They also provided a detailed summary of the pros and cons of this emerging technique for both computer scientists and ophthalmologists and specified the clinical and technical aspects to address deep learning challenges and future directions. Some studies [<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref105">105</xref>,<xref ref-type="bibr" rid="ref106">106</xref>] discussed the importance of clinical considerations and potential challenges for clinical adoption and telemedicine integration to reduce cost, increase accuracy, and facilitate health care accessibility. Ting et al [<xref ref-type="bibr" rid="ref53">53</xref>] described the importance of deploying deep learning algorithms within clinical settings. Hogarty et al [<xref ref-type="bibr" rid="ref55">55</xref>] clarified the misunderstanding between ML and deep learning terms and presented an overview of AI and its development in the ophthalmology field. Mayro et al [<xref ref-type="bibr" rid="ref107">107</xref>] also provided an overview of AI and deep learning applications in glaucoma detection using fundus images, optical coherence tomography, and visual field interpretation.</p>
        <p>Other studies, [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref108">108</xref>], followed the systematic framework in their reviews: [<xref ref-type="bibr" rid="ref20">20</xref>] discussed the main algorithms used for glaucoma detection using ML, indicating the importance of this technology from a medical aspect, especially retinal image processing, whereas [<xref ref-type="bibr" rid="ref108">108</xref>] performed a systematic review on investigating and evaluating DL methods’ performance for automatically detecting glaucoma using fundus images.</p>
        <p><xref rid="figure7" ref-type="fig">Figure 7</xref> illustrates the publicly available data sets, their sizes, and how often researchers used them. Each data set is collected using a particular camera with different standards and used for a specific disease type. Thus, generalization is the key problem of DL approaches as described in the “Challenges” section.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Total use of various datasets in glaucoma screening.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>As <xref rid="figure7" ref-type="fig">Figure 7</xref> illustrates, DRIVE and STARE are the most frequently used data sets. In other words, researchers often rely on BV segmentation in the diagnosing process [<xref ref-type="bibr" rid="ref72">72</xref>]. However, few researchers have used Messidor-1, high-resolution fundus, 2D Neurons(NeuB1), and CHASEDB. For OD and OC landmarks segmentation, DRIONS-DB, retinal fundus glaucoma challenge (REFUGE), ORIGA, RIM-ONE (r3/v2), and Drishti-GS were the most used, while seldom used is the large-scale attention-based glaucoma (LAG) data set, which is for RNFL and RL landmarks segmentation.</p>
        <p><xref rid="figure8" ref-type="fig">Figure 8</xref> shows the distribution of the collected papers per year regardless of their duplications. The statistics in <xref rid="figure8" ref-type="fig">Figure 8</xref> indicate the recent interest of researchers to adopt GANs techniques. Furthermore, it reveals the need to explore this newly emerging technique in ophthalmology. Therefore, extensive further work is needed to cover this area of research.</p>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Distribution of papers per library. WOS: Web of Science.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>This work has targeted 5 search engines: Scopus, ScienceDirect, Web of Science, IEEE, and PubMed, which are highly reputed and reliable resources for research. They include studies on implementation of deep learning techniques for different retinal disorder fields to help ophthalmologists and patients. Journal articles comprised 36 papers and only 23 were published in conferences.</p>
        <p>According to <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref>-<xref ref-type="supplementary-material" rid="app7">7</xref>, each paper has used a different set of evaluation metrics; thus, we concur with Yu et al [<xref ref-type="bibr" rid="ref46">46</xref>] in concluding that there are no uniform evaluation indexes in the literature to evaluate synthetic and real images. To further clarify this issue, <xref rid="figure9" ref-type="fig">Figure 9</xref> shows the distribution of evaluation metrics used in the collected papers. To present <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref>-<xref ref-type="supplementary-material" rid="app7">7</xref> visually, a T-shaped matrix diagram in <xref ref-type="supplementary-material" rid="app8">Multimedia Appendix 8</xref> illustrates in the upper part, named “Metrics used,” the total use of each metric in all articles according to the used data set. Similarly, in the lower part, named “Task,” the diagram shows the total use of each data set in all articles according to a specific task (classification, segmentation, or synthesizing).</p>
        <fig id="figure9" position="float">
          <label>Figure 9</label>
          <caption>
            <p>Distribution of frequently used evaluation metrics in glaucoma screening. AUC: area under the curve; BLE: Boundary Distance Localization Error; IoU: Intersection over Union; ISC: Image Structure Clustering; MCC: Matthews correlation coefficient; mAP: Mean Average Precision; MIoU: Mean Intersection over Union; PSNR: peak signal-to-noise ratio; ROC: receiver operating characteristic curve; SSIM: structural index similarity.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Based on the observations of the upper part of the diagram, the top 6 metrics (sensitivity, specificity, accuracy, area under the curve [AUC], F1 score, and Dice coefficient) were used the most with various data sets. Furthermore, 87% (13/15) of metrics were mainly performed on STARE and DRIVE data sets, unlike other data sets, such as Rim-ONEv3 and Drishti-GS, that use another set of metrics (eg, F1 score, Dice coefficient, peak signal-to-noise ratio, structural index similarity, and δ) to evaluate the performance. This indicates the need to consider standard effect metrics in future research irrespective of the type of data set used.</p>
        <p>By contrast, in the lower part, the segmentation task was reported as the most applied task in the collected articles, followed by image synthesis, with the classification task being the least often applied. Nevertheless, the best results reported were in a classification study by Bisneto et al [<xref ref-type="bibr" rid="ref81">81</xref>], which utilized a combination of Dristh-GS and RIM-ONE data sets. They achieved 100% in sensitivity, specificity, accuracy, and AUC in OD/OC classification. Their method was based on cGANs with taxonomic diversity and distinction indexes. Although most of the studies are on segmentation tasks and professionally segmented BVs and ODs [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref91">91</xref>], they still lack segmenting fine and small vessels and suffer from false positives. By contrast, image synthesis has attracted increased interest of researchers, as it assists in overcoming the shortage of medical images. Some researchers, such as [<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref87">87</xref>], have used GAN with adversarial AE to enhance the generated image and some others tend to rely on using different loss functions to better train G and D networks. However, generated images are blurry, noisy, and of low quality with lack of details. Other studies, such as [<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref94">94</xref>], adopted preprocessing (eg, data augmentation, localization of ROI, automatic color equalization) and postprocessing (eg, Lanczos resampling method, morphological operation, contrast enhancement) to enhance the performance of their methods, and they experienced a further improvement in their segmentation result.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This study aimed to provide a detailed summary of the literature on retinal disease detection or segmentation, particularly glaucoma, using GANs and highlight the recent trends exhibited by researchers on this topic. We mainly focused on articles that worked on enhancing the segmentation or detection of the disease rather than improving GAN techniques. Furthermore, we provide a taxonomy of papers related to this area to further assist future research.</p>
        <p>Several benefits may arise from our taxonomy. First, organizing tens of papers in a single diagram provides better understanding of literature work, as people with less experience may be confused if many papers remain unorganized. Second, the taxonomy helps sort literature works and activities into meaningful, easy to manage, and coherent frameworks. Third, it provides researchers with better insights into a given theme, thus finding current literature gaps and discovering new research directions. Last and most importantly, it helps highlight articles’ strengths and weaknesses of a particular research scope.</p>
        <p>From the developed taxonomy, we can quickly see that all the published papers followed the direct method of the GAN architecture; hence, there is an urgent need to discover the impact of the hierarchical or iterative method on glaucoma screening. Moreover, almost all of the researchers worked on BV segmentation, and very few used OD and OC segmentation, which are the most reliable indications of glaucoma according to ophthalmologists. Future GAN research should focus on disease classification rather than on the segmentation of retinal anatomy. Most of the literature studies faced difficulties in terms of the early detection of glaucoma and low segmentation of fine vessels; therefore, alternatives should be developed, for example, using the RNFL to indicate the early presence of the disease or exploiting the prior knowledge of vascular connectivity to improve upon the segmentation performance of the current methods. Although the RNFL is a good sign for early glaucoma screening and has been incorporated as one of the gold standards of glaucoma evaluation [<xref ref-type="bibr" rid="ref109">109</xref>], very few studies utilized the RNFL with GANs. OD/OC segmentation may lead to interference with pathological aspects such as large genetic OD sizes. Based on the reviewed papers, we noticed that only one article [<xref ref-type="bibr" rid="ref1">1</xref>] has used RNFL for glaucoma screening. Although that study achieved impressive results, the authors used a private data set.</p>
        <p>Most of the previous studies concentrated on the segmentation task. As many as 17/30 papers worked on retinal landmark segmentation [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref17">17</xref>], while only 2 papers worked on disease classification [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>], and 5 papers worked on image synthesis to address the lack of medical images [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref24">24</xref>]. However, the rest of the papers (6/30) performed multiple tasks (eg, segmentation and classification, synthesis and segmentation) [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref30">30</xref>]. In conclusion, more than 50% (17/30) of the literature worked on segmentation task and few researchers have worked on classification and synthesizing retinal images. Therefore, future studies should take these statistics into consideration.</p>
        <p>In the following sections, the included papers will be discussed in detail. We present comprehensive diagrams showing the factors that motivate researchers to carry out their work in this area, highlighting their encountered challenges, and summarizing significant recommendations for addressing their faults in future work.</p>
      </sec>
      <sec>
        <title>Challenges</title>
        <sec>
          <title>Overview</title>
          <p>Glaucoma is a serious disease. Therefore, researchers and developers attempt to exploit the magic of DL technique to help doctors and patients diagnose the disease at its early stage. However, various challenges hinder their expectations; some of those challenges implicitly exist in the nature of DLMs, or are somehow incorporated within DLMs (eg, data richness, diversity of data, and powerful hardware), besides the challenges of GANs architectures (eg, model collapse, optimization, Nash equilibrium, and evaluation metrics). All these challenges have been summarized and discussed in this section along with their relevant references to provide the readers with direct access to the original papers for further discussion. <xref rid="figure10" ref-type="fig">Figure 10</xref> categorizes literature challenges into 6 groups to further assist discussion. Each group is indicated with a separate shape.</p>
          <fig id="figure10" position="float">
            <label>Figure 10</label>
            <caption>
              <p>Challenges of glaucoma screening using GANs technique. AL: artificial learning; GAN: generative adversarial network.</p>
            </caption>
            <graphic xlink:href="jmir_v23i9e27414_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Challenges Related to Patients</title>
          <p>The silent progress of glaucoma disease constitutes a crucial challenge worldwide. Half of the infected people do not experience any symptoms at early stages [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. According to various studies, more than 60 million cases were diagnosed globally in 2013, and it is expected to exceed 75 million and 111 million cases by 2020 and 2040, respectively [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Especially among rural populations, China and India are considered to be the home to approximately 40% of glaucoma cases globally [<xref ref-type="bibr" rid="ref110">110</xref>]. These populations, mostly in developing countries, suffer from difficulties in accessing medical centers, unavailability of experts, high costs of health care, and sustainability of health care services [<xref ref-type="bibr" rid="ref111">111</xref>], in contrast to Western countries, where health care is cost-effective and different socioeconomic situations of patients are supported, and thus treatment for glaucoma remains affordable [<xref ref-type="bibr" rid="ref106">106</xref>].</p>
          <p>In addition, the recent pandemic, COVID-19, has enforced social distancing during communication. Therefore, there is a great need to promote ocular screening in conjunction with telemedicine as a remote monitoring tool [<xref ref-type="bibr" rid="ref112">112</xref>], alongside the presence of handy cheap smartphones, whereby patients can collect their own IOP data themselves with accurate tonometers and free anesthesia [<xref ref-type="bibr" rid="ref113">113</xref>]. Although DLMs positively affect both doctors and patients’ style in terms of decision making, cost affordability, and health care accessibility, there remain some serious challenges, such as technical and clinical challenges, interpretation of the results, and patient trust in machines [<xref ref-type="bibr" rid="ref112">112</xref>]. Zapata et al [<xref ref-type="bibr" rid="ref114">114</xref>] predict that very soon AI will start assisting specialists in achieving high levels of consistency and accuracy beyond human abilities.</p>
        </sec>
        <sec>
          <title>Challenges Related to Reliability</title>
          <p>Reliability is a key to adopting computer technology in the medical field. Deep learning techniques may misclassify segmenting some pixels due to low image contrast or heavy overlap between foreground and background pixels, leading to false-positive/false-negative results [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]. In some cases, doctors are dissatisfied with deep learning segmentation performance, as it is not as real as their expectations. Taking RNFL segmentation as an example, the segmentation results do not have specific geometrical shape of RNFLD as the gold standards and large segmentation errors of fundus images [<xref ref-type="bibr" rid="ref83">83</xref>]. Furthermore, the variability of shape and extremely inhomogeneous OD structure appearance result in inaccurate CDR measurement compared with ideal ones [<xref ref-type="bibr" rid="ref115">115</xref>-<xref ref-type="bibr" rid="ref117">117</xref>]. In some cases, deep learning approaches neglect domain knowledge that doctors care about, such as CDR [<xref ref-type="bibr" rid="ref118">118</xref>].</p>
          <p>Existing methods often suffer from poor segmentation of the fine vessels [<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref80">80</xref>] due to weak ability of antinoise interference or insufficient segmentation of vessels [<xref ref-type="bibr" rid="ref49">49</xref>]; therefore, prior knowledge of BVs connectivity may improve the segmentation performance. Meanwhile, the low reliability of manual detection and the small size of public data sets increase the complexity of morphological assessment of nonglaucomatous optic neuropathy [<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref120">120</xref>]. Robust ground truth labeling must be generated after a comprehensive evaluation, including structural imaging, clinical examination, and perimetry [<xref ref-type="bibr" rid="ref121">121</xref>]. Doctors mostly decide the disease status; even when all clinical symptoms are present, this can lead to differences among annotators, and thus exaggerated annotations [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref106">106</xref>,<xref ref-type="bibr" rid="ref122">122</xref>,<xref ref-type="bibr" rid="ref123">123</xref>]. The reliability of glaucoma algorithms is restricted due to the lack of reference ground reality for glaucoma [<xref ref-type="bibr" rid="ref115">115</xref>,<xref ref-type="bibr" rid="ref124">124</xref>]. DLMs have a remarkable ability to address glaucoma. However, it is critical to have gold-standard algorithms for assessing and detecting glaucoma [<xref ref-type="bibr" rid="ref54">54</xref>], as well as for editing or synthesizing images using the GAN techniques [<xref ref-type="bibr" rid="ref97">97</xref>].</p>
          <p>Sometimes, researchers tend to exclude low-quality or sparsely annotated images during the training phase; this kind of regime weakens the algorithm and leads to less reliability in real-life cases [<xref ref-type="bibr" rid="ref111">111</xref>]. Furthermore, incorporating nonspecialists for image grading limits the reliability of identification [<xref ref-type="bibr" rid="ref125">125</xref>]. Finally, although most of the reviewed papers have shown outstanding diagnostic performance, at times researchers do not mention some hyperparameter values used in the training stage, particularly when they use their own private dataset [<xref ref-type="bibr" rid="ref112">112</xref>]. Excessive screening can result in overdiagnosis. DLMs could also be harmful if the diagnostic software is issued directly to patients, as future opportunities and risk of AI could be magnified [<xref ref-type="bibr" rid="ref55">55</xref>].</p>
        </sec>
        <sec>
          <title>Challenges Related to Biological Effects</title>
          <p>Pathological change and image quality play a major role in the accuracy of glaucoma diagnosis [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref123">123</xref>]. Early and moderate glaucoma stages are considered one of the biggest challenges faced by ophthalmological practice due to the marginal variation size of CDR compared with normal eye [<xref ref-type="bibr" rid="ref126">126</xref>]. Serener and Serte [<xref ref-type="bibr" rid="ref127">127</xref>] have used ResNet-50 and GoogLeNet with transfer learning for early and advanced glaucoma detection, and found that GoogLeNet outperforms ResNet-50 with a trade-off performance between sensitivity and specificity. Besides, Bisneto et al [<xref ref-type="bibr" rid="ref81">81</xref>] proposed GAN-based OD segmentation allied with an index of taxonomic diversity for extracting texture attributes aiming to detect early stages of glaucoma. They achieved outstanding results reaching up to 100% for accuracy and 1 for the receiver-operating characteristic curve. The misclassification of glaucoma and nonglaucoma is usually due to heavy overlap and extremely bad contrast between ocular structure and the background, leading to unsatisfactory segmentation performance due to OC’s undistinguishable boundaries [<xref ref-type="bibr" rid="ref116">116</xref>]. Low-quality images (blurring and contrast) can result in unreliable model predictions. Furthermore, the lack of a clear OC border increases the misclassification rate [<xref ref-type="bibr" rid="ref128">128</xref>].</p>
          <p>There is a trade-off between image’s quality and computational parameters of the network [<xref ref-type="bibr" rid="ref129">129</xref>]. Therefore, the need for DLMs to downsample images into lower resolution (ie, 224 × 224) to reduce the computation time leads to reducing image contrast, and hence deteriorating key diagnostic parts of ocular images and weakening the capability to recover contextual information [<xref ref-type="bibr" rid="ref86">86</xref>]. By contrast, performance of DLMs varied among ethnicities, for example, the Saudi population’s performance is not the same as on Western populations. The differences among populations are due to the richness of melanocytes in the retinal pigmented epithelium of darkly skinned people compared with Whites [<xref ref-type="bibr" rid="ref52">52</xref>]. Therefore, data sets used in glaucoma detection must follow specific standards to ensure heterogeneity and diversity of images.</p>
          <p>Multiple eye disorders such as high myopia or pathologic myopia are another major challenge leading to false-negative and false-positive results [<xref ref-type="bibr" rid="ref54">54</xref>]. The main reason for the incorrect segmentation of glaucoma in myopia cases is the alteration of the macula and optic nerve appearance. In addition, the use of RNFL imaging for glaucoma diagnosis in patients with diabetes should be made carefully [<xref ref-type="bibr" rid="ref130">130</xref>]. Myopia affects macular and RNFL thickness measurements due to the thinning and stretching of these layers caused by the increased axial length and optical projection artifact of the scanning region [<xref ref-type="bibr" rid="ref131">131</xref>]. Myopia mostly causes misclassification of glaucoma due to its irregular ONH appearance [<xref ref-type="bibr" rid="ref132">132</xref>]. In severe myopic cases, the color contrast between the foreground (OC) and the neuroretinal rim decreases due to an increased pallor in the rim. Furthermore, the increased pixels’ values brighten the underlying peripapillary tissue and lead to difficult evaluation of the RNFL in the peripapillary area. In addition, torsion or tilting of the OD can occur, and the OD’s rotation can result in an oblique view of the ONH [<xref ref-type="bibr" rid="ref128">128</xref>].</p>
          <p>In other cases, it is hard to distinguish between physiologic large cups and glaucomatous cases because both cases share a common feature (eg, large CDR) [<xref ref-type="bibr" rid="ref117">117</xref>]. Diseases such as OD edema, OD hemorrhage, and glaucoma frequently make segmentation of OD rather difficult [<xref ref-type="bibr" rid="ref133">133</xref>]. By contrast, retinal BV segmentation also has inherent challenges such as incorrect segmentation of pathological details and low microvascular segmentation [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
        </sec>
        <sec>
          <title>Challenges Related to Availability/Services</title>
          <p>Time, efforts, and lack of experts are the main challenges of medical care centers [<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref134">134</xref>]. Therefore, computers have been increasingly used for automatic retinal segmentation to serve as a second opinion to the doctors, improve the diagnostic accuracy, and reduce the tedious work of annotating images [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref135">135</xref>]. Particularly, GANs showed impressive performance in medical image synthesis and it is usually employed to tackle the shortage of annotated data or lack of experts [<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref95">95</xref>]. Generally, medical images are usually rare, expensive, and full of patient privacy issues [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref88">88</xref>] and the publicly available data sets are often imbalanced in size and annotation [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref84">84</xref>]. In general, segmentation tasks suffer from an immense problem of class imbalance. Thus, the accuracy metric is not sufficient alone until concluding a system’s efficiency on both sensitivity and specificity. They should, however, be considered as an essential evaluation metric [<xref ref-type="bibr" rid="ref72">72</xref>].</p>
          <p>Diaz-Pinto et al [<xref ref-type="bibr" rid="ref90">90</xref>] proposed a GAN method with semisupervised learning to develop a good image synthesizer to tackle the shortage of retinal image availability and support generalization ability. Additionally, Liu et al [<xref ref-type="bibr" rid="ref136">136</xref>] created a large-scale glaucoma diagnostic fundus images (FIGD) database. They proposed the glaucoma diagnosis with a complicated neural networks method for automatic detection of glaucomatous optic neuropathy. Importantly, the method has the potential to be generalized throughout populations.</p>
          <p>Various GAN-based methods have been proposed to mitigate image labeling [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref92">92</xref>]. However, this challenge remained open as the current literature results are still inaccurate (eg, fail to generate very thin vessels). Lahiri et al [<xref ref-type="bibr" rid="ref43">43</xref>] concluded that the diversity of annotated images is more important than the actual number of annotations. Finally, rural areas experience difficulties in locating ophthalmologists. This also necessitates more future work to use telemedicine in ophthalmology [<xref ref-type="bibr" rid="ref55">55</xref>].</p>
        </sec>
        <sec>
          <title>Challenges Related to the Nature of Deep Learning</title>
          <p>With the recent advancements in DLM methodologies, promising results in the field of ophthalmology have been obtained. Many GANs and CNNs models are proposed in computer vision. However, DL approaches face several difficulties, such as domain shift.</p>
          <p>Domain shift is the disparity in appearance distribution between various data sets due to different camera settings, illumination variation, different screening angles, or out-of-focus ROI. As a result, domain shift hinders the generalization capability of deep networks [<xref ref-type="bibr" rid="ref89">89</xref>]. In most literature, training and test data sets come from the same image distribution. However, this is not always the case in real life. Therefore, it may significantly damage the real-life applications if not handled beforehand [<xref ref-type="bibr" rid="ref72">72</xref>]. Kadambi et al [<xref ref-type="bibr" rid="ref79">79</xref>] proposed an unsupervised domain adaptation framework by allowing the model to learn domain-invariant features to enhance segmentation performance and generalization capability. Wang et al [<xref ref-type="bibr" rid="ref77">77</xref>] tried to align the distributions of the source and target domains so that the labeled source images can be used to enhance the classification efficiency of the target domain.</p>
          <p>Deep learning addressed many issues in the traditional methods of ML. However, it also brought new difficulties. The most crucial issue is the ambiguity of the diagnosing result; in other words, the blackbox problem [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. DLMs are blackbox in nature and do not have diagnostic explanations to confirm their effectiveness in a real clinical setting. Wang et al [<xref ref-type="bibr" rid="ref73">73</xref>] proposed a pathology-aware visualization approach for feature visualization using DNNs to explain better how decisions are taken by computer, and therefore find pathological evidence through computer-aided diagnosis. Furthermore, for this purpose, Zhao et al [<xref ref-type="bibr" rid="ref115">115</xref>] proposed a weakly supervised model due to its ability to simultaneously learn the clinical evidence identification and perform the segmentation task from large-scale weak-label data that further improves glaucoma diagnosis.</p>
          <p>The lack of publicly available data sets for training the model is another significant challenge concerning deep learning approaches. Therefore, Orlando et al [<xref ref-type="bibr" rid="ref132">132</xref>] proposed a data set named REFUGE, which contains 1200 fundus photographs with gold standard segmentations and clinical glaucoma marks. Moreover, Li et al [<xref ref-type="bibr" rid="ref137">137</xref>] created the LAG database containing 11,760 fundus photographs classified as either positive glaucoma (4,878) or negative glaucoma (6,882), which is the largest among the currently existing databases. According to Asiri et al [<xref ref-type="bibr" rid="ref52">52</xref>], the key problem of constructing a robust deep CNN method is not the availability of broad data sets but instead the diversity of annotation of those images [<xref ref-type="bibr" rid="ref43">43</xref>]. A major difficulty of each algorithm is its validity in multiple patient cohorts with diverse conditions. Therefore, for a DLM to be sturdy, it must be effective across various data sets [<xref ref-type="bibr" rid="ref105">105</xref>].</p>
          <p>Recent studies demonstrated that more complicated and informative image features might be discovered when growing the depth of the network [<xref ref-type="bibr" rid="ref138">138</xref>,<xref ref-type="bibr" rid="ref139">139</xref>]. However, as the network depth rises, deeper CNN has poor diagnostic efficiency due to the gradient disappearance issue or the gradient explosion problem [<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref140">140</xref>,<xref ref-type="bibr" rid="ref141">141</xref>]. Researchers mostly use shortcut links (skip connections) that skip one or more layers while training deep networks, as was the case with [<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref126">126</xref>,<xref ref-type="bibr" rid="ref128">128</xref>,<xref ref-type="bibr" rid="ref129">129</xref>]. Alternatively, in GANs techniques, using WGAN or LSGAN gives a smoother gradient that contributes to stable training [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]. Another concern that should be considered before building up deep models is the <italic>computation time</italic>. As there is a trade-off between model’s depth and the efficiency, the deeper the architecture the greater the number of parameters it gets, which eventually increases computation time [<xref ref-type="bibr" rid="ref140">140</xref>].</p>
        </sec>
        <sec>
          <title>Challenges Related to GAN Technique</title>
          <p>Despite all the ongoing developments and studies, GANs suffer from several challenges and weaknesses besides the challenges related to deep learning nature (eg, blackbox, generalization capability, computation time, and annotation cost). The most critical concern with GANs is the instability of the training process (Nash equilibrium point) [<xref ref-type="bibr" rid="ref98">98</xref>,<xref ref-type="bibr" rid="ref142">142</xref>]. Zhao et al [<xref ref-type="bibr" rid="ref82">82</xref>] used the residual module that allowed easy optimization of competitive networks, while Tu et al [<xref ref-type="bibr" rid="ref78">78</xref>] used WGAN-GP to alleviate training instability of the traditional GAN. Biswas et al [<xref ref-type="bibr" rid="ref92">92</xref>] carefully adjusted hyperparameters to balance between the 2 networks (G and D). Park et al [<xref ref-type="bibr" rid="ref94">94</xref>] improved learning performance and mitigated imbalanced learning by introducing new loss functions for the generator and re-designing the discriminator’s network. However, it remains challenging to determine which algorithm works better than others or what modifications are critical to enhancing the results. Lucic et al [<xref ref-type="bibr" rid="ref100">100</xref>] found that most models could achieve comparable scores with appropriate hyperparameter optimization and random restarts. According to Kurach et al [<xref ref-type="bibr" rid="ref101">101</xref>], the nonsaturating loss over data sets, architectures, and hyperparameters is sufficiently stable.</p>
          <p>Besides, in GANs, the possibility of mode failure/collapse persists while training the model. Mode collapse occurs when data generated from GANs mostly concentrate on very narrower modes (partial collapse) or 1 single mode (complete collapse) [<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref99">99</xref>]. By contrast, if the discriminator becomes very strong during training, the generator gradient gradually decreases and eventually disappears. As a result, the generator learns nothing. The imbalance between generator and discriminator networks contributes to overfitting. Many approaches have been proposed to tackle these challenges; for example, Radford et al [<xref ref-type="bibr" rid="ref59">59</xref>] aimed to address instability training issues, and Kadambi et al [<xref ref-type="bibr" rid="ref79">79</xref>] created a new adversarial domain adaptation architecture, led by Wasserstein for better stability and convergence.</p>
          <p>The lack of standard evaluation metrics is another big issue in GANs compared with other generative models. Inception score (IS), average log likelihood, Fréchet inception distance (FID), Wasserstein metric, etc. are quantitative measurements of GANs. There is no majority vote on which assessing measurement is the best. Different scores rely on various aspects of image generation. However, some measurements seem more plausible than others (eg, FID is more durable to noise). FID can compare the similarity between real and generated images [<xref ref-type="bibr" rid="ref143">143</xref>], which is considered more effective than IS [<xref ref-type="bibr" rid="ref70">70</xref>].</p>
          <p>In conclusion, the main causes of GAN problems can be summarized as follows: (1) The distance calculation of the corresponding optimization (such as Kullback–Leibler divergence and Jensen–Shannon divergence) is unreasonable. (2) It is difficult to overlap the generated distribution with real distribution. Although the GAN technique is a new, interesting, and attractive field of study in many applications, further studies are needed to resolve the uniqueness of generated samples, poor convergence, and complete model collapse challenges.</p>
        </sec>
      </sec>
      <sec>
        <title>Motivations</title>
        <p>Adopting deep GAN in ophthalmology is a promising and significant field of study. This section reports some of the literature’s characteristics, which we classified on the basis of references to support further discussion (<xref rid="figure11" ref-type="fig">Figure 11</xref>).</p>
        <fig id="figure11" position="float">
          <label>Figure 11</label>
          <caption>
            <p>Benefits of GANs-based methods for glaucoma screening. DCNN: deep convolutional neural network; GAN: generative adversarial network.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Motivations Related to Experts/Doctors</title>
          <p>Detection of any retinal defects must be through analysis of ocular images. Analysis of retinal images, however, must involve trained physicians to analyze and assess digital color fundus images. Such a process requires a great deal of time and human work; therefore, GANs support doctors in mitigating this extensive bottleneck [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref91">91</xref>]. Furthermore, deep GANs techniques are unlike CNNs, where the same GAN approach could be applied to a wide variety of cases and still produce reasonable results [<xref ref-type="bibr" rid="ref45">45</xref>]. GANs can detect the OD in fundus photos with pathological changes or irregular highlights [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref86">86</xref>]. In the case of vessel segmentation with CNN-based methods, outputs are usually blurry around small and weak branches or suffer from a problem of nonconnectivity of segmented vessels; however, GANs better segment capillary/thin vessels of fundus images [<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref84">84</xref>], and thus serve as a second opinion to ophthalmologists [<xref ref-type="bibr" rid="ref72">72</xref>]. GANs are the framework that allows creating and using practical outputs as a gold standard [<xref ref-type="bibr" rid="ref44">44</xref>]. Therefore, these frameworks were adopted by Lu et al [<xref ref-type="bibr" rid="ref83">83</xref>] due to their ability to generate the required specific geometry of RNFLD, which is close to ground truth with high precision, accuracy, and fewer segmentation errors, despite the existence of multiple pieces of RNFL or low-contrast images. Thus, its segmentation results are much more trusted by doctors than CNN’s.</p>
          <p>Adversarial learning avoids scarcity of manual annotation and subjective segmentation made by non-expert clinicians as this methodology is mainly data driven [<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]. In glaucoma classification, enforcing GANs to synthesize images with similar visualization results as the reference image will help mitigate the drawbacks of binary labels (negative or positive) that limit the visualization methods to recognize pathological facts underlying diagnosis by DNNs [<xref ref-type="bibr" rid="ref73">73</xref>].</p>
        </sec>
        <sec>
          <title>Motivations Related to Researchers</title>
          <p>Deep learning in retina images is very effective and useful [<xref ref-type="bibr" rid="ref72">72</xref>]. However, they are often affected by domain shifts across data sets. As a result, a generalization of DLMs was severely hindered. Therefore, researchers tend to exploit generative adversarial learning for domain adaptation by encouraging the target domain predictions to be close to the source ones [<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]. Domain adaptation is often used to overcome the lack of large pixel annotation using off-the-shelf annotated images from other relevant domains. Alternatively, researchers exploit the existence of a large amount of unlabeled data to train a classifier using the power of DCGAN in a semisupervised learning scenario [<xref ref-type="bibr" rid="ref90">90</xref>]. Semisupervised learning is in the middle way between unsupervised and supervised learning; therefore, less human intervention is required when combined with GANs for better semantic segmentation [<xref ref-type="bibr" rid="ref74">74</xref>]. Using GANs techniques, Lahiri et al [<xref ref-type="bibr" rid="ref43">43</xref>] performed image segmentation with very few annotated samples (0.8%-1.6%), nearly 500-1000 annotations. Further, Zhao et al [<xref ref-type="bibr" rid="ref93">93</xref>] proposed an image synthesizer using GANs with style transfer and then integrated the outputs into the training stage to boost segmentation efficiency using just 10 samples.</p>
          <p>With deep adversarial learning, researchers aim to reduce domain discrepancy [<xref ref-type="bibr" rid="ref144">144</xref>,<xref ref-type="bibr" rid="ref145">145</xref>] by improving the quality of the generated outputs to be as close as possible to the inputs. Wang et al [<xref ref-type="bibr" rid="ref77">77</xref>] exploited label information for matching domain distribution. Ma et al [<xref ref-type="bibr" rid="ref42">42</xref>] applied the least-squares loss function instead of sigmoid cross-entropy to generate images with distribution close to the real ones and also alleviate gradient vanishing problems. Furthermore, Liu et al [<xref ref-type="bibr" rid="ref57">57</xref>] added a patch-level adversarial network to enhance image consistency between ground truth and the generated samples, which further boosts segmentation performance.</p>
          <p>GANs are capable of learning the mapping from the input image to the output image as well as learning a loss function to train this mapping [<xref ref-type="bibr" rid="ref45">45</xref>], unlike existing DLMs, which use a unified loss function for retinal vessels segmentation, thereby producing blurry outputs with false positives around faint and tiny vessels [<xref ref-type="bibr" rid="ref84">84</xref>], which is in contrast to GAN variations (eg, WGAN-GP and M-GAN) that provide accurate segmentation results around small and weak branches [<xref ref-type="bibr" rid="ref78">78</xref>], reduce low microvascular segmentation [<xref ref-type="bibr" rid="ref94">94</xref>], and preserve the connectivity of arteriovenous vessels [<xref ref-type="bibr" rid="ref76">76</xref>]. Moreover, AEs and GANs in a single system facilitate generating vessel maps without the previous existence of retinal vessel tree [<xref ref-type="bibr" rid="ref87">87</xref>]. Besides, unconditional GANs can synthesize retinal images without using prior vessel images [<xref ref-type="bibr" rid="ref92">92</xref>].</p>
          <p>Although researchers recommend using DCNN for efficient segmentation tasks [<xref ref-type="bibr" rid="ref146">146</xref>], the existing limitations of DCNNs are insufficiency of feature extraction, weak generalization capability, and poor capability to recover low-context information, unlike GANs, which are used to alleviate these problems as in Jiang et al [<xref ref-type="bibr" rid="ref86">86</xref>], who proposed GAN with transfer learning, data augmentation, and skip connection concepts to overcome these challenges. Bisneto et al [<xref ref-type="bibr" rid="ref81">81</xref>] impressively improved glaucoma segmentation and classification results using GANs allied with texture attributes identified by taxonomic diversity indexes. They achieved promising results (sensitivity, specificity, and accuracy of up to 100%).</p>
          <p>For optimizing network complexity, Wu et al [<xref ref-type="bibr" rid="ref49">49</xref>] applied the attention gates technique in a standard GAN to encourage the propagation of features, promote reuse of features, and greatly reduce network parameters when paired with DenseNet instead of conversion layer. Alternatively, using dilated convolutions in the generative networks effectively expands the generator’s receptive field without increasing the number of calculations [<xref ref-type="bibr" rid="ref82">82</xref>]. Adversarial training has been shown to improve the long-range spatial label interaction without expanding the segmentation network’s complexity [<xref ref-type="bibr" rid="ref147">147</xref>].</p>
        </sec>
        <sec>
          <title>Motivations Related to Medical Centers</title>
          <p>We think the best medical treatment is achieved when the doctor–patient relationship is built on honesty and concern. DL cannot substitute real relationships, but can complement them [<xref ref-type="bibr" rid="ref104">104</xref>]. GAN architectures are versatile. For various training samples, the objective feature can be re-designed and more free model designs can be used [<xref ref-type="bibr" rid="ref98">98</xref>]. The extraordinary feature of GANs in the medical field is synthesizing high-quality images with global consistency (eg, color consistency and both BV and OD occupy the same proportional area as the real images) [<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref92">92</xref>]. Bisneto et al [<xref ref-type="bibr" rid="ref81">81</xref>] proposed a method that learns the mapping function between retinal landmarks (BV, OD, and OC) and synthesizes images using the 3 channels (RGB). Furthermore, the method exploits the merit of a large receptive field of GANs to generate good segmentation results [<xref ref-type="bibr" rid="ref82">82</xref>].</p>
          <p>Incorporating GAN techniques in the medical field helps enrich health care centers with various data and effectively solves data imbalance problem [<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref134">134</xref>]. As a result, this feature facilitates solving ethical issues surrounding patients’ privacy [<xref ref-type="bibr" rid="ref72">72</xref>], saves memory and time needed to collect images [<xref ref-type="bibr" rid="ref79">79</xref>], reduces costs [<xref ref-type="bibr" rid="ref88">88</xref>], and saturates the nature of data-hungry DLMs [<xref ref-type="bibr" rid="ref51">51</xref>].</p>
        </sec>
      </sec>
      <sec>
        <title>Recommendation</title>
        <p>In this section, we briefly include guidelines from the literature to alleviate existing challenges faced by researchers, doctors, medical centers, and patients, as well as present ways to achieve a correct diagnosis of retinal defects (<xref rid="figure12" ref-type="fig">Figure 12</xref>).</p>
        <fig id="figure12" position="float">
          <label>Figure 12</label>
          <caption>
            <p>Recommendations of using GANs-based methods in glaucoma screening. AI: artificial intelligence; CAD: computer-aided diagnosis; GAN: generative adversarial network; OC: optic cup; OD: optic disc; VAEGAN: variational autoencoder with GAN.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e27414_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Recommendations to Doctors and Medical Centers</title>
          <p>Higher-image resolutions significantly improve performance of GANs [<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref148">148</xref>]. The key factor in obtaining GAN’s high-quality synthetic outputs is the high-resolution paired images and the architecture of the generator [<xref ref-type="bibr" rid="ref46">46</xref>]. Moreover, annotation variety is more important than the actual number of annotations [<xref ref-type="bibr" rid="ref43">43</xref>]. Therefore, doctors must develop a public data set with high-resolution images that meet the quality assessment system [<xref ref-type="bibr" rid="ref105">105</xref>]. Furthermore, it must be accessible and include multiethnicities to ensure generalization capability [<xref ref-type="bibr" rid="ref108">108</xref>]. Besides, experts must validate deep learning models on the sizable heterogeneous population under different conditions [<xref ref-type="bibr" rid="ref52">52</xref>], as direct release of DL application without prior checking could be harmful [<xref ref-type="bibr" rid="ref55">55</xref>].</p>
          <p>To improve public health, reduce health care costs, and enhance patients’ perception, doctors shall adopt DL techniques in the medical field to tackle these challenges [<xref ref-type="bibr" rid="ref53">53</xref>]. Adopting deep learning applications in magnetic resonance imaging and X-ray image processing is an interesting area of research [<xref ref-type="bibr" rid="ref93">93</xref>]. All glaucoma studies emphasized the importance of CAD programs for early disease detection and for improvement of screening reliability [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
          <p>In the future, GANs may be utilized to speed up AI development and application, allowing AI to comprehend and explore the environment [<xref ref-type="bibr" rid="ref66">66</xref>]. Innovative and radical solutions for the health care system must be improved alongside glaucoma screening [<xref ref-type="bibr" rid="ref106">106</xref>]. Significant improvements in instrumentation and interpretation can lower the cost of glaucoma screening in the future. Embedding glaucoma AI algorithms in the electronic medical record will improve outpatient management [<xref ref-type="bibr" rid="ref107">107</xref>]. However, it is up to the physicians to lead the way in deciding how to incorporate AI in a new era of glaucoma management.</p>
          <p>Automated retinal imaging technologies can reduce barriers to access and monitoring of the health system. Thus, AI integration into ophthalmology can improve patient care [<xref ref-type="bibr" rid="ref56">56</xref>], help clinicians focus on patient relationships, and enhance health services [<xref ref-type="bibr" rid="ref104">104</xref>], all of which can decrease irreversible blindness [<xref ref-type="bibr" rid="ref54">54</xref>]. GANs can reduce the scarcity of manual data annotation and also be used as a clinical support tool [<xref ref-type="bibr" rid="ref72">72</xref>].</p>
        </sec>
        <sec>
          <title>Recommendations to Developers</title>
          <p>A CNN in a generative learner is used for image segmentation tasks and obtaining successful outcomes [<xref ref-type="bibr" rid="ref149">149</xref>]. GAN is an inclusive system that can be combined with various deep learning models to address problems that conventional ML algorithms cannot solve, such as poor quality of outputs, insufficient training samples, and deep feature extraction [<xref ref-type="bibr" rid="ref68">68</xref>]. Furthermore, it outperforms conventional methods in editing and synthesizing images [<xref ref-type="bibr" rid="ref69">69</xref>]. GAN allied with transfer learning can effectively reduce misjudgment of OD/OC in glaucoma cases and improve accuracy and generalization capability; however, better backbone network and different upsampling methods are required to improve performance [<xref ref-type="bibr" rid="ref86">86</xref>] and exploring other downstream tasks may enhance the model’s performance [<xref ref-type="bibr" rid="ref93">93</xref>]. Although there is a vast increase of GAN applications, further studies are required to improve its efficiency and performance [<xref ref-type="bibr" rid="ref70">70</xref>]. Incorporating spatial information, attention-based information, feature-maps information, and image channels (RGB) to improve network performance is a current research trend [<xref ref-type="bibr" rid="ref140">140</xref>].</p>
          <p>GANs can generate samples with distribution close to real data. Thus, they can be used in a systematic study of parallel systems [<xref ref-type="bibr" rid="ref96">96</xref>]. GANs or its variants remain the future trends for mitigating imbalanced learning through generating samples close to real data, or enhancing model performance when combined with VAEs [<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref72">72</xref>]. Thus, it is used as a sophisticated data augmentation technique to generate heterogeneous samples and ensure prognostic characteristics of images [<xref ref-type="bibr" rid="ref52">52</xref>].</p>
          <p>To date, only a few studies have experienced AI technologies in teleophthalmology [<xref ref-type="bibr" rid="ref56">56</xref>]; photography using smartphones can be used as a diagnostic tool for ocular diseases [<xref ref-type="bibr" rid="ref105">105</xref>]. Nowadays, there is a great need for remote disease monitoring and screening [<xref ref-type="bibr" rid="ref107">107</xref>], especially during the COVID-19 pandemic and the vast infection transmission [<xref ref-type="bibr" rid="ref150">150</xref>]. Thus, future study should emphasize deep learning and telemedicine/teleretinal as potential gamechangers in the eye-care field [<xref ref-type="bibr" rid="ref106">106</xref>].</p>
          <p>Wang et al [<xref ref-type="bibr" rid="ref89">89</xref>] proposed a very lightweight network architecture for joint OD and OC segmentation based on the MobileNetV2 backbone, which has few parameters and half testing time compared with the XCeption backbone, which promotes the network as a mobile app for glaucoma detection. Bisneto et al [<xref ref-type="bibr" rid="ref81">81</xref>] presented GAN and texture features for automatic detection of glaucoma, and achieved impressive results that reached up to 100% for sensitivity, specificity, and accuracy. The authors indicated a proposal to transfer their method into a mobile app in a future study.</p>
          <p>Future research should emphasize GANs and semisupervised learning for image synthesizing, aiming to improve the classification accuracy and the quality of the generated images simultaneously [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref90">90</xref>]. Adopting GANs in the medical field remains in its infancy, with no breakthrough application yet clinically implemented for GAN-based approaches [<xref ref-type="bibr" rid="ref95">95</xref>]. For better feature extraction, researchers must exploit full feature information on RGB channels, spatial structure, and geometry of landmarks [<xref ref-type="bibr" rid="ref83">83</xref>]. Semantic segmentation may reduce manual labeling effort [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref74">74</xref>] and enhance model performance when incorporated with WGAN domain adaptation [<xref ref-type="bibr" rid="ref79">79</xref>]. In ophthalmology diagnosis, adversarial domain adaptation can be an important and effective direction for future research [<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref151">151</xref>]. In addition, exploring the relationship between the quality of the generated image and the performance of the CAD system is needed [<xref ref-type="bibr" rid="ref46">46</xref>].</p>
          <p>With the envision to improve deep learning performance, preprocessing and postprocessing are essential for accurate segmentation [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]. Barros et al [<xref ref-type="bibr" rid="ref20">20</xref>] concluded that data set size has a huge impact on the results. However, Lahiri et al [<xref ref-type="bibr" rid="ref43">43</xref>] amazingly demonstrated that annotation diversity is more important than annotation count. GAN can make use of large amounts of unlabeled data [<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref87">87</xref>].</p>
          <p>Regarding GAN evaluation metrics, future studies should focus on more objective and systematic evaluation methods. However, further FID examination is required [<xref ref-type="bibr" rid="ref100">100</xref>]. Developing quantitative assessment metrics thus remains a crucial research direction [<xref ref-type="bibr" rid="ref152">152</xref>,<xref ref-type="bibr" rid="ref153">153</xref>]. Researchers should evaluate their segmentation performance on public data sets [<xref ref-type="bibr" rid="ref74">74</xref>] with heterogeneous and multimodal designs using less data-hungry algorithms [<xref ref-type="bibr" rid="ref105">105</xref>]. In addition, the performance of other classifiers (eg, XGBoost) and other cGAN architectures should be examined for faster and more accurate learning [<xref ref-type="bibr" rid="ref81">81</xref>].</p>
          <p>For glaucoma diagnosis, CDR and ISNT metrics present substantial information to be assessed [<xref ref-type="bibr" rid="ref20">20</xref>]. More studies are needed to assess the validity of ophthalmology applications to detect AMD, diabetic retinopathy, and glaucoma in terms of accuracy, sensitivity, and specificity [<xref ref-type="bibr" rid="ref54">54</xref>]. AUC, sensitivity, and specificity should be included in AI studies as the bare minimum [<xref ref-type="bibr" rid="ref53">53</xref>].</p>
          <p>Moreover, future research may utilize fine-tuning and data augmentation techniques to effectively improve model performance [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref86">86</xref>] and increase data set size for better training, and thus, synthesizing better classifiers [<xref ref-type="bibr" rid="ref77">77</xref>]. The strength of GANs lies in the discriminator [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]. Duplicating the generator’s structure improves robustness [<xref ref-type="bibr" rid="ref94">94</xref>]. Adding more network layers helps capture more in-depth features [<xref ref-type="bibr" rid="ref82">82</xref>]. Training and optimizing the model remain critical [<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref87">87</xref>], with regard to careful balancing between G and D [<xref ref-type="bibr" rid="ref92">92</xref>]. Patch-based images should be used as input for both G and D [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref84">84</xref>]. U-GAN instead of U-Net should be used to improve the model’s performance [<xref ref-type="bibr" rid="ref49">49</xref>]. Additionally, exploiting previous knowledge of vessel structure [<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref92">92</xref>] is critical for accurate segmentation [<xref ref-type="bibr" rid="ref91">91</xref>]. 
Objective function supported with various loss functions may enhance model performance [<xref ref-type="bibr" rid="ref84">84</xref>]; for example, WGAN-GP can avoid gradient disappearing and enhance training [<xref ref-type="bibr" rid="ref92">92</xref>], Dice coefficient loss function for segmenting hard images [<xref ref-type="bibr" rid="ref57">57</xref>], and least-squares loss function with dilated convolution can enhance small vessel segmentation [<xref ref-type="bibr" rid="ref42">42</xref>]. On top of that, topological structure loss can enhance the connectivity of A/V classification [<xref ref-type="bibr" rid="ref76">76</xref>], whereas binary cross-entropy loss function with false-negative loss function can improve training efficiency and increase segmentation robustness [<xref ref-type="bibr" rid="ref94">94</xref>]. Furthermore, an adversarial loss can reduce the domain overfitting [<xref ref-type="bibr" rid="ref154">154</xref>], and Wasserstein distance is preferable for domain adaptation, as it decreases the probability of mode collapse and avoids the gradient vanishing [<xref ref-type="bibr" rid="ref79">79</xref>]. Weight normalization along with average pooling is the best design setting when structured prediction is used with U-Net [<xref ref-type="bibr" rid="ref43">43</xref>]. Exploring a combination of different styles instead of training dedicated models for a particular style is necessary [<xref ref-type="bibr" rid="ref93">93</xref>]. MISH is a modern activation function that presented better results than ReLU on most current benchmark data sets [<xref ref-type="bibr" rid="ref155">155</xref>].</p>
          <p>To date, explainable DLMs for glaucoma screening utilizing retinal fundus images have not been proposed [<xref ref-type="bibr" rid="ref156">156</xref>]. Researchers should focus on relational and locational explanation using saliency maps, heatmaps, or other invented methods to provide plausible explanations of DL decisions.</p>
          <p>Lastly, future research should incorporate the distributed ML library GPipe proposed by Google [<xref ref-type="bibr" rid="ref157">157</xref>] to mitigate hardware limitations. This may help train large-sized models and enhance performance without tuning hyperparameters [<xref ref-type="bibr" rid="ref140">140</xref>].</p>
        </sec>
        <sec>
          <title>Recommendations to Patients</title>
          <p>Increasing the amount of data using a successful GANs synthesizer significantly preserves the privacy of patients [<xref ref-type="bibr" rid="ref72">72</xref>]. A good DLM offers timely treatment by providing a wealth of information regarding patients’ eye conditions [<xref ref-type="bibr" rid="ref49">49</xref>]. In the near future, AI can support telemedicine platforms by facilitating self-monitoring by patients through home-based diagnosis [<xref ref-type="bibr" rid="ref56">56</xref>]. The availability of cheap, handy smartphones may also serve as a remote diagnostic tool [<xref ref-type="bibr" rid="ref105">105</xref>]. This eventually could improve patients’ perception and satisfaction [<xref ref-type="bibr" rid="ref53">53</xref>], as well as encourage continuous follow-up and treatment [<xref ref-type="bibr" rid="ref106">106</xref>].</p>
        </sec>
      </sec>
      <sec>
        <title>New Direction of DL</title>
        <p>Recently, DLMs have achieved positive retinal disease identification and segmentation outcomes. These technologies can revolutionize our way of life, and, probably in the next few decades, the field of medicine will change rapidly [<xref ref-type="bibr" rid="ref53">53</xref>]. However, these techniques involve expensive hardware (eg, GPU requirements) and are greedy for images by nature. Thus, more advanced data augmentation techniques must be introduced to create heterogeneous samples while preserving the prognostic features of fundus images. A possible approach in this regard is to explore GANs [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref158">158</xref>]. Building systematic deep learning models trained on heterogeneous and multimodal data with fewer data-hungry algorithms can boost the effectiveness of AI in clinical settings [<xref ref-type="bibr" rid="ref105">105</xref>]. Additionally, AI algorithms should be incorporated into electronic medical records to promote outpatient management, which is another fascinating subject [<xref ref-type="bibr" rid="ref107">107</xref>].</p>
        <p>From the viewpoints of accessibility, cost-effectiveness, and health care protection, there is a tremendous need to promote remote glaucoma monitoring in developed countries and rural communities, allow patients with glaucoma to obtain their own IOP data with anesthesia-free and reliable tonometers [<xref ref-type="bibr" rid="ref113">113</xref>], and enable home-based evaluation and disease control (eg, rendering home tonometry accessible at a lower cost). Most importantly, within the current situation of the COVID-19 pandemic, new directions for DLMs can be implemented via teleretinal screening apps in ophthalmic settings to maintain maximum protection for both physicians and patients at a lower cost.</p>
        <p>Improving the quality of diagnosis in terms of class imbalance, refining the training phases of GANs, and enhancing the computation time to better diagnose glaucoma variants remain obstacles [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]. Furthermore, it is necessary to note that GANs have not been used to diagnose difficult retinal disease to date, and GAN evaluation metrics are yet another challenging path of study [<xref ref-type="bibr" rid="ref68">68</xref>].</p>
        <p>Finally, combining GANs with other approaches is another prospective research approach; for example, the fusion of GANs with reinforcement learning, function learning, or conventional learning to create new AI applications and facilitate the advancement of these methods is also worth investigating [<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref98">98</xref>].</p>
      </sec>
      <sec>
        <title>Limitations of the Study</title>
        <p>The most important limitation of our analysis is the number and identification of the source databases; however, the selected works form a reasonable and broadly representative selection of the chosen sources. Furthermore, the exclusion of other retinal diseases besides glaucoma, due to its severity worldwide, is considered another limitation. In addition, a quick view of the research activities on this critical retinal disease and GANs does not necessarily reflect the research community’s response.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>Providing adequate health services to people with retinal disorders has been a global issue. Studies are still ongoing to diagnose retinal disorders using deep learning; however, papers adopting GANs for glaucoma detection are not as abundant as those utilizing DL or ML methods. Consequently, insights into this emerging area are needed. Six papers [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref29">29</xref>] have worked on glaucoma classification–based GANs, and the majority tended to use GANs for segmentation or synthesizing retinal images.</p>
        <p>The contribution of this study lies in analyzing and taxonomizing literature works in the field of glaucoma detection using GAN-based methods. To the best of our knowledge, all the previous studies generally discussed AI or DL effects on retinal diseases, and none particularly surveyed GANs for glaucoma detection. This makes our work the first to address this emerging technique.</p>
        <p>According to our taxonomy, the majority of the collected papers paid more attention to single landmark segmentation (eg, BVs) than to the segmentation of multiple landmarks. Some techniques were of tremendous or little interest (eg, the DCGAN and cGANs). Researchers worked in this field, identified their difficulties, and suggested recommendations to overcome the current and expected challenges. Other studies focused on improving GAN architectures rather than adopting them for diagnosis. To date, there has been no specific work adopting a GAN as a smartphone app or in telemedicine. Therefore, filling this gap is important for both patients and physicians to ensure fewer physical meetings during the global COVID-19 pandemic. Furthermore, new directions in this field have been explained.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Segmentation papers.</p>
        <media xlink:href="jmir_v23i9e27414_app1.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 17 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Segmentation and classification papers.</p>
        <media xlink:href="jmir_v23i9e27414_app2.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Classification papers.</p>
        <media xlink:href="jmir_v23i9e27414_app3.docx" xlink:title="DOCX File , 21 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Synthesizing images papers.</p>
        <media xlink:href="jmir_v23i9e27414_app4.docx" xlink:title="DOCX File , 23 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>Synthesizing, segmentation, and classification paper.</p>
        <media xlink:href="jmir_v23i9e27414_app5.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app6">
        <label>Multimedia Appendix 6</label>
        <p>Synthesizing and classification papers.</p>
        <media xlink:href="jmir_v23i9e27414_app6.docx" xlink:title="DOCX File , 21 KB"/>
      </supplementary-material>
      <supplementary-material id="app7">
        <label>Multimedia Appendix 7</label>
        <p>Segmentation and synthesizing images papers.</p>
        <media xlink:href="jmir_v23i9e27414_app7.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app8">
        <label>Multimedia Appendix 8</label>
        <p>T-shaped matrix diagram.</p>
        <media xlink:href="jmir_v23i9e27414_app8.docx" xlink:title="DOCX File , 135 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">BV</term>
          <def>
            <p>blood vessel</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CDR</term>
          <def>
            <p>cup-to-disc ratio rule</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">cGANs</term>
          <def>
            <p>conditional generative adversarial networks</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DCGAN</term>
          <def>
            <p>deep convolutional generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">DLM</term>
          <def>
            <p>deep learning method</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">DRIVE data set</term>
          <def>
            <p>digital retinal image for vessels extraction</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">GANs</term>
          <def>
            <p>generative adversarial networks</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">ISNT</term>
          <def>
            <p>inferior, superior, nasal, and temporal rule</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">LAG data set</term>
          <def>
            <p>large-scale-attention-based glaucoma</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">LSGAN</term>
          <def>
            <p>least-square GAN</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">OC</term>
          <def>
            <p>optic cup</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">OD</term>
          <def>
            <p>optic disc</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">ONH</term>
          <def>
            <p>optic nerve head</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">REFUGE data set</term>
          <def>
            <p>retinal fundus glaucoma challenge</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">RGB</term>
          <def>
            <p>red green blue</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb17">RL</term>
          <def>
            <p>rim loss</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb18">RNFL</term>
          <def>
            <p>retinal nerve fiber layer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb19">ROI</term>
          <def>
            <p>region of interest</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb20">STARE data set</term>
          <def>
            <p>structured analysis of the retina</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb21">VAEGAN</term>
          <def>
            <p>variational autoencoder with GAN</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb22">WGAN</term>
          <def>
            <p>Wasserstein GAN</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb23">WGAN-GP</term>
          <def>
            <p>Wasserstein GAN-gradient penalty</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>A long-term research grant scheme (LRGS/1/2019/UKM-UKM/2/7) supports this work.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Flaxman</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Bourne</surname>
              <given-names>RRA</given-names>
            </name>
            <name name-style="western">
              <surname>Resnikoff</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ackland</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Braithwaite</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cicinelli</surname>
              <given-names>MV</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jonas</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Keeffe</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kempen</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Leasher</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Limburg</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Naidoo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Pesudovs</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Silvester</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stevens</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Tahhan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>HR</given-names>
            </name>
            <collab>Vision Loss Expert Group of the Global Burden of Disease Study</collab>
          </person-group>
          <article-title>Global causes of blindness and distance vision impairment 1990-2020: a systematic review and meta-analysis</article-title>
          <source>Lancet Glob Health</source>
          <year>2017</year>
          <month>12</month>
          <volume>5</volume>
          <issue>12</issue>
          <fpage>e1221</fpage>
          <lpage>e1234</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2214-109X(17)30393-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2214-109X(17)30393-5</pub-id>
          <pub-id pub-id-type="medline">29032195</pub-id>
          <pub-id pub-id-type="pii">S2214-109X(17)30393-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pascolini</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mariotti</surname>
              <given-names>SP</given-names>
            </name>
          </person-group>
          <article-title>Global estimates of visual impairment: 2010</article-title>
          <source>Br J Ophthalmol</source>
          <year>2012</year>
          <month>05</month>
          <volume>96</volume>
          <issue>5</issue>
          <fpage>614</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/22133988/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bjophthalmol-2011-300539</pub-id>
          <pub-id pub-id-type="medline">22133988</pub-id>
          <pub-id pub-id-type="pii">bjophthalmol-2011-300539</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Carvalho Junior</surname>
              <given-names>ASV</given-names>
            </name>
            <name name-style="western">
              <surname>Carvalho</surname>
              <given-names>ED</given-names>
            </name>
            <name name-style="western">
              <surname>de Carvalho Filho</surname>
              <given-names>AO</given-names>
            </name>
            <name name-style="western">
              <surname>de Sousa</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Corrêa Silva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gattass</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic methods for diagnosis of glaucoma using texture descriptors based on phylogenetic diversity</article-title>
          <source>Computers &#38; Electrical Engineering</source>
          <year>2018</year>
          <month>10</month>
          <volume>71</volume>
          <fpage>102</fpage>
          <lpage>114</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.sciencedirect.com/science/article/abs/pii/S0045790617338570"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.compeleceng.2018.07.028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wood</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Glaucoma: the silent thief of sight</article-title>
          <source>The Lamp</source>
          <year>1995</year>
          <month>09</month>
          <volume>52</volume>
          <issue>8</issue>
          <fpage>15</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.semanticscholar.org/paper/Glaucoma%3A-the-silent-thief-of-sight.-Wood/3083cbab3f3199dce22551bd75171c21c8b71cf9#citing-papers"/>
          </comment>
          <pub-id pub-id-type="medline">7500740</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Villain</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>[The epidemiology of glaucoma]</article-title>
          <source>J Fr Ophtalmol</source>
          <year>2005</year>
          <month>06</month>
          <volume>28 Spec No 2</volume>
          <fpage>2S9</fpage>
          <lpage>2S12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/16208234/"/>
          </comment>
          <pub-id pub-id-type="medline">16208234</pub-id>
          <pub-id pub-id-type="pii">MDOI-JFO-06-2005-28-HS2-0181-5512-101019-200506342</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cumming</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Attebo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Panchapakesan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Prevalence of cataract in Australia: the Blue Mountains eye study</article-title>
          <source>Ophthalmology</source>
          <year>1997</year>
          <month>04</month>
          <volume>104</volume>
          <issue>4</issue>
          <fpage>581</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/9111249/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/s0161-6420(97)30266-8</pub-id>
          <pub-id pub-id-type="medline">9111249</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(97)30266-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Michelson</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wärntges</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hornegger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lausen</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>The papilla as screening parameter for early diagnosis of glaucoma</article-title>
          <source>Dtsch Arztebl Int</source>
          <year>2008</year>
          <month>08</month>
          <volume>105</volume>
          <issue>34-35</issue>
          <fpage>583</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3238/arztebl.2008.0583"/>
          </comment>
          <pub-id pub-id-type="doi">10.3238/arztebl.2008.0583</pub-id>
          <pub-id pub-id-type="medline">19471619</pub-id>
          <pub-id pub-id-type="pmcid">PMC2680559</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Costagliola</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>dell'Omo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Romano</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Rinaldi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zeppa</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Parmeggiani</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Pharmacotherapy of intraocular pressure - part II. Carbonic anhydrase inhibitors, prostaglandin analogues and prostamides</article-title>
          <source>Expert Opin Pharmacother</source>
          <year>2009</year>
          <month>12</month>
          <volume>10</volume>
          <issue>17</issue>
          <fpage>2859</fpage>
          <lpage>70</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/19929706/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1517/14656560903300129</pub-id>
          <pub-id pub-id-type="medline">19929706</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Quigley</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Broman</surname>
              <given-names>AT</given-names>
            </name>
          </person-group>
          <article-title>The number of people with glaucoma worldwide in 2010 and 2020</article-title>
          <source>Br J Ophthalmol</source>
          <year>2006</year>
          <month>03</month>
          <volume>90</volume>
          <issue>3</issue>
          <fpage>262</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/16488940"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bjo.2005.081224</pub-id>
          <pub-id pub-id-type="medline">16488940</pub-id>
          <pub-id pub-id-type="pii">90/3/262</pub-id>
          <pub-id pub-id-type="pmcid">PMC1856963</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tham</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Quigley</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Aung</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Global prevalence of glaucoma and projections of glaucoma burden through 2040: a systematic review and meta-analysis</article-title>
          <source>Ophthalmology</source>
          <year>2014</year>
          <month>11</month>
          <volume>121</volume>
          <issue>11</issue>
          <fpage>2081</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/24974815/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2014.05.013</pub-id>
          <pub-id pub-id-type="medline">24974815</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(14)00433-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Yahya</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Mohamad</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>C.A</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Risk Factors for Cataract: A Case Study at National University of Malaysia Hospital</article-title>
          <source>Jurnal Sains Kesihatan Malaysia (Malaysian Journal of Health Sciences)</source>
          <year>2012</year>
          <month>10</month>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>85</fpage>
          <lpage>98</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kingman</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Glaucoma is second leading cause of blindness globally</article-title>
          <source>Bull World Health Organ</source>
          <year>2004</year>
          <month>11</month>
          <volume>82</volume>
          <issue>11</issue>
          <fpage>887</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/15640929"/>
          </comment>
          <pub-id pub-id-type="medline">15640929</pub-id>
          <pub-id pub-id-type="pii">S0042-96862004001100019</pub-id>
          <pub-id pub-id-type="pmcid">PMC2623060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Foster</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Loo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rosman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Loon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>WL</given-names>
            </name>
            <name name-style="western">
              <surname>Saw</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aung</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The prevalence and types of glaucoma in malay people: the Singapore Malay eye study</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2008</year>
          <month>09</month>
          <volume>49</volume>
          <issue>9</issue>
          <fpage>3846</fpage>
          <lpage>51</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/18441307/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1167/iovs.08-1759</pub-id>
          <pub-id pub-id-type="medline">18441307</pub-id>
          <pub-id pub-id-type="pii">iovs.08-1759</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vijaya</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>George</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Paul</surname>
              <given-names>PG</given-names>
            </name>
            <name name-style="western">
              <surname>Baskaran</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Arvind</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Raju</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ramesh</surname>
              <given-names>SV</given-names>
            </name>
            <name name-style="western">
              <surname>Kumaramanickavel</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>McCarty</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Prevalence of open-angle glaucoma in a rural south Indian population</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2005</year>
          <month>12</month>
          <volume>46</volume>
          <issue>12</issue>
          <fpage>4461</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubmed.ncbi.nlm.nih.gov/16303934/"/>
          </comment>
          <pub-id pub-id-type="doi">10.1167/iovs.04-1529</pub-id>
          <pub-id pub-id-type="medline">16303934</pub-id>
          <pub-id pub-id-type="pii">46/12/4461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nazri</surname>
              <given-names>MZA</given-names>
            </name>
            <name name-style="western">
              <surname>Kurniawan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh-Abdullah</surname>
              <given-names>SNH</given-names>
            </name>
            <name name-style="western">
              <surname>Othman</surname>
              <given-names>ZA</given-names>
            </name>
            <name name-style="western">
              <surname>Abdullah</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Bayesian network and Dempster-Shafer theory for early diagnosis of eye diseases</article-title>
          <source>COMPUSOFT</source>
          <year>2020</year>
          <month>04</month>
          <day>30</day>
          <volume>9</volume>
          <issue>4</issue>
          <fpage>A</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Centre for Eye Research Australia</collab>
          </person-group>
          <source>Tunnel Vision-The Economic Impact of Primary Open Angle Glaucoma</source>
          <year>2011</year>
          <month>07</month>
          <day>19</day>
          <access-date>2020-06-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.icoph.org/resources/249/Tunnel-Vision-The-Economic-Impact-of-Primary-Open-Angle-Glaucoma.html">http://www.icoph.org/resources/249/Tunnel-Vision-The-Economic-Impact-of-Primary-Open-Angle-Glaucoma.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garway-Heath</surname>
              <given-names>DF</given-names>
            </name>
            <name name-style="western">
              <surname>Hitchings</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Quantitative evaluation of the optic nerve head in early glaucoma</article-title>
          <source>Br J Ophthalmol</source>
          <year>1998</year>
          <month>04</month>
          <volume>82</volume>
          <issue>4</issue>
          <fpage>352</fpage>
          <lpage>61</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bjo.bmj.com/lookup/pmidlookup?view=long&#38;pmid=9640180"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bjo.82.4.352</pub-id>
          <pub-id pub-id-type="medline">9640180</pub-id>
          <pub-id pub-id-type="pmcid">PMC1722573</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thakur</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Juneja</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Survey of classification approaches for glaucoma diagnosis from retinal images</article-title>
          <source>Advanced Computing and Communication Technologies. Advances in Intelligent Systems and Computing, vol 562</source>
          <year>2017</year>
          <month>10</month>
          <day>25</day>
          <publisher-loc>Singapore</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>91</fpage>
          <lpage>99</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Azzopardi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jansonius</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Petkov</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Automatic Determination of Vertical Cup-to-Disc Ratio in Retinal Fundus Images for Glaucoma Screening</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>8527</fpage>
          <lpage>8541</lpage>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2890544</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barros</surname>
              <given-names>DMS</given-names>
            </name>
            <name name-style="western">
              <surname>Moura</surname>
              <given-names>JCC</given-names>
            </name>
            <name name-style="western">
              <surname>Freire</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Taleb</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Valentim</surname>
              <given-names>RAM</given-names>
            </name>
            <name name-style="western">
              <surname>Morais</surname>
              <given-names>PSG</given-names>
            </name>
          </person-group>
          <article-title>Machine learning applied to retinal image processing for glaucoma detection: review and perspective</article-title>
          <source>Biomed Eng Online</source>
          <year>2020</year>
          <month>04</month>
          <day>15</day>
          <volume>19</volume>
          <issue>1</issue>
          <fpage>20</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://biomedical-engineering-online.biomedcentral.com/articles/10.1186/s12938-020-00767-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12938-020-00767-2</pub-id>
          <pub-id pub-id-type="medline">32293466</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12938-020-00767-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC7160894</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>JointRCNN: A Region-Based Convolutional Neural Network for Optic Disc and Cup Segmentation</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2020</year>
          <month>02</month>
          <volume>67</volume>
          <issue>2</issue>
          <fpage>335</fpage>
          <lpage>343</lpage>
          <pub-id pub-id-type="doi">10.1109/TBME.2019.2913211</pub-id>
          <pub-id pub-id-type="medline">31021760</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>DWK</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tao</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Aung</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>Superpixel classification based optic disc and optic cup segmentation for glaucoma screening</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2013</year>
          <month>06</month>
          <volume>32</volume>
          <issue>6</issue>
          <fpage>1019</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2013.2247770</pub-id>
          <pub-id pub-id-type="medline">23434609</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abràmoff</surname>
              <given-names>Michael D</given-names>
            </name>
            <name name-style="western">
              <surname>Garvin</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Sonka</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Retinal imaging and image analysis</article-title>
          <source>IEEE Rev Biomed Eng</source>
          <year>2010</year>
          <volume>3</volume>
          <fpage>169</fpage>
          <lpage>208</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22275207"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/RBME.2010.2084567</pub-id>
          <pub-id pub-id-type="medline">22275207</pub-id>
          <pub-id pub-id-type="pmcid">PMC3131209</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akram</surname>
              <given-names>MU</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khalid</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Javed</surname>
              <given-names>MY</given-names>
            </name>
            <name name-style="western">
              <surname>Abbas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yasin</surname>
              <given-names>UU</given-names>
            </name>
          </person-group>
          <article-title>Glaucoma detection using novel optic disc localization, hybrid feature set and classification techniques</article-title>
          <source>Australas Phys Eng Sci Med</source>
          <year>2015</year>
          <month>12</month>
          <volume>38</volume>
          <issue>4</issue>
          <fpage>643</fpage>
          <lpage>55</lpage>
          <pub-id pub-id-type="doi">10.1007/s13246-015-0377-y</pub-id>
          <pub-id pub-id-type="medline">26399880</pub-id>
          <pub-id pub-id-type="pii">10.1007/s13246-015-0377-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>DK</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>Level-set based automatic cup-to-disc ratio determination using retinal fundus images in ARGALI</article-title>
          <source>Annu Int Conf IEEE Eng Med Biol Soc</source>
          <year>2008</year>
          <volume>2008</volume>
          <fpage>2266</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1109/IEMBS.2008.4649648</pub-id>
          <pub-id pub-id-type="medline">19163151</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khatami</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khosravi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Nahavandi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Medical image analysis using wavelet transform and deep belief networks</article-title>
          <source>Expert Systems with Applications</source>
          <year>2017</year>
          <month>11</month>
          <day>15</day>
          <volume>86</volume>
          <fpage>190</fpage>
          <lpage>198</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.sciencedirect.com/science/article/abs/pii/S0957417417304013"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2017.05.073</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>GD</given-names>
            </name>
            <name name-style="western">
              <surname>Sivaswamy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Krishnadas</surname>
              <given-names>SR</given-names>
            </name>
          </person-group>
          <article-title>Optic disk and cup segmentation from monocular color retinal images for glaucoma assessment</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2011</year>
          <month>06</month>
          <volume>30</volume>
          <issue>6</issue>
          <fpage>1192</fpage>
          <lpage>205</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2011.2106509</pub-id>
          <pub-id pub-id-type="medline">21536531</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Stambolian</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gee</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Optic disc and cup segmentation from color fundus photograph using graph cut with priors</article-title>
          <source>Med Image Comput Comput Assist Interv</source>
          <year>2013</year>
          <volume>16</volume>
          <issue>Pt 2</issue>
          <fpage>75</fpage>
          <lpage>82</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/24579126"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-642-40763-5_10</pub-id>
          <pub-id pub-id-type="medline">24579126</pub-id>
          <pub-id pub-id-type="pmcid">PMC4165089</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roychowdhury</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Koozekanani</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Parhi</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Iterative Vessel Segmentation of Fundus Images</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2015</year>
          <month>07</month>
          <volume>62</volume>
          <issue>7</issue>
          <fpage>1738</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1109/TBME.2015.2403295</pub-id>
          <pub-id pub-id-type="medline">25700436</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Câmara Neto</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ramalho</surname>
              <given-names>GL</given-names>
            </name>
            <name name-style="western">
              <surname>Rocha Neto</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Veras</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Medeiros</surname>
              <given-names>FN</given-names>
            </name>
          </person-group>
          <article-title>An unsupervised coarse-to-fine algorithm for blood vessel segmentation in fundus images</article-title>
          <source>Expert Systems with Applications</source>
          <year>2017</year>
          <month>07</month>
          <day>15</day>
          <volume>78</volume>
          <fpage>182</fpage>
          <lpage>192</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.sciencedirect.com/science/article/abs/pii/S0957417417300970"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2017.02.015</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kurniawan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nazri</surname>
              <given-names>MZA</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh-Abdullah</surname>
              <given-names>SNH</given-names>
            </name>
            <name name-style="western">
              <surname>Hamzah</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Yendra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Oktaviana</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Automatic Rule Generator via FP-Growth for Eye Diseases Diagnosis</article-title>
          <source>International Journal on Advanced Science, Engineering and Information Technology</source>
          <year>2019</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>960</fpage>
          <lpage>966</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://ijaseit.insightsociety.org/index.php?option=com_content&#38;view=article&#38;id=9&#38;Itemid=1&#38;article_id=7025"/>
          </comment>
          <pub-id pub-id-type="doi">10.18517/ijaseit.9.3.7025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>PS</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>An Introduction to Image Synthesis with Generative Adversarial Nets</article-title>
          <source>Computer Vision and Pattern Recognition</source>
          <year>2018</year>
          <month>11</month>
          <day>17</day>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1803.04469"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krizhevsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hinton</surname>
              <given-names>GE</given-names>
            </name>
          </person-group>
          <article-title>ImageNet classification with deep convolutional neural networks</article-title>
          <source>Commun ACM</source>
          <year>2017</year>
          <month>05</month>
          <day>24</day>
          <volume>60</volume>
          <issue>6</issue>
          <fpage>84</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.1145/3065386"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3065386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2017</year>
          <month>06</month>
          <volume>39</volume>
          <issue>6</issue>
          <fpage>1137</fpage>
          <lpage>1149</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2016.2577031</pub-id>
          <pub-id pub-id-type="medline">27295650</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russakovsky</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Krause</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Satheesh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Karpathy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khosla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bernstein</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Berg</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Fei-Fei</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>ImageNet Large Scale Visual Recognition Challenge</article-title>
          <source>Int J Comput Vis</source>
          <year>2015</year>
          <month>4</month>
          <day>11</day>
          <volume>115</volume>
          <issue>3</issue>
          <fpage>211</fpage>
          <lpage>252</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s11263-015-0816-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11263-015-0816-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szegedy</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sermanet</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Reed</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Anguelov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Erhan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vanhoucke</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Rabinovich</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Going deeper with convolutions</article-title>
          <year>2015</year>
          <month>10</month>
          <day>15</day>
          <conf-name>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>2015</conf-date>
          <conf-loc>Boston, MA, USA</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/7298594"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/CVPR.2015.7298594</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Maaten</surname>
              <given-names>LVD</given-names>
            </name>
            <name name-style="western">
              <surname>Weinberger</surname>
              <given-names>KQ</given-names>
            </name>
          </person-group>
          <article-title>Densely Connected Convolutional Networks</article-title>
          <year>2017</year>
          <month>11</month>
          <day>9</day>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2261</fpage>
          <lpage>2269</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/8099726"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/CVPR.2017.243</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dasgupta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A fully convolutional neural network based structured prediction approach towards the retinal vessel segmentation</article-title>
          <year>2017</year>
          <month>6</month>
          <day>19</day>
          <conf-name>2017 IEEE 14th International Symposium on Biomedical Imaging (ISBI 2017)</conf-name>
          <conf-date>18-21 April 2017</conf-date>
          <conf-loc>Melbourne, VIC, Australia</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>248</fpage>
          <lpage>251</lpage>
          <pub-id pub-id-type="doi">10.1109/isbi.2017.7950512</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maninis</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Pont-Tuset</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Arbeláez</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Van Gool</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Ourselin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Joskowicz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sabuncu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Unal</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wells</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Deep retinal image understanding</article-title>
          <source>Medical Image Computing and Computer-Assisted Intervention – MICCAI 2016. MICCAI 2016. Lecture Notes in Computer Science, vol 9901</source>
          <year>2016</year>
          <month>10</month>
          <day>02</day>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>140</fpage>
          <lpage>148</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xiuqin</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A Fundus Retinal Vessels Segmentation Scheme Based on the Improved Deep Learning U-Net Model</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <month>08</month>
          <day>13</day>
          <volume>7</volume>
          <fpage>122634</fpage>
          <lpage>122643</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/8796382"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2935138</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ojala</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pietikainen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Maenpaa</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Multiresolution gray-scale and rotation invariant texture classification with local binary patterns</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2002</year>
          <month>07</month>
          <volume>24</volume>
          <issue>7</issue>
          <fpage>971</fpage>
          <lpage>987</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/1017623/authors#authors"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2002.1017623</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Retinal vessel segmentation based on Generative Adversarial network and Dilated convolution</article-title>
          <year>2019</year>
          <month>09</month>
          <day>23</day>
          <conf-name>2019 14th International Conference on Computer Science &#38; Education (ICCSE)</conf-name>
          <conf-date>19-21 August 2019</conf-date>
          <conf-loc>Toronto, ON, Canada</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>282</fpage>
          <lpage>287</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCSE.2019.8845491</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lahiri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Mondal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Biswas</surname>
              <given-names>PK</given-names>
            </name>
          </person-group>
          <article-title>Retinal vessel segmentation under extreme low annotation: a gan-based semi-supervised approach</article-title>
          <year>2020</year>
          <month>09</month>
          <day>30</day>
          <conf-name>2020 IEEE International Conference on Image Processing (ICIP)</conf-name>
          <conf-date>25-28 October 2020</conf-date>
          <conf-loc>Abu Dhabi, UAE</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>418</fpage>
          <lpage>422</lpage>
          <pub-id pub-id-type="doi">10.1109/icip40778.2020.9190882</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodfellow</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pouget-Abadie</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mirza</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Warde-Farley</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ozair</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial networks</article-title>
          <source>Commun. ACM</source>
          <year>2020</year>
          <month>10</month>
          <day>22</day>
          <volume>63</volume>
          <issue>11</issue>
          <fpage>139</fpage>
          <lpage>144</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3422622"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3422622</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Isola</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Efros</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Image-to-image translation with conditional adversarial networks</article-title>
          <year>2017</year>
          <month>11</month>
          <day>09</day>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>21-26 July 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>5967</fpage>
          <lpage>5976</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/8100115"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/CVPR.2017.632</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xiang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Retinal image synthesis from multiple-landmarks input with generative adversarial networks</article-title>
          <source>Biomed Eng Online</source>
          <year>2019</year>
          <month>05</month>
          <day>21</day>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>62</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://biomedical-engineering-online.biomedcentral.com/articles/10.1186/s12938-019-0682-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12938-019-0682-x</pub-id>
          <pub-id pub-id-type="medline">31113438</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12938-019-0682-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC6528202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Isola</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Efros</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks</article-title>
          <year>2017</year>
          <month>12</month>
          <day>25</day>
          <conf-name>2017 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>22-29 October 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2242</fpage>
          <lpage>2251</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCV.2017.244</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shrivastava</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pfister</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tuzel</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Susskind</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Webb</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Learning from Simulated and Unsupervised Images through Adversarial Training</article-title>
          <year>2017</year>
          <month>11</month>
          <day>09</day>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>21-26 July 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2107</fpage>
          <lpage>2116</lpage>
          <pub-id pub-id-type="doi">10.1109/CVPR.2017.241</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>U-GAN: generative adversarial networks with U-Net for retinal vessel segmentation</article-title>
          <year>2019</year>
          <month>09</month>
          <day>23</day>
          <conf-name>2019 14th International Conference on Computer Science &#38; Education (ICCSE)</conf-name>
          <conf-date>19-21 August 2019</conf-date>
          <conf-loc>Toronto, ON, Canada</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>642</fpage>
          <lpage>646</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCSE.2019.8845397</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lahiri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ayush</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Biswas</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Mitra</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial learning for reducing manual annotation in semantic segmentation on large scale microscopy images: automated vessel segmentation in retinal fundus image as test case</article-title>
          <year>2017</year>
          <month>08</month>
          <day>24</day>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</conf-name>
          <conf-date>21-26 July 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>794</fpage>
          <lpage>800</lpage>
          <pub-id pub-id-type="doi">10.1109/CVPRW.2017.110</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haoqi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ogawara</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>CGAN-based synthetic medical image augmentation between retinal fundus images and vessel segmented images</article-title>
          <year>2020</year>
          <month>05</month>
          <day>19</day>
          <conf-name>2020 5th International Conference on Control and Robotics Engineering (ICCRE)</conf-name>
          <conf-date>24-26 April 2020</conf-date>
          <conf-loc>Osaka, Japan</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>218</fpage>
          <lpage>223</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCRE49379.2020.9096438</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Asiri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al Adel</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Alzaidi</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Deep learning based computer-aided diagnosis systems for diabetic retinopathy: A survey</article-title>
          <source>Artif Intell Med</source>
          <year>2019</year>
          <month>08</month>
          <volume>99</volume>
          <fpage>101701</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2019.07.009</pub-id>
          <pub-id pub-id-type="medline">31606116</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(18)30760-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Varadarajan</surname>
              <given-names>AV</given-names>
            </name>
            <name name-style="western">
              <surname>Keane</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Burlina</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Chiang</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Schmetterer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Pasquale</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Bressler</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Abramoff</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in ophthalmology: The technical and clinical considerations</article-title>
          <source>Prog Retin Eye Res</source>
          <year>2019</year>
          <month>09</month>
          <volume>72</volume>
          <fpage>100759</fpage>
          <pub-id pub-id-type="doi">10.1016/j.preteyeres.2019.04.003</pub-id>
          <pub-id pub-id-type="medline">31048019</pub-id>
          <pub-id pub-id-type="pii">S1350-9462(18)30090-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Balyen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Peto</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Promising Artificial Intelligence-Machine Learning-Deep Learning Algorithms in Ophthalmology</article-title>
          <source>Asia Pac J Ophthalmol (Phila)</source>
          <year>2019</year>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>264</fpage>
          <lpage>272</lpage>
          <pub-id pub-id-type="doi">10.22608/APO.2018479</pub-id>
          <pub-id pub-id-type="medline">31149787</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hogarty</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Mackey</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Hewitt</surname>
              <given-names>AW</given-names>
            </name>
          </person-group>
          <article-title>Current state and future prospects of artificial intelligence in ophthalmology: a review</article-title>
          <source>Clin Exp Ophthalmol</source>
          <year>2019</year>
          <month>01</month>
          <volume>47</volume>
          <issue>1</issue>
          <fpage>128</fpage>
          <lpage>139</lpage>
          <pub-id pub-id-type="doi">10.1111/ceo.13381</pub-id>
          <pub-id pub-id-type="medline">30155978</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSJ</given-names>
            </name>
            <name name-style="western">
              <surname>Foo</surname>
              <given-names>VH</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>LWY</given-names>
            </name>
            <name name-style="western">
              <surname>Sia</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Ang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chodosh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mehta</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence for anterior segment diseases: Emerging applications in ophthalmology</article-title>
          <source>Br J Ophthalmol</source>
          <year>2021</year>
          <month>02</month>
          <volume>105</volume>
          <issue>2</issue>
          <fpage>158</fpage>
          <lpage>168</lpage>
          <pub-id pub-id-type="doi">10.1136/bjophthalmol-2019-315651</pub-id>
          <pub-id pub-id-type="medline">32532762</pub-id>
          <pub-id pub-id-type="pii">bjophthalmol-2019-315651</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Optic disc segmentation in fundus images using adversarial training</article-title>
          <source>IET Image Processing</source>
          <year>2019</year>
          <month>02</month>
          <day>1</day>
          <volume>13</volume>
          <issue>2</issue>
          <fpage>375</fpage>
          <lpage>381</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ietresearch.onlinelibrary.wiley.com/doi/10.1049/iet-ipr.2018.5922"/>
          </comment>
          <pub-id pub-id-type="doi">10.1049/iet-ipr.2018.5922</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Diaz-Pinto</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Colomer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Naranjo</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Morales</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>Retinal Image Synthesis for Glaucoma Assessment Using DCGAN and VAE Models</article-title>
          <source>Yin H., Camacho D., Novais P., Tallón-Ballesteros A. (eds) Intelligent Data Engineering and Automated Learning – IDEAL 2018. IDEAL 2018. Lecture Notes in Computer Science</source>
          <year>2018</year>
          <month>11</month>
          <day>09</day>
          <volume>11314</volume>
          <fpage>224</fpage>
          <lpage>232</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/chapter/10.1007%2F978-3-030-03493-1_24"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-030-03493-1_24</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radford</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Metz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks</article-title>
          <source>4th International Conference on Learning Representations, ICLR 2016 - Conference Track Proceedings</source>
          <year>2016</year>
          <month>01</month>
          <day>07</day>
          <fpage>1</fpage>
          <lpage>16</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1511.06434"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arjovsky</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bottou</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Wasserstein Generative Adversarial Networks</article-title>
          <year>2017</year>
          <conf-name>Proceedings of the 34th International Conference on Machine Learning</conf-name>
          <conf-date>6-11 August 2017</conf-date>
          <conf-loc>Sydney, Australia</conf-loc>
          <fpage>214</fpage>
          <lpage>223</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gulrajani</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Arjovsky</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dumoulin</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Improved training of wasserstein GANs</article-title>
          <source>Adv Neural Inf Process Syst</source>
          <year>2017</year>
          <month>03</month>
          <day>31</day>
          <fpage>5768</fpage>
          <lpage>5778</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1704.00028"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>RY</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Smolley</surname>
              <given-names>SP</given-names>
            </name>
          </person-group>
          <article-title>Least Squares Generative Adversarial Networks</article-title>
          <year>2017</year>
          <month>12</month>
          <day>25</day>
          <conf-name>2017 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>22-29 October 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2813</fpage>
          <lpage>2821</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCV.2017.304</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Larsen</surname>
              <given-names>ABL</given-names>
            </name>
            <name name-style="western">
              <surname>Sønderby</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Larochelle</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Winther</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Autoencoding beyond pixels using a learned similarity metric</article-title>
          <year>2016</year>
          <conf-name>Proceedings of The 33rd International Conference on Machine Learning</conf-name>
          <conf-date>20-22 June 2016</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <fpage>1558</fpage>
          <lpage>1566</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yas</surname>
              <given-names>QM</given-names>
            </name>
            <name name-style="western">
              <surname>Zaidan</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Zaidan</surname>
              <given-names>BB</given-names>
            </name>
            <name name-style="western">
              <surname>Hashim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>CK</given-names>
            </name>
          </person-group>
          <article-title>A Systematic Review on Smartphone Skin Cancer Apps: Coherent Taxonomy, Motivations, Open Challenges and Recommendations, and New Research Direction</article-title>
          <source>J CIRCUIT SYST COMP</source>
          <year>2018</year>
          <month>02</month>
          <day>06</day>
          <volume>27</volume>
          <issue>05</issue>
          <fpage>1830003</fpage>
          <lpage>40</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.worldscientific.com/doi/abs/10.1142/S0218126618300039"/>
          </comment>
          <pub-id pub-id-type="doi">10.1142/S0218126618300039</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Recent Advance On Generative Adversarial Networks</article-title>
          <year>2018</year>
          <month>11</month>
          <day>12</day>
          <conf-name>2018 International Conference on Machine Learning and Cybernetics (ICMLC)</conf-name>
          <conf-date>15-18 July 2018</conf-date>
          <conf-loc>Chengdu, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>69</fpage>
          <lpage>74</lpage>
          <pub-id pub-id-type="doi">10.1109/ICMLC.2018.8526990</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gonog</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A review: generative adversarial networks</article-title>
          <year>2019</year>
          <month>09</month>
          <day>16</day>
          <conf-name>2019 14th IEEE Conference on Industrial Electronics and Applications (ICIEA)</conf-name>
          <conf-date>19-21 June 2019</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>505</fpage>
          <lpage>510</lpage>
          <pub-id pub-id-type="doi">10.1109/ICIEA.2019.8833686</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ke</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Recent Advances of Image Steganography With Generative Adversarial Networks</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>60575</fpage>
          <lpage>60597</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/9046754"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2983175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Recent Advances of Generative Adversarial Networks in Computer Vision</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>14985</fpage>
          <lpage>15006</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/8576508"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2886814</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A Survey of Generative Adversarial Networks</article-title>
          <year>2019</year>
          <month>01</month>
          <day>24</day>
          <conf-name>2018 Chinese Automation Congress (CAC)</conf-name>
          <conf-date>30 November to 2 December 2018</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2768</fpage>
          <lpage>2773</lpage>
          <pub-id pub-id-type="doi">10.1109/CAC.2018.8623645</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dhawan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A Detailed Study on Generative Adversarial Networks</article-title>
          <year>2020</year>
          <month>07</month>
          <day>10</day>
          <conf-name>2020 5th International Conference on Communication and Electronics Systems (ICCES)</conf-name>
          <conf-date>10-12 June 2020</conf-date>
          <conf-loc>Coimbatore, Tamil Nadu, India</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>641</fpage>
          <lpage>645</lpage>
          <pub-id pub-id-type="doi">10.1109/ICCES48766.2020.9137883</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Houthooft</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Schulman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Abbeel</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets</article-title>
          <source>Advances in Neural Information Processing Systems</source>
          <year>2016</year>
          <month>12</month>
          <day>5</day>
          <fpage>2180</fpage>
          <lpage>2188</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1606.03657"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sengupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Leopold</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Gulati</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lakshminarayanan</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Ophthalmic diagnosis using deep learning with fundus images - A critical review</article-title>
          <source>Artif Intell Med</source>
          <year>2020</year>
          <month>01</month>
          <volume>102</volume>
          <fpage>101758</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2019.101758</pub-id>
          <pub-id pub-id-type="medline">31980096</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(19)30585-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Pathology-Aware Deep Network Visualization and Its Application in Glaucoma Image Synthesis</article-title>
          <source>Shen D. et al. (eds) Medical Image Computing and Computer Assisted Intervention – MICCAI 2019. MICCAI 2019. Lecture Notes in Computer Science</source>
          <year>2019</year>
          <month>10</month>
          <day>10</day>
          <volume>11764</volume>
          <fpage>423</fpage>
          <lpage>431</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/978-3-030-32239-7_47"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-030-32239-7_47</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Joint optic disc and cup segmentation using semi-supervised conditional GANs</article-title>
          <source>Comput Biol Med</source>
          <year>2019</year>
          <month>12</month>
          <volume>115</volume>
          <fpage>103485</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2019.103485</pub-id>
          <pub-id pub-id-type="medline">31630029</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(19)30354-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>AF-SEG: An Annotation-Free Approach for Image Segmentation by Self-Supervision and Generative Adversarial Network</article-title>
          <year>2020</year>
          <month>05</month>
          <day>22</day>
          <conf-name>2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI)</conf-name>
          <conf-date>3-7 April 2020</conf-date>
          <conf-loc>Iowa City, IA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1503</fpage>
          <lpage>1507</lpage>
          <pub-id pub-id-type="doi">10.1109/ISBI45749.2020.9098535</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Tao</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Fully Automatic Arteriovenous Segmentation in Retinal Images via Topology-Aware Generative Adversarial Networks</article-title>
          <source>Interdiscip Sci</source>
          <year>2020</year>
          <month>09</month>
          <volume>12</volume>
          <issue>3</issue>
          <fpage>323</fpage>
          <lpage>334</lpage>
          <pub-id pub-id-type="doi">10.1007/s12539-020-00385-5</pub-id>
          <pub-id pub-id-type="medline">32725575</pub-id>
          <pub-id pub-id-type="pii">10.1007/s12539-020-00385-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Min</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Conditional Adversarial Transfer for Glaucoma Diagnosis</article-title>
          <source>Annu Int Conf IEEE Eng Med Biol Soc</source>
          <year>2019</year>
          <month>07</month>
          <volume>2019</volume>
          <fpage>2032</fpage>
          <lpage>2035</lpage>
          <pub-id pub-id-type="doi">10.1109/EMBC.2019.8857308</pub-id>
          <pub-id pub-id-type="medline">31946300</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>DRPAN: A novel Adversarial Network Approach for Retinal Vessel Segmentation</article-title>
          <year>2019</year>
          <month>09</month>
          <day>16</day>
          <conf-name>2019 14th IEEE Conference on Industrial Electronics and Applications (ICIEA)</conf-name>
          <conf-date>19-21 June 2019</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>228</fpage>
          <lpage>232</lpage>
          <pub-id pub-id-type="doi">10.1109/ICIEA.2019.8833908</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kadambi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>WGAN domain adaptation for the joint optic disc-and-cup segmentation in fundus images</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2020</year>
          <month>07</month>
          <volume>15</volume>
          <issue>7</issue>
          <fpage>1205</fpage>
          <lpage>1213</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-020-02144-9</pub-id>
          <pub-id pub-id-type="medline">32445127</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-020-02144-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Son</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Towards Accurate Segmentation of Retinal Vessels and the Optic Disc in Fundoscopic Images with Generative Adversarial Networks</article-title>
          <source>J Digit Imaging</source>
          <year>2019</year>
          <month>06</month>
          <volume>32</volume>
          <issue>3</issue>
          <fpage>499</fpage>
          <lpage>512</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30291477"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10278-018-0126-3</pub-id>
          <pub-id pub-id-type="medline">30291477</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10278-018-0126-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC6499859</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bisneto</surname>
              <given-names>TRV</given-names>
            </name>
            <name name-style="western">
              <surname>de Carvalho Filho</surname>
              <given-names>AO</given-names>
            </name>
            <name name-style="western">
              <surname>Magalhães</surname>
              <given-names>DMV</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial network and texture features applied to automatic glaucoma detection</article-title>
          <source>Applied Soft Computing</source>
          <year>2020</year>
          <month>05</month>
          <volume>90</volume>
          <fpage>106165</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.asoc.2020.106165"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.asoc.2020.106165</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Qiu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>High-quality retinal vessel segmentation using generative adversarial network with a large receptive field</article-title>
          <source>Int J Imaging Syst Technol</source>
          <year>2020</year>
          <month>04</month>
          <day>09</day>
          <volume>30</volume>
          <issue>3</issue>
          <fpage>828</fpage>
          <lpage>842</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1002/ima.22428"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/ima.22428</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A Novel Adaptive Weighted Loss Design in Adversarial Learning for Retinal Nerve Fiber Layer Defect Segmentation</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>132348</fpage>
          <lpage>132359</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/9141229"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3009442</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rammy</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Abbas</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Raza</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>CPGAN: Conditional patch-based generative adversarial network for retinal vessel segmentation</article-title>
          <source>IET image process</source>
          <year>2020</year>
          <month>04</month>
          <day>10</day>
          <volume>14</volume>
          <issue>6</issue>
          <fpage>1081</fpage>
          <lpage>1090</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1049/iet-ipr.2019.1007"/>
          </comment>
          <pub-id pub-id-type="doi">10.1049/iet-ipr.2019.1007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Deep supervision adversarial learning network for retinal vessel segmentation</article-title>
          <year>2020</year>
          <month>01</month>
          <day>23</day>
          <conf-name>2019 12th International Congress on Image and Signal Processing, BioMedical Engineering and Informatics (CISP-BMEI)</conf-name>
          <conf-date>19-21 October 2019</conf-date>
          <conf-loc>Suzhou, China</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/CISP-BMEI48845.2019.8965924</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Optic Disc and Cup Segmentation Based on Deep Convolutional Generative Adversarial Networks</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <month>05</month>
          <day>17</day>
          <volume>7</volume>
          <fpage>64483</fpage>
          <lpage>64493</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/ACCESS.2019.2917508"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2917508</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Costa</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Galdran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>MI</given-names>
            </name>
            <name name-style="western">
              <surname>Niemeijer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abramoff</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mendonca</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Campilho</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>End-to-End Adversarial Retinal Image Synthesis</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2018</year>
          <month>03</month>
          <volume>37</volume>
          <issue>3</issue>
          <fpage>781</fpage>
          <lpage>791</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/tmi.2017.2759102"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TMI.2017.2759102</pub-id>
          <pub-id pub-id-type="medline">28981409</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Generative Adversarial Network for Medical Images (MI-GAN)</article-title>
          <source>J Med Syst</source>
          <year>2018</year>
          <month>10</month>
          <day>12</day>
          <volume>42</volume>
          <issue>11</issue>
          <fpage>231</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s10916-018-1072-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10916-018-1072-9</pub-id>
          <pub-id pub-id-type="medline">30315368</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-018-1072-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Heng</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Patch-Based Output Space Adversarial Learning for Joint Optic Disc and Cup Segmentation</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2019</year>
          <month>11</month>
          <volume>38</volume>
          <issue>11</issue>
          <fpage>2485</fpage>
          <lpage>2495</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/tmi.2019.2899910"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TMI.2019.2899910</pub-id>
          <pub-id pub-id-type="medline">30794170</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Diaz-Pinto</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Colomer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Naranjo</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Morales</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>Retinal Image Synthesis and Semi-Supervised Learning for Glaucoma Assessment</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2019</year>
          <month>09</month>
          <volume>38</volume>
          <issue>9</issue>
          <fpage>2211</fpage>
          <lpage>2218</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/tmi.2019.2903434"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TMI.2019.2903434</pub-id>
          <pub-id pub-id-type="medline">30843823</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Fundus image segmentation based on improved generative adversarial network for retinal vessel analysis</article-title>
          <year>2020</year>
          <month>07</month>
          <day>09</day>
          <conf-name>2020 3rd International Conference on Artificial Intelligence and Big Data (ICAIBD)</conf-name>
          <conf-date>28-31 May 2020</conf-date>
          <conf-loc>Chengdu, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>231</fpage>
          <lpage>236</lpage>
          <pub-id pub-id-type="doi">10.1109/icaibd49809.2020.9137459</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Biswas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rohdin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Drahansky</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Synthetic Retinal Images from Unconditional GANs</article-title>
          <source>Annu Int Conf IEEE Eng Med Biol Soc</source>
          <year>2019</year>
          <month>07</month>
          <volume>2019</volume>
          <fpage>2736</fpage>
          <lpage>2739</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/embc.2019.8857857"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/EMBC.2019.8857857</pub-id>
          <pub-id pub-id-type="medline">31946460</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Maurer-Stroh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Synthesizing retinal and neuronal images with generative adversarial nets</article-title>
          <source>Med Image Anal</source>
          <year>2018</year>
          <month>10</month>
          <volume>49</volume>
          <fpage>14</fpage>
          <lpage>26</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.media.2018.07.001"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2018.07.001</pub-id>
          <pub-id pub-id-type="medline">30007254</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(18)30459-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JY</given-names>
            </name>
          </person-group>
          <article-title>M-GAN: Retinal Blood Vessel Segmentation by Balancing Losses Through Stacked Deep Fully Convolutional Networks</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>146308</fpage>
          <lpage>146322</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/ACCESS.2020.3015108"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3015108</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yi</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Walia</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Babyn</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial network in medical imaging: A review</article-title>
          <source>Med Image Anal</source>
          <year>2019</year>
          <month>12</month>
          <volume>58</volume>
          <fpage>101552</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.media.2019.101552"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2019.101552</pub-id>
          <pub-id pub-id-type="medline">31521965</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(18)30843-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial networks: introduction and outlook</article-title>
          <source>IEEE/CAA Journal of Automatica Sinica</source>
          <year>2017</year>
          <volume>4</volume>
          <issue>4</issue>
          <fpage>588</fpage>
          <lpage>598</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/JAS.2017.7510583"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/JAS.2017.7510583</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>A survey of image synthesis and editing with generative adversarial networks</article-title>
          <source>Tsinghua Science and Technology</source>
          <year>2017</year>
          <month>12</month>
          <volume>22</volume>
          <issue>6</issue>
          <fpage>660</fpage>
          <lpage>674</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.23919/TST.2017.8195348"/>
          </comment>
          <pub-id pub-id-type="doi">10.23919/TST.2017.8195348</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Review and Prospect of Research on Generative Adversarial Networks</article-title>
          <year>2019</year>
          <month>11</month>
          <day>21</day>
          <conf-name>2019 IEEE 11th International Conference on Communication Software and Networks (ICCSN)</conf-name>
          <conf-date>12-15 June 2019</conf-date>
          <conf-loc>Chongqing, China</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>726</fpage>
          <lpage>730</lpage>
          <pub-id pub-id-type="doi">10.1109/iccsn.2019.8905263</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref99">
        <label>99</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Creswell</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Dumoulin</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Arulkumaran</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sengupta</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bharath</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Generative Adversarial Networks: An Overview</article-title>
          <source>IEEE Signal Processing Magazine</source>
          <year>2018</year>
          <month>1</month>
          <volume>35</volume>
          <issue>1</issue>
          <fpage>53</fpage>
          <lpage>65</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/MSP.2017.2765202"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/MSP.2017.2765202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref100">
        <label>100</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lucic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kurach</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Michalski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gelly</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bousquet</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Are GANs Created Equal? A Large-Scale Study</article-title>
          <year>2018</year>
          <month>10</month>
          <day>29</day>
          <conf-name>NIPS'18: Proceedings of the 32nd International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 3-8, 2018</conf-date>
          <conf-loc>Montréal, QC, Canada</conf-loc>
          <publisher-loc>Red Hook, NY</publisher-loc>
          <publisher-name>Curran Associates Inc</publisher-name>
          <fpage>698</fpage>
          <lpage>707</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref101">
        <label>101</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kurach</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lucic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhai</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Michalski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gelly</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The GAN Landscape: Losses, Architectures, Regularization, and Normalization</article-title>
          <year>2018</year>
          <month>09</month>
          <day>28</day>
          <conf-name>ICLR 2019 Conference</conf-name>
          <conf-date>May 6-9, 2019</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openreview.net/forum?id=rkGG6s0qKQ"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref102">
        <label>102</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turhan</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Bilge</surname>
              <given-names>HS</given-names>
            </name>
          </person-group>
          <article-title>Recent trends in deep generative models: a review</article-title>
          <year>2018</year>
          <month>12</month>
          <day>10</day>
          <conf-name>2018 3rd International Conference on Computer Science and Engineering (UBMK)</conf-name>
          <conf-date>20-23 September 2018</conf-date>
          <conf-loc>Sarajevo, Bosnia and Herzegovina</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>574</fpage>
          <lpage>579</lpage>
          <pub-id pub-id-type="doi">10.1109/UBMK.2018.8566353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref103">
        <label>103</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oussidi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Elhassouny</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep generative models: survey</article-title>
          <year>2018</year>
          <month>05</month>
          <day>07</day>
          <conf-name>2018 International Conference on Intelligent Systems and Computer Vision (ISCV)</conf-name>
          <conf-date>2-4 April 2018</conf-date>
          <conf-loc>Fez, Morocco</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/ISACV.2018.8354080</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref104">
        <label>104</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grewal</surname>
              <given-names>PS</given-names>
            </name>
            <name name-style="western">
              <surname>Oloumi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Rubin</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Tennant</surname>
              <given-names>MTS</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in ophthalmology: a review</article-title>
          <source>Can J Ophthalmol</source>
          <year>2018</year>
          <month>08</month>
          <volume>53</volume>
          <issue>4</issue>
          <fpage>309</fpage>
          <lpage>313</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.jcjo.2018.04.019"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jcjo.2018.04.019</pub-id>
          <pub-id pub-id-type="medline">30119782</pub-id>
          <pub-id pub-id-type="pii">S0008-4182(18)30214-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref105">
        <label>105</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Application of artificial intelligence in anterior segment ophthalmic diseases: diversity and standardization</article-title>
          <source>Ann Transl Med</source>
          <year>2020</year>
          <month>06</month>
          <volume>8</volume>
          <issue>11</issue>
          <fpage>714</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.21037/atm-20-976"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/atm-20-976</pub-id>
          <pub-id pub-id-type="medline">32617334</pub-id>
          <pub-id pub-id-type="pii">atm-08-11-714</pub-id>
          <pub-id pub-id-type="pmcid">PMC7327317</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref106">
        <label>106</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>NYQ</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Stalmans</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>IIK</given-names>
            </name>
            <name name-style="western">
              <surname>Sng</surname>
              <given-names>CCA</given-names>
            </name>
          </person-group>
          <article-title>Glaucoma screening: where are we and where do we need to go?</article-title>
          <source>Curr Opin Ophthalmol</source>
          <year>2020</year>
          <month>03</month>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>91</fpage>
          <lpage>100</lpage>
          <pub-id pub-id-type="doi">10.1097/ICU.0000000000000649</pub-id>
          <pub-id pub-id-type="medline">31904596</pub-id>
          <pub-id pub-id-type="pii">00055735-202003000-00004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref107">
        <label>107</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mayro</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Elze</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pasquale</surname>
              <given-names>LR</given-names>
            </name>
          </person-group>
          <article-title>The impact of artificial intelligence in the diagnosis and management of glaucoma</article-title>
          <source>Eye (Lond)</source>
          <year>2020</year>
          <month>01</month>
          <volume>34</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31541215"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41433-019-0577-x</pub-id>
          <pub-id pub-id-type="medline">31541215</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41433-019-0577-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC7002653</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref108">
        <label>108</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Poly</surname>
              <given-names>TN</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Atique</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>YJ</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning for Accurate Diagnosis of Glaucomatous Optic Neuropathy Using Digital Fundus Image: A Meta-Analysis</article-title>
          <source>Stud Health Technol Inform</source>
          <year>2020</year>
          <month>06</month>
          <day>16</day>
          <volume>270</volume>
          <fpage>153</fpage>
          <lpage>157</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3233/shti200141"/>
          </comment>
          <pub-id pub-id-type="doi">10.3233/SHTI200141</pub-id>
          <pub-id pub-id-type="medline">32570365</pub-id>
          <pub-id pub-id-type="pii">SHTI200141</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref109">
        <label>109</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mustapha</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Retinal Nerve Fiber Layer Thickness Post-Laser Treatment in Diabetic Retinopathy: Argon versus Pattern Scanning Laser</article-title>
          <source>Medicine &#38; Health</source>
          <year>2016</year>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>199</fpage>
          <lpage>208</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.medicineandhealthukm.com/article/retinal-nerve-fiber-layer-thickness-post-laser-treatment-diabetic-retinopathy-argon-versus-p"/>
          </comment>
          <pub-id pub-id-type="doi">10.17576/mh.2016.1102.09</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref110">
        <label>110</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Varma</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Goldberg</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kotak</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>An assessment of the health and economic burdens of glaucoma</article-title>
          <source>Am J Ophthalmol</source>
          <year>2011</year>
          <month>10</month>
          <volume>152</volume>
          <issue>4</issue>
          <fpage>515</fpage>
          <lpage>522</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21961848"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ajo.2011.06.004</pub-id>
          <pub-id pub-id-type="medline">21961848</pub-id>
          <pub-id pub-id-type="pii">S0002-9394(11)00475-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC3206636</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref111">
        <label>111</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shibata</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tanito</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mitsuhashi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Fujino</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Matsuura</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Murata</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Asaoka</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Development of a deep residual learning algorithm to screen for glaucoma from fundus photography</article-title>
          <source>Sci Rep</source>
          <year>2018</year>
          <month>10</month>
          <day>02</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>14665</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-018-33013-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-018-33013-w</pub-id>
          <pub-id pub-id-type="medline">30279554</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-018-33013-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC6168579</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref112">
        <label>112</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
            <name name-style="western">
              <surname>Pasquale</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>AY</given-names>
            </name>
            <name name-style="western">
              <surname>Raman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>GSW</given-names>
            </name>
            <name name-style="western">
              <surname>Schmetterer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Keane</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and deep learning in ophthalmology</article-title>
          <source>Br J Ophthalmol</source>
          <year>2019</year>
          <month>02</month>
          <volume>103</volume>
          <issue>2</issue>
          <fpage>167</fpage>
          <lpage>175</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://bjo.bmj.com/lookup/pmidlookup?view=long&#38;pmid=30361278"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bjophthalmol-2018-313173</pub-id>
          <pub-id pub-id-type="medline">30361278</pub-id>
          <pub-id pub-id-type="pii">bjophthalmol-2018-313173</pub-id>
          <pub-id pub-id-type="pmcid">PMC6362807</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref113">
        <label>113</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cvenkel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Velkovska</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Self-monitoring of intraocular pressure using Icare HOME tonometry in clinical practice</article-title>
          <source>Clin Ophthalmol</source>
          <year>2019</year>
          <month>05</month>
          <day>10</day>
          <volume>13</volume>
          <fpage>841</fpage>
          <lpage>847</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.doi.org/10.2147/OPTH.S198846"/>
          </comment>
          <pub-id pub-id-type="doi">10.2147/OPTH.S198846</pub-id>
          <pub-id pub-id-type="medline">31190727</pub-id>
          <pub-id pub-id-type="pii">198846</pub-id>
          <pub-id pub-id-type="pmcid">PMC6520593</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref114">
        <label>114</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zapata</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Royo-Fibla</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Font</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Vela</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Marcantonio</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Moya-Sánchez</surname>
              <given-names>EU</given-names>
            </name>
            <name name-style="western">
              <surname>Sánchez-Pérez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia-Gasulla</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cortés</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Ayguadé</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Labarta</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Artificial Intelligence to Identify Retinal Fundus Images, Quality Validation, Laterality Evaluation, Macular Degeneration, and Suspected Glaucoma</article-title>
          <source>Clin Ophthalmol</source>
          <year>2020</year>
          <month>02</month>
          <day>13</day>
          <volume>14</volume>
          <fpage>419</fpage>
          <lpage>429</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.doi.org/10.2147/OPTH.S235751"/>
          </comment>
          <pub-id pub-id-type="doi">10.2147/OPTH.S235751</pub-id>
          <pub-id pub-id-type="medline">32103888</pub-id>
          <pub-id pub-id-type="pii">235751</pub-id>
          <pub-id pub-id-type="pmcid">PMC7025650</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref115">
        <label>115</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zou</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Weakly-Supervised Simultaneous Evidence Identification and Segmentation for Automated Glaucoma Diagnosis</article-title>
          <source>AAAI</source>
          <year>2019</year>
          <month>07</month>
          <day>17</day>
          <volume>33</volume>
          <issue>1</issue>
          <fpage>809</fpage>
          <lpage>816</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1609/aaai.v33i01.3301809"/>
          </comment>
          <pub-id pub-id-type="doi">10.1609/aaai.v33i01.3301809</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref116">
        <label>116</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Direct Cup-to-Disc Ratio Estimation for Glaucoma Screening via Semi-Supervised Learning</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2020</year>
          <month>04</month>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>1104</fpage>
          <lpage>1113</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2019.2934477</pub-id>
          <pub-id pub-id-type="medline">31403451</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref117">
        <label>117</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A new convolutional neural network model for peripapillary atrophy area segmentation from retinal fundus images</article-title>
          <source>Applied Soft Computing</source>
          <year>2020</year>
          <month>01</month>
          <volume>86</volume>
          <fpage>105890</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.asoc.2019.105890"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.asoc.2019.105890</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref118">
        <label>118</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Glaucoma diagnosis based on both hidden features and domain knowledge through deep learning models</article-title>
          <source>Knowledge-Based Systems</source>
          <year>2018</year>
          <month>12</month>
          <day>1</day>
          <volume>161</volume>
          <fpage>147</fpage>
          <lpage>156</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.knosys.2018.07.043"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.knosys.2018.07.043</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref119">
        <label>119</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Keel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>RT</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Efficacy of a Deep Learning System for Detecting Glaucomatous Optic Neuropathy Based on Color Fundus Photographs</article-title>
          <source>Ophthalmology</source>
          <year>2018</year>
          <month>08</month>
          <volume>125</volume>
          <issue>8</issue>
          <fpage>1199</fpage>
          <lpage>1206</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2018.01.023</pub-id>
          <pub-id pub-id-type="medline">29506863</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(17)33565-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref120">
        <label>120</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Sung</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Efficacy for Differentiating Nonglaucomatous Versus Glaucomatous Optic Neuropathy Using Deep Learning Systems</article-title>
          <source>Am J Ophthalmol</source>
          <year>2020</year>
          <month>08</month>
          <volume>216</volume>
          <fpage>140</fpage>
          <lpage>146</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ajo.2020.03.035</pub-id>
          <pub-id pub-id-type="medline">32247778</pub-id>
          <pub-id pub-id-type="pii">S0002-9394(20)30146-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref121">
        <label>121</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Phene</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dunn</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Hammel</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Krause</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kitade</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Schaekermann</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sayres</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bora</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Semturs</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Spitze</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Medeiros</surname>
              <given-names>FA</given-names>
            </name>
            <name name-style="western">
              <surname>Maa</surname>
              <given-names>AY</given-names>
            </name>
            <name name-style="western">
              <surname>Gandhi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning and Glaucoma Specialists: The Relative Importance of Optic Disc Features to Predict Glaucoma Referral in Fundus Photographs</article-title>
          <source>Ophthalmology</source>
          <year>2019</year>
          <month>12</month>
          <volume>126</volume>
          <issue>12</issue>
          <fpage>1627</fpage>
          <lpage>1639</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0161-6420(19)31875-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2019.07.024</pub-id>
          <pub-id pub-id-type="medline">31561879</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(19)31875-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref122">
        <label>122</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blumberg</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>De Moraes</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Liebmann</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Garg</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Theventhiran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hood</surname>
              <given-names>DC</given-names>
            </name>
          </person-group>
          <article-title>Technology and the Glaucoma Suspect</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2016</year>
          <month>07</month>
          <day>01</day>
          <volume>57</volume>
          <issue>9</issue>
          <fpage>OCT80</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27409509"/>
          </comment>
          <pub-id pub-id-type="doi">10.1167/iovs.15-18931</pub-id>
          <pub-id pub-id-type="medline">27409509</pub-id>
          <pub-id pub-id-type="pii">2534093</pub-id>
          <pub-id pub-id-type="pmcid">PMC5995486</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref123">
        <label>123</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bhatkalkar</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Prabhu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bhandary</surname>
              <given-names>SV</given-names>
            </name>
          </person-group>
          <article-title>Improving the Performance of Convolutional Neural Network for the Segmentation of Optic Disc in Fundus Images Using Attention Gates and Conditional Random Fields</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <month>02</month>
          <day>07</day>
          <volume>8</volume>
          <fpage>29299</fpage>
          <lpage>29310</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/ACCESS.2020.2972318"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2972318</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref124">
        <label>124</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Graham</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Schulz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kalloniatis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zangerl</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chua</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Arvind</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Grigg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Klistorner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>You</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A Deep Learning-Based Algorithm Identifies Glaucomatous Discs Using Monoscopic Fundus Photographs</article-title>
          <source>Ophthalmol Glaucoma</source>
          <year>2018</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>15</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ogla.2018.04.002</pub-id>
          <pub-id pub-id-type="medline">32672627</pub-id>
          <pub-id pub-id-type="pii">S2589-4196(18)30012-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref125">
        <label>125</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Christopher</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Belghith</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bowd</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Proudfoot</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Goldbaum</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Weinreb</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Girkin</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Liebmann</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Zangwill</surname>
              <given-names>LM</given-names>
            </name>
          </person-group>
          <article-title>Performance of Deep Learning Architectures and Transfer Learning for Detecting Glaucomatous Optic Neuropathy in Fundus Photographs</article-title>
          <source>Sci Rep</source>
          <year>2018</year>
          <month>11</month>
          <day>12</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>16685</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-018-35044-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-018-35044-9</pub-id>
          <pub-id pub-id-type="medline">30420630</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-018-35044-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6232132</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref126">
        <label>126</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>CE-Net: Context Encoder Network for 2D Medical Image Segmentation</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2019</year>
          <month>10</month>
          <volume>38</volume>
          <issue>10</issue>
          <fpage>2281</fpage>
          <lpage>2292</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2019.2903562</pub-id>
          <pub-id pub-id-type="medline">30843824</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref127">
        <label>127</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Serener</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Serte</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Transfer Learning for Early and Advanced Glaucoma Detection with Convolutional Neural Networks</article-title>
          <year>2019</year>
          <month>11</month>
          <day>11</day>
          <conf-name>2019 Medical Technologies Congress (TIPTEKNO)</conf-name>
          <conf-date>3-5 October 2019</conf-date>
          <conf-loc>Izmir, Turkey</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1109/TIPTEKNO.2019.8894965</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref128">
        <label>128</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based automated detection of glaucomatous optic neuropathy on color fundus photographs</article-title>
          <source>Graefes Arch Clin Exp Ophthalmol</source>
          <year>2020</year>
          <month>04</month>
          <volume>258</volume>
          <issue>4</issue>
          <fpage>851</fpage>
          <lpage>867</lpage>
          <pub-id pub-id-type="doi">10.1007/s00417-020-04609-8</pub-id>
          <pub-id pub-id-type="medline">31989285</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00417-020-04609-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref129">
        <label>129</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>DWK</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Disc-Aware Ensemble Network for Glaucoma Screening From Fundus Image</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2018</year>
          <month>11</month>
          <volume>37</volume>
          <issue>11</issue>
          <fpage>2493</fpage>
          <lpage>2501</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2018.2837012</pub-id>
          <pub-id pub-id-type="medline">29994764</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref130">
        <label>130</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mustapha</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Retinal Nerve Fibre Layer Thickness Changes after Pan-Retinal Photocoagulation in Diabetic Retinopathy</article-title>
          <source>JSA</source>
          <year>2016</year>
          <month>06</month>
          <day>03</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>4</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://journalarticle.ukm.my/9828/"/>
          </comment>
          <pub-id pub-id-type="doi">10.17845/jsa.2016.0601.02</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref131">
        <label>131</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russakoff</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Mannil</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Oakley</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Ran</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Dasari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Riyazzuddin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nagaraj</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>RT</given-names>
            </name>
          </person-group>
          <article-title>A 3D Deep Learning System for Detecting Referable Glaucoma Using Full OCT Macular Cube Scans</article-title>
          <source>Transl Vis Sci Technol</source>
          <year>2020</year>
          <month>02</month>
          <day>18</day>
          <volume>9</volume>
          <issue>2</issue>
          <fpage>12</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tvst.arvojournals.org/article.aspx?doi=10.1167/tvst.9.2.12"/>
          </comment>
          <pub-id pub-id-type="doi">10.1167/tvst.9.2.12</pub-id>
          <pub-id pub-id-type="medline">32704418</pub-id>
          <pub-id pub-id-type="pii">TVST-19-2000</pub-id>
          <pub-id pub-id-type="pmcid">PMC7347026</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref132">
        <label>132</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Orlando</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Barbosa Breda</surname>
              <given-names>João</given-names>
            </name>
            <name name-style="western">
              <surname>van Keer</surname>
              <given-names>Karel</given-names>
            </name>
            <name name-style="western">
              <surname>Bathula</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Diaz-Pinto</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Heng</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Murugesan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Naranjo</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Phaye</surname>
              <given-names>SSR</given-names>
            </name>
            <name name-style="western">
              <surname>Shankaranarayana</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Sikka</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Son</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>van den Hengel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bogunović</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>REFUGE Challenge: A unified framework for evaluating automated methods for glaucoma assessment from fundus photographs</article-title>
          <source>Med Image Anal</source>
          <year>2020</year>
          <month>01</month>
          <volume>59</volume>
          <fpage>101570</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2019.101570</pub-id>
          <pub-id pub-id-type="medline">31630011</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(19)30110-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref133">
        <label>133</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xiong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>An approach to locate optic disc in retinal images with pathological changes</article-title>
          <source>Comput Med Imaging Graph</source>
          <year>2016</year>
          <month>01</month>
          <volume>47</volume>
          <fpage>40</fpage>
          <lpage>50</lpage>
          <pub-id pub-id-type="doi">10.1016/j.compmedimag.2015.10.003</pub-id>
          <pub-id pub-id-type="medline">26650403</pub-id>
          <pub-id pub-id-type="pii">S0895-6111(15)00144-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref134">
        <label>134</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Qu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>DSC</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Intelligent Glaucoma Diagnosis Via Active Learning And Adversarial Data Augmentation</article-title>
          <source>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</source>
          <year>2019</year>
          <month>07</month>
          <day>11</day>
          <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name>
          <conf-date>8-11 April 2019</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1234</fpage>
          <lpage>1237</lpage>
          <pub-id pub-id-type="doi">10.1109/ISBI.2019.8759178</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref135">
        <label>135</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Multi-path convolutional neural network in fundus segmentation of blood vessels</article-title>
          <source>Biocybernetics and Biomedical Engineering</source>
          <year>2020</year>
          <month>04</month>
          <volume>40</volume>
          <issue>2</issue>
          <fpage>583</fpage>
          <lpage>595</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.bbe.2020.01.011"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.bbe.2020.01.011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref136">
        <label>136</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wormstone</surname>
              <given-names>IM</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mou</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zangwill</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Moghimi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Bowd</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tham</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Weinreb</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Development and Validation of a Deep Learning System to Detect Glaucomatous Optic Neuropathy Using Fundus Photographs</article-title>
          <source>JAMA Ophthalmol</source>
          <year>2019</year>
          <month>12</month>
          <day>01</day>
          <volume>137</volume>
          <issue>12</issue>
          <fpage>1353</fpage>
          <lpage>1360</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31513266"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamaophthalmol.2019.3501</pub-id>
          <pub-id pub-id-type="medline">31513266</pub-id>
          <pub-id pub-id-type="pii">2749330</pub-id>
          <pub-id pub-id-type="pmcid">PMC6743057</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref137">
        <label>137</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>A Large-Scale Database and a CNN Model for Attention-Based Glaucoma Detection</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2020</year>
          <month>02</month>
          <volume>39</volume>
          <issue>2</issue>
          <fpage>413</fpage>
          <lpage>424</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2019.2927226</pub-id>
          <pub-id pub-id-type="medline">31283476</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref138">
        <label>138</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Faster and transferable deep learning steganalysis on GPU</article-title>
          <source>J Real-Time Image Proc</source>
          <year>2019</year>
          <month>4</month>
          <day>1</day>
          <volume>16</volume>
          <issue>3</issue>
          <fpage>623</fpage>
          <lpage>633</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s11554-019-00870-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11554-019-00870-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref139">
        <label>139</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep Residual Learning for Image Recognition</article-title>
          <year>2016</year>
          <month>12</month>
          <day>12</day>
          <conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>27-30 June 2016</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>770</fpage>
          <lpage>778</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.90</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref140">
        <label>140</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sohail</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zahoora</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Qureshi</surname>
              <given-names>AS</given-names>
            </name>
          </person-group>
          <article-title>A survey of the recent architectures of deep convolutional neural networks</article-title>
          <source>Artif Intell Rev</source>
          <year>2020</year>
          <month>04</month>
          <day>21</day>
          <volume>53</volume>
          <issue>8</issue>
          <fpage>5455</fpage>
          <lpage>5516</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s10462-020-09825-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10462-020-09825-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref141">
        <label>141</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Loy</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Image Super-Resolution Using Deep Convolutional Networks</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2016</year>
          <month>02</month>
          <volume>38</volume>
          <issue>2</issue>
          <fpage>295</fpage>
          <lpage>307</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2015.2439281</pub-id>
          <pub-id pub-id-type="medline">26761735</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref142">
        <label>142</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ratliff</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Burden</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Sastry</surname>
              <given-names>SS</given-names>
            </name>
          </person-group>
          <article-title>On the Characterization of Local Nash Equilibria in Continuous Games</article-title>
          <source>IEEE Trans Automat Contr</source>
          <year>2016</year>
          <month>8</month>
          <volume>61</volume>
          <issue>8</issue>
          <fpage>2301</fpage>
          <lpage>2307</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/TAC.2016.2583518"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TAC.2016.2583518</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref143">
        <label>143</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heusel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ramsauer</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Unterthiner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nessler</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hochreiter</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>GANs trained by a two time-scale update rule converge to a local Nash equilibrium</article-title>
          <year>2017</year>
          <month>12</month>
          <day>04</day>
          <conf-name>NIPS'17: Proceedings of the 31st International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>4 December 2017</conf-date>
          <conf-loc>Long Beach, CA</conf-loc>
          <publisher-loc>Red Hook, NY</publisher-loc>
          <publisher-name>Curran Associates Inc</publisher-name>
          <fpage>6629</fpage>
          <lpage>6640</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref144">
        <label>144</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ganin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lempitsky</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised Domain Adaptation by Backpropagation</article-title>
          <year>2015</year>
          <conf-name>Proceedings of the 32nd International Conference on Machine Learning</conf-name>
          <conf-date>07-09 Jul 2015</conf-date>
          <conf-loc>Lille, France</conf-loc>
          <fpage>1180</fpage>
          <lpage>1189</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref145">
        <label>145</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tzeng</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Saenko</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Darrell</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Adversarial Discriminative Domain Adaptation</article-title>
          <year>2017</year>
          <month>11</month>
          <day>09</day>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>21-26 July 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2962</fpage>
          <lpage>2971</lpage>
          <pub-id pub-id-type="doi">10.1109/CVPR.2017.316</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref146">
        <label>146</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mahayuddin</surname>
              <given-names>ZR</given-names>
            </name>
            <name name-style="western">
              <surname>Saifuddin Saif</surname>
              <given-names>AFM</given-names>
            </name>
          </person-group>
          <article-title>A Comprehensive Review Towards Segmentation and Detection of Cancer Cell and Tumor for Dynamic 3D Reconstruction</article-title>
          <source>APJITM</source>
          <year>2020</year>
          <month>06</month>
          <day>15</day>
          <volume>09</volume>
          <issue>01</issue>
          <fpage>28</fpage>
          <lpage>39</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ukm.my/apjitm/view.php?id=184"/>
          </comment>
          <pub-id pub-id-type="doi">10.17576/apjitm-2020-0901-03</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref147">
        <label>147</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luc</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Couprie</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Verbeek</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Semantic segmentation using adversarial networks</article-title>
          <year>2016</year>
          <conf-name>NIPS 2016: The Thirtieth Annual Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 4-9, 2016</conf-date>
          <conf-loc>Barcelona, Spain</conf-loc>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://research.fb.com/wp-content/uploads/2016/11/luc16wat.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref148">
        <label>148</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>GSW</given-names>
            </name>
            <name name-style="western">
              <surname>Quang</surname>
              <given-names>ND</given-names>
            </name>
            <name name-style="western">
              <surname>Gan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hamzah</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia-Franco</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>San Yeo</surname>
              <given-names>IY</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>EYM</given-names>
            </name>
            <name name-style="western">
              <surname>Sabanayagam</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Baskaran</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ibrahim</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>NC</given-names>
            </name>
            <name name-style="western">
              <surname>Finkelstein</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Lamoureux</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>IY</given-names>
            </name>
            <name name-style="western">
              <surname>Bressler</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Sivaprasad</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Varma</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jonas</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>GCM</given-names>
            </name>
            <name name-style="western">
              <surname>Aung</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hsu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>Development and Validation of a Deep Learning System for Diabetic Retinopathy and Related Eye Diseases Using Retinal Images From Multiethnic Populations With Diabetes</article-title>
          <source>JAMA</source>
          <year>2017</year>
          <month>12</month>
          <day>12</day>
          <volume>318</volume>
          <issue>22</issue>
          <fpage>2211</fpage>
          <lpage>2223</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29234807"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jama.2017.18152</pub-id>
          <pub-id pub-id-type="medline">29234807</pub-id>
          <pub-id pub-id-type="pii">2665775</pub-id>
          <pub-id pub-id-type="pmcid">PMC5820739</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref149">
        <label>149</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kahng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Thorat</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Chau</surname>
              <given-names>DHP</given-names>
            </name>
            <name name-style="western">
              <surname>Viégas</surname>
              <given-names>FB</given-names>
            </name>
            <name name-style="western">
              <surname>Wattenberg</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>GAN Lab: Understanding Complex Deep Generative Models using Interactive Visual Experimentation</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2018</year>
          <month>08</month>
          <day>20</day>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>310</fpage>
          <lpage>320</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/tvcg.2018.2864500"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TVCG.2018.2864500</pub-id>
          <pub-id pub-id-type="medline">30130198</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref150">
        <label>150</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taha</surname>
              <given-names>BA</given-names>
            </name>
            <name name-style="western">
              <surname>Al Mashhadany</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hafiz Mokhtar</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Dzulkefly Bin Zan</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Arsad</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>An Analysis Review of Detection Coronavirus Disease 2019 (COVID-19) Based on Biosensor Application</article-title>
          <source>Sensors (Basel)</source>
          <year>2020</year>
          <month>11</month>
          <day>26</day>
          <volume>20</volume>
          <issue>23</issue>
          <fpage>6764</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s20236764"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s20236764</pub-id>
          <pub-id pub-id-type="medline">33256085</pub-id>
          <pub-id pub-id-type="pii">s20236764</pub-id>
          <pub-id pub-id-type="pmcid">PMC7729752</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref151">
        <label>151</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Javanmardi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tasdizen</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Domain adaptation for biomedical image segmentation using adversarial training</article-title>
          <year>2018</year>
          <month>05</month>
          <day>24</day>
          <conf-name>2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)</conf-name>
          <conf-date>4-7 April 2018</conf-date>
          <conf-loc>Washington, DC</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>554</fpage>
          <lpage>558</lpage>
          <pub-id pub-id-type="doi">10.1109/ISBI.2018.8363637</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref152">
        <label>152</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sajjadi</surname>
              <given-names>MSM</given-names>
            </name>
            <name name-style="western">
              <surname>Bachem</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Lucic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bousquet</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Gelly</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Assessing generative models via precision and recall</article-title>
          <source>Advances in Neural Information Processing Systems</source>
          <year>2018</year>
          <month>05</month>
          <day>31</day>
          <conf-name>32nd Conference on Neural Information Processing Systems (NeurIPS 2018)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Montréal, QC, Canada</conf-loc>
          <publisher-loc>Red Hook, NY</publisher-loc>
          <publisher-name>Curran Associates, Inc</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref153">
        <label>153</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arora</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Risteski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Do GANs learn the distribution? Some Theory and Empirics</article-title>
          <source>ICLR 2018 Conference Track 6th International Conference on Learning Representations</source>
          <year>2018</year>
          <month>02</month>
          <day>16</day>
          <conf-name>ICLR 2018 Conference Track 6th International Conference on Learning Representations</conf-name>
          <conf-date>April 30-May 3, 2018</conf-date>
          <conf-loc>Vancouver Convention Center, Vancouver, BC, Canada</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openreview.net/forum?id=BJehNfW0-"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref154">
        <label>154</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Madani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Moradi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Karargyris</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Syeda-Mahmood</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Semi-supervised learning with generative adversarial networks for chest X-ray classification with ability of data domain adaptation</article-title>
          <year>2018</year>
          <month>05</month>
          <day>24</day>
          <conf-name>2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)</conf-name>
          <conf-date>4-7 April 2018</conf-date>
          <conf-loc>Washington, DC</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1038</fpage>
          <lpage>1042</lpage>
          <pub-id pub-id-type="doi">10.1109/ISBI.2018.8363749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref155">
        <label>155</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Mish: A Self Regularized Non-Monotonic Neural Activation Function</article-title>
          <year>2020</year>
          <conf-name>BMVC 2020: British Machine Vision Conference</conf-name>
          <conf-date>September 7-11, 2020</conf-date>
          <conf-loc>Manchester, UK</conf-loc>
          <fpage>1</fpage>
          <lpage>14</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref156">
        <label>156</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ha</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>YS</given-names>
            </name>
            <name name-style="western">
              <surname>Bak</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yun</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>IH</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bae</surname>
              <given-names>YS</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Explaining the Rationale of Deep Learning Glaucoma Decisions with Adversarial Examples</article-title>
          <source>Ophthalmology</source>
          <year>2021</year>
          <month>01</month>
          <volume>128</volume>
          <issue>1</issue>
          <fpage>78</fpage>
          <lpage>88</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2020.06.036</pub-id>
          <pub-id pub-id-type="medline">32598951</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(20)30579-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref157">
        <label>157</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bapna</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Firat</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>MX</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ngiam</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>GPipe: Efficient Training of Giant Neural Networks using Pipeline Parallelism</article-title>
          <year>2019</year>
          <conf-name>Advances in Neural Information Processing Systems 32 (NeurIPS 2019)</conf-name>
          <conf-date>December 8-14, 2019</conf-date>
          <conf-loc>Vancouver, BC, Canada</conf-loc>
          <fpage>103</fpage>
          <lpage>112</lpage>
          <pub-id pub-id-type="doi">10.1109/emc2-nips53020.2019.00008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref158">
        <label>158</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Neff</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Payer</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Stern</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Urschler</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Generative Adversarial Network based Synthesis for Supervised Medical Image Segmentation</article-title>
          <year>2017</year>
          <month>05</month>
          <conf-name>Proceedings of the OAGM&#38;ARW Joint Workshop 2017: Vision, Automation and Robotics</conf-name>
          <conf-date>May 10-12, 2017</conf-date>
          <conf-loc>OAGM/AAPR ARW 2017, Wien, Austria</conf-loc>
          <publisher-loc>Graz, Austria</publisher-loc>
          <publisher-name>Verlag der Technischen Universität Graz</publisher-name>
          <fpage>140</fpage>
          <lpage>145</lpage>
          <pub-id pub-id-type="doi">10.3217/978-3-85125-524-9-30</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
