<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v23i11e22934</article-id>
      <article-id pub-id-type="pmid">34821566</article-id>
      <article-id pub-id-type="doi">10.2196/22934</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Artificial Intelligence for Skin Cancer Detection: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kukafka</surname>
            <given-names>Rita</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Makin</surname>
            <given-names>Jen</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Frontoni</surname>
            <given-names>Emanuele</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Benítez-Andrades</surname>
            <given-names>José Alberto</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Shams</surname>
            <given-names>Shayan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Sutton</surname>
            <given-names>Reed</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Takiddin</surname>
            <given-names>Abdulrahman</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Electrical and Computer Engineering</institution>
            <institution>Texas A&#38;M University</institution>
            <addr-line>188 Bizzell St</addr-line>
            <addr-line>College Station, TX, 77843</addr-line>
            <country>United States</country>
            <phone>974 44230425</phone>
            <email>abdulrahman.takiddin@tamu.edu</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4793-003X</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Schneider</surname>
            <given-names>Jens</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0546-2816</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Yin</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0549-3882</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Abd-Alrazaq</surname>
            <given-names>Alaa</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7695-4626</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Househ</surname>
            <given-names>Mowafa</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3648-6271</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Electrical and Computer Engineering</institution>
        <institution>Texas A&#38;M University</institution>
        <addr-line>College Station, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>College of Science and Engineering</institution>
        <institution>Hamad Bin Khalifa University</institution>
        <addr-line>Doha</addr-line>
        <country>Qatar</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Abdulrahman Takiddin <email>abdulrahman.takiddin@tamu.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>11</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>24</day>
        <month>11</month>
        <year>2021</year>
      </pub-date>
      <volume>23</volume>
      <issue>11</issue>
      <elocation-id>e22934</elocation-id>
      <history>
        <date date-type="received">
          <day>27</day>
          <month>7</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>17</day>
          <month>11</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>5</day>
          <month>1</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>3</day>
          <month>8</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Abdulrahman Takiddin, Jens Schneider, Yin Yang, Alaa Abd-Alrazaq, Mowafa Househ. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 24.11.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2021/11/e22934" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Skin cancer is the most common cancer type affecting humans. Traditional skin cancer diagnosis methods are costly, require a professional physician, and take time. Hence, to aid in diagnosing skin cancer, artificial intelligence (AI) tools are being used, including shallow and deep machine learning–based methodologies that are trained to detect and classify skin cancer using computer algorithms and deep neural networks.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this study was to identify and group the different types of AI-based technologies used to detect and classify skin cancer. The study also examined the reliability of the selected papers by studying the correlation between the data set size and the number of diagnostic classes with the performance metrics used to evaluate the models.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We conducted a systematic search for papers using Institute of Electrical and Electronics Engineers (IEEE) Xplore, Association for Computing Machinery Digital Library (ACM DL), and Ovid MEDLINE databases following the Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews (PRISMA-ScR) guidelines. The studies included in this scoping review had to fulfill several selection criteria: being specifically about skin cancer, detecting or classifying skin cancer, and using AI technologies. Study selection and data extraction were independently conducted by two reviewers. Extracted data were narratively synthesized, where studies were grouped based on the diagnostic AI techniques and their evaluation metrics.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We retrieved 906 papers from the 3 databases, of which 53 were eligible for this review. Shallow AI-based techniques were used in 14 studies, and deep AI-based techniques were used in 39 studies. The studies used up to 11 evaluation metrics to assess the proposed models, where 39 studies used accuracy as the primary evaluation metric. Overall, studies that used smaller data sets reported higher accuracy.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This paper examined multiple AI-based skin cancer detection models. However, a direct comparison between methods was hindered by the varied use of different evaluation metrics and image types. Performance scores were affected by factors such as data set size, number of diagnostic classes, and techniques. Hence, the reliability of shallow and deep models with higher accuracy scores was questionable since they were trained and tested on relatively small data sets of a few diagnostic classes.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>skin cancer</kwd>
        <kwd>skin lesion</kwd>
        <kwd>machine learning</kwd>
        <kwd>deep neural networks</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Skin cancer is the most common cancer type that affects humans [<xref ref-type="bibr" rid="ref1">1</xref>]. Melanoma and nonmelanoma are the two main types of skin cancer [<xref ref-type="bibr" rid="ref2">2</xref>]. Nonmelanoma is of lesser concern since it usually can be cured by surgery and is nonlethal. Melanoma, however, is the most dangerous skin cancer type, with a high mortality rate, although it represents less than 5% of all skin cancer cases [<xref ref-type="bibr" rid="ref1">1</xref>]. The World Health Organization (WHO) estimated 132,000 yearly melanoma cases globally. In 2015, 60,000 cases caused death [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>Traditional methods of early detection of skin cancer include skin self-examination and skin clinical examination (screening) [<xref ref-type="bibr" rid="ref3">3</xref>]. However, skin self-examination, where the patient or a family member notices a lesion, is a random method as people might overreact or underreact. In addition, clinical examination using expensive, specialized medical tools, such as a dermoscope, microspectroscopy, and laser-based tools, requires training, effort to operate, time, and regular follow-ups [<xref ref-type="bibr" rid="ref4">4</xref>]. Thus, patients have started using mobile technologies, such as smartphones, to share images with their doctors to get faster diagnoses. However, sharing images over the internet may compromise privacy. Worse yet, the image quality may not be sufficient, which may lead to inaccurate diagnoses. As it continues to evolve, artificial intelligence (AI), which is the human-like intelligence exhibited by trained machines [<xref ref-type="bibr" rid="ref5">5</xref>], has become so pervasive that most humans interact with AI-based tools daily; in medicine, such tools assist physicians in decision making and decrease the decision variations among physicians. It is worth mentioning that even with the presence of such AI technologies, the role of an expert dermatologist is vital for diagnosis and treatment.</p>
        <p>The focus of this review is on the use of AI as a tool that helps in the process of skin cancer diagnostics. Herein, AI-based skin cancer diagnostic tools use either shallow or deep AI methodologies. Both involve customizing computer algorithms through a process called training to learn from data formed by predefined features. The difference is that shallow methods tend to not use multilayer neural networks at all or use such networks limited to a minimum of layers [<xref ref-type="bibr" rid="ref6">6</xref>]. In contrast, deep methodologies involve training large, deep multilayer neural networks with many hidden layers, typically ranging from dozens to hundreds [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      </sec>
      <sec>
        <title>Research Problem</title>
        <p>Detecting skin cancer can be challenging, time consuming, and relatively expensive [<xref ref-type="bibr" rid="ref4">4</xref>]. For example, <xref rid="figure1" ref-type="fig">Figure 1</xref> shows two lesions that superficially seem identical [<xref ref-type="bibr" rid="ref8">8</xref>]. However, the left image is of a normal benign lesion, whereas the right image shows a melanoma lesion. As AI technologies are becoming smarter and faster [<xref ref-type="bibr" rid="ref5">5</xref>], it is hardly surprising that they are being used to assist in diagnosing skin cancer and suggesting courses of action. This is due to the fact that AI-based methods are considered to be relatively cheap, easy to use, and accessible [<xref ref-type="bibr" rid="ref5">5</xref>]. Thus, they offer the potential to overcome the issues inherent in the aforementioned existing skin cancer detection methods. However, as the literature on the medical use of AI quickly grows and continues to report findings using incompatible performance metrics, direct comparison between prior work becomes more challenging and threatens to hamper future research. This study seeks to address this issue by performing a rigorous and transparent review of the existing literature. We aim to answer the research question, <italic>What are the existing AI-based tools that are used to detect and classify skin cancer?</italic></p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Similarity of normal lesion (left) and melanoma (right).</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p>This scoping review analyzes papers from different online databases. We defined strict inclusion and exclusion criteria to decide which papers to include. We then grouped the papers by the methodology used and analyzed the ground covered in the papers. Finally, we identified gaps in the literature and discussed how these gaps can be filled by future work. We developed a protocol before commencing the review. To ensure that this scoping review is transparent and replicable, we followed the Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews (PRISMA-ScR) instructions and guidelines [<xref ref-type="bibr" rid="ref9">9</xref>].</p>
      <sec>
        <title>Search Strategy</title>
        <p>We conducted a systematic search on July 15, 2020. We identified articles from Institute of Electrical and Electronics Engineers (IEEE) Xplore, Association for Computing Machinery Digital Library (ACM DL), and Ovid MEDLINE databases. The terms used for searching the bibliographic databases were identified based on the target population (eg, “skin neoplasms,” “skin cancer,” “skin lesion”), intervention (eg, “artificial intelligence,” “machine learning,” “deep learning”), and outcome (“diagnosis,” “screening,” “detection,” “classification”). We derived the search terms from previous literature studies and reviews. For practical reasons, we did not conduct backward or forward reference list checking, and we also did not contact experts. <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> shows the search strategy used for searching Ovid MEDLINE, where “skin neoplasms,” “artificial intelligence,” “machine learning,” and “deep learning” were used as MeSH terms. <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> also shows the search query for IEEE Xplore and ACM DL.</p>
      </sec>
      <sec>
        <title>Study Eligibility Criteria</title>
        <p>We included studies fulfilling the following criteria:</p>
        <list list-type="bullet">
          <list-item>
            <p>Studies published between January 1, 2009, and July 15, 2020.</p>
          </list-item>
          <list-item>
            <p>Studies written in English.</p>
          </list-item>
          <list-item>
            <p>Population: studies discussing only skin cancer. Studies discussing other diseases or forms of cancer were excluded.</p>
          </list-item>
          <list-item>
            <p>Intervention: studies discussing only AI-based applications. Studies that discussed skin cancer–related applications or systems, including theoretical, statistical, or mathematical approaches, were excluded.</p>
          </list-item>
          <list-item>
            <p>Studies discussing the specific use of AI for detecting, classifying, or diagnosing skin cancer. Studies discussing only the general use of AI in a clinical setting were excluded.</p>
          </list-item>
          <list-item>
            <p>Studies proposing a new AI-based method. Case studies, surveys, review or response papers, or papers that reviewed, assessed, analyzed, evaluated, or compared existing methods were excluded.</p>
          </list-item>
        </list>
        <p>No restrictions on the country of publication, study design, comparator, or outcomes were enforced.</p>
      </sec>
      <sec>
        <title>Study Selection</title>
        <p>Authors Abdulrahman Takiddin (AT) and Alaa Abd-Alrazaq (AA) independently screened the titles and abstracts of all retrieved studies. Following the written protocol, they independently read the full texts of the papers included in this study after reading their titles and abstracts. Any disagreements between both reviewers were resolved by discussion. We assessed the intercoder agreement by calculating the Cohen kappa (κ), which was 0.86 and 0.93 for screening titles and abstracts and for reading full texts, respectively, indicating good agreement.</p>
      </sec>
      <sec>
        <title>Data Extraction</title>
        <p>For reliable and accurate data extraction from the included studies, a data extraction form was developed and piloted using eight included studies (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The data extraction process was independently conducted by AT and AA. Any disagreements were resolved by discussion with good intercoder agreement (Cohen κ=0.88) between the reviewers.</p>
      </sec>
      <sec>
        <title>Data Synthesis</title>
        <p>A narrative approach was used to synthesize the extracted data. Specifically, we first grouped the included studies by diagnostic techniques based on complexity. Then, we discussed the evaluation metrics used in each study. Next, we grouped the studies based on the used evaluation metrics. In addition, we took into consideration the used data set in terms of the number of images, types of images, and number of diseases (diagnostic classes) that the data set contained. We assessed the correlation between the accuracy score and the number of images and diagnostic classes of the data set.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Search Results</title>
        <p>After searching the 3 online databases, we retrieved a total of 906 studies. We then started excluding papers in three phases. As shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, in the first phase, “identification,” we excluded 42 papers. In the second phase, “screening,” we excluded 711 papers. In the last phase, “eligibility,” we included 153 papers for a full-text review. After reviewing the full text of the papers, we excluded 100 papers. The specific reasons behind excluding the papers in each phase are mentioned in <xref rid="figure2" ref-type="fig">Figure 2</xref>. Hence, the total number of included papers in this scoping review was 53.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>PRISMA approach. ACM DL: Association for Computing Machinery Digital Library; AI: artificial intelligence; IEEE: Institute of Electrical and Electronics Engineers; PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses.</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Study Characteristics</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> summarizes the characteristics of the selected studies. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows the number of papers published per year: 4 of 53 studies (7.5%) were published before 2016 [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref13">13</xref>], 26 studies (49.1%) were published in 2016, 2017, and 2018 [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref39">39</xref>], and 23 studies (43.4%) were published in 2019 and 2020 [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref62">62</xref>]. Although our selection criteria included papers published between 2009 and July 2020, the oldest published paper included after the full-text review was published in 2011. We observed that the number of papers sharply increased in 2018 and 2019.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Study characteristics (N=53).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="670"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Characteristics</td>
                <td>n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>Publication year</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Before 2016</td>
                <td>4 (7.5)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>2016-2018</td>
                <td>26 (49.1)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>2019-2020</td>
                <td>23 (43.4)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Country of publication</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>The United States</td>
                <td>9 (17.0)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>China</td>
                <td>6 (11.3)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>India</td>
                <td>5 (9.4)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Poland</td>
                <td>3 (5.7)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>New Zealand</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Austria</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Germany</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Bangladesh</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Indonesia</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Pakistan</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Turkey</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>France</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Russia</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>The United Kingdom</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Hong Kong</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Iran</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Korea</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Philippines</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Lebanon</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Saudi Arabia</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Singapore</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Thailand</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Australia</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Canada</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Egypt</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Nigeria</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>South Africa</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Publication type</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Conference proceedings</td>
                <td>31 (58.5)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Journals</td>
                <td>22 (41.5)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Number of published papers by year.</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p><xref rid="figure4" ref-type="fig">Figure 4</xref> shows the region of publication of the included studies. The studies included were published in different parts of the world. In Asia, 22 studies (41.5%) were conducted in China, India, Bangladesh, Hong Kong, Indonesia, Pakistan, the Philippines, Singapore, South Korea, and Thailand; 10 studies (18.9%) were conducted in North America, specifically the United States and Canada; 10 studies were conducted in Europe, including Austria, Poland, Germany, France, the United Kingdom, and Russia; 5 studies (9.4%) were conducted in the Middle East, including Lebanon, Turkey, Iran, and Saudi Arabia; 3 studies (5.7%) were conducted in Africa, specifically Egypt, South Africa, and Nigeria; and in Oceania, 3 studies were conducted in New Zealand and Australia.</p>
        <p>The selected studies were either published in conference proceedings or journals: 31 of 53 studies (58.5%) were published in conference proceedings, and the rest of the papers (22/53, 41.5%) were published in journals. <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> displays the characteristics of each included study.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Number of published papers by region.</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Data Characteristics</title>
        <p><xref ref-type="table" rid="table2">Table 2</xref> summarizes the characteristics of the used data in the selected studies. The studies used different sizes of data sets to train their models. The average number of used images in the selected studies was around 7800. The lowest number of images used was 40 [<xref ref-type="bibr" rid="ref24">24</xref>], whereas the highest number of images used was 129,450 [<xref ref-type="bibr" rid="ref23">23</xref>]. We categorized these data set sizes into three groups, depending on the number of images used. The first category contained small data sets that had fewer than 1000 images (21/53, 39.6%). The second category used medium-size data sets consisting of 1000-10,000 images (25/53, 47.2%). The last category contained large data sets that included more than 10,000 images (7/53, 13.2%).</p>
        <p>We divided the papers into two groups based on the classification type. We found that more than half of the papers (31/53, 58.5%) built models to classify whether the lesion was benign or malignant (two-class/binary classification). The rest of the papers (22/53, 41.5%) presented models in which skin lesions were classified using three or more diagnostic classes (multiclass classification). <xref rid="figure5" ref-type="fig">Figure 5</xref> shows the number of papers using different diagnostic classes. In the multiclass classification, 8 studies used 3 diagnostic classes, 1 study used 4 classes, 2 studies used 5 classes, 10 studies used 7 classes, and 1 study used 9 classes. The benign classes included benign keratosis, melanocytic nevus, and dermatofibroma. The malignant classes included melanoma and basal cell carcinoma. Other lesions, such as vascular lesions, actinic keratosis, genodermatosis, and tumors, could be either benign or malignant.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Data and deployment characteristics (N=53).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="670"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Characteristics</td>
                <td>n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>Data set size</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Small</td>
                <td>21 (39.6)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Medium</td>
                <td>25 (47.2)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Large</td>
                <td>7 (13.2)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Classification type</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>2 classes</td>
                <td>31 (58.5)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>3 classes</td>
                <td>8 (15.1)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>4 classes</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>5 classes</td>
                <td>2 (3.8)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>7 classes</td>
                <td>10 (18.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>9 classes</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Image type</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Dermoscopic</td>
                <td>43 (81.1)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Clinical</td>
                <td>5 (9.4)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>High quality</td>
                <td>4 (7.5)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Spectroscopic</td>
                <td>1 (1.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Deployment</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Development</td>
                <td>45 (84.9)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>System</td>
                <td>3 (5.7)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Web application</td>
                <td>3 (5.7)</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Mobile application</td>
                <td>2 (3.8)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>With regard to the type of images used to train, test, and validate the models, 43 of 53 studies (81.1%) used dermoscopic images; 5 studies (9.4%) used clinical images that were taken using a normal camera; and 4 studies (7.5%) used high-quality images that were taken with a professional camera. The remaining study used spectroscopic images requiring a specialized system taking images of a lesion from three different spots using polarized and unpolarized light.</p>
        <p>The majority of the studies (45/53, 84.9%) presented technologies that are still in the development phase. The rest of the studies (8/53, 15.1%) have been deployed into a usable form: 3 studies developed a health care system, 3 studies deployed the model into a web application, and 2 studies transferred the model into a mobile application. <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref> displays the data and deployment characteristics of each included study.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Number of published papers by number of diagnostic classes used.</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Diagnostic Techniques</title>
        <p>We categorized the papers into two groups based on the AI technique used in detecting and classifying skin cancer. The groups were <italic>shallow</italic> techniques and <italic>deep</italic> techniques. These two groups differed mainly in the complexity of the AI architecture underlying the model. <italic>Shallow</italic> techniques use either simple machine learning algorithms, such as a support vector machine (SVM), or only a couple of layers of neural networks [<xref ref-type="bibr" rid="ref63">63</xref>]. If, in contrast, the AI architecture is a neural network that consists of at least three layers, it is categorized as a <italic>deep</italic> technique [<xref ref-type="bibr" rid="ref19">19</xref>]. It turns out that around a quarter of the studies (14/53, 26.4%) used shallow techniques, while the rest (39/53, 73.6%) used deep techniques. Within each of the groups, studies may have used different models or algorithms, and some studies proposed multiple methods or provided testing data using multiple methods. In this study, we only considered the model that had the best-reported performance in each paper.</p>
        <p>As shown in <xref ref-type="table" rid="table3">Table 3</xref>, most studies that used <italic>shallow</italic> techniques adopted an SVM (9/14, 64.3%), which is a common two-class classifier that uses a hyperplane as a decision boundary [<xref ref-type="bibr" rid="ref6">6</xref>]. The rest of the studies (5/14, 35.7%) adopted the naive Bayes (NB) algorithm (1/14, 7.1%), which is a probabilistic classifier that assumes conditional independence among the features [<xref ref-type="bibr" rid="ref6">6</xref>]; logistic regression (LR; 1/14), which uses probability for prediction; k-nearest neighbors (kNNs; 1/14), which classify a sample based on samples close to it; and random forests (RFs; 1/14), which classify using decision trees [<xref ref-type="bibr" rid="ref6">6</xref>]. A hybrid model (1/14) classified images through multiple iterations using AdaBoost and an SVM.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Techniques used in included studies using shallow techniques (N=14).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="400"/>
            <col width="300"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td>Model</td>
                <td>n (%)</td>
                <td>Reference</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>SVM<sup>a</sup></td>
                <td>9 (64.3)</td>
                <td>[<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]</td>
              </tr>
              <tr valign="top">
                <td>NB<sup>b</sup></td>
                <td>1 (7.1)</td>
                <td>[<xref ref-type="bibr" rid="ref11">11</xref>]</td>
              </tr>
              <tr valign="top">
                <td>LR<sup>c</sup></td>
                <td>1 (7.1)</td>
                <td>[<xref ref-type="bibr" rid="ref13">13</xref>]</td>
              </tr>
              <tr valign="top">
                <td>kNN<sup>d</sup></td>
                <td>1 (7.1)</td>
                <td>[<xref ref-type="bibr" rid="ref25">25</xref>]</td>
              </tr>
              <tr valign="top">
                <td>RF<sup>e</sup></td>
                <td>1 (7.1)</td>
                <td>[<xref ref-type="bibr" rid="ref28">28</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Hybrid</td>
                <td>1 (7.1)</td>
                <td>[<xref ref-type="bibr" rid="ref18">18</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>NB: naive Bayes.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>LR: logistic regression.</p>
            </fn>
            <fn id="table3fn4">
              <p><sup>d</sup>kNN: k-nearest neighbor.</p>
            </fn>
            <fn id="table3fn5">
              <p><sup>e</sup>RF: random forest.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The majority of the studies that used <italic>deep</italic> techniques (<xref ref-type="table" rid="table4">Table 4</xref>) adopted different types of convolutional neural networks (CNNs; 36/39, 92.3%), which assign importance to parts of images using ImageNet-pretrained architectures (18/39, 46.2%), including the residual network (ResNet), Inception, AlexNet, MobileNet, Visual Geometry Group (VGG), Xception, DenseNet, and GoogleNet. In addition, some of the CNN-based studies (11/39, 28.2%) built customized CNNs or ResNets. Moreover, some studies adopted different combinations of CNNs along with other models (hybrid models; 5/39, 12.8%), as well as using ensemble models (4/39, 10.3%); the remaining study (1/39, 2.6%) used the OpenCV library. <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref> provides further details regarding each of the models in terms of the method used, the number of layers (ranging from 1 to 121 layers), the method used for selecting the hyperparameters, and the performance of the proposed model with respect to other reported models within the study.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Techniques used in included studies using deep techniques (N=39).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="370"/>
            <col width="200"/>
            <col width="400"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Model</td>
                <td>n (%)</td>
                <td>Reference</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Pretrained CNNs<sup>a</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>ResNet<sup>b</sup></td>
                <td>5 (12.8)</td>
                <td>[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Inception</td>
                <td>3 (7.7)</td>
                <td>[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>AlexNet</td>
                <td>3 (7.7)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>MobileNet</td>
                <td>3 (7.7)</td>
                <td>[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>VGG<sup>c</sup></td>
                <td>2 (5.1)</td>
                <td>[<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>Xception</td>
                <td>1 (2.6)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>DenseNet</td>
                <td>1 (2.6)</td>
                <td>[<xref ref-type="bibr" rid="ref58">58</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Custom</bold>
                </td>
              </tr>
              <tr valign="top">
                <td/>
                <td>CNN</td>
                <td>9 (23.1)</td>
                <td>[<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref62">62</xref>]</td>
              </tr>
              <tr valign="top">
                <td/>
                <td>ResNet</td>
                <td>2 (5.1)</td>
                <td>[<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="2">Hybrid</td>
                <td>5 (12.8)</td>
                <td>[<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="2">Ensemble</td>
                <td>4 (10.3)</td>
                <td>[<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="2">OpenCV</td>
                <td>1 (2.6)</td>
                <td>[<xref ref-type="bibr" rid="ref10">10</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>ResNet: residual network.</p>
            </fn>
            <fn id="table4fn3">
              <p><sup>c</sup>VGG: Visual Geometry Group.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Evaluation Metrics</title>
        <p>The studies included in this scoping review used different evaluation metrics to assess their proposed models. In the studies, the following five primary evaluation metrics were used to assess the built models: accuracy, sensitivity and specificity, positive predictive value (PPV) or precision, area under the curve (AUC), and F1-score. All five metrics ranged from 0% to 100%; the higher the score, the better the model performance. To compute the different evaluation metrics, the following types of samples were identified: First, true positives (TPs), which are malignant samples that the AI tool also detected as malignant; second, false positives (FPs), which are benign samples that the AI tool detected as malignant; third, true negatives (TNs), which are benign samples that were also detected as benign by the AI tool; and fourth, false negatives (FNs), which are malignant samples that were detected as benign by the AI tool. It is worth mentioning that more than half of the studies (33/53, 62.3%) reported multiple evaluation metrics, in addition to the primary metric.</p>
        <p>Accuracy = (TP + TN)/(TP + TN + FP + FN), which implies how well the model detects the diagnostic classes, was reported in the majority of the papers (44/53, 83%). Sensitivity or recall = TP/(TP + FN), which is the probability of the model, given only malignant samples, to correctly diagnose them as malignant, was reported in 30 (56.6%) papers. Specificity = TN/(TN + FP), which determines the proportion of negative samples that are correctly detected, was reported in 24 (45.3%) papers. The PPV or precision = TP/(TP + FP) was reported in 13 (24.5%) papers. The AUC, which is the area under the receiver operating characteristic (ROC) curve, which plots the true-positive rate against the false-positive rate, was reported in 11 (20.8%) papers. The F1-score, which is the harmonic mean of recall and precision, was reported in 9 (17.0%) papers. In addition, the Dice coefficient = 2TP/(FN + 2TP + FP) was reported in 4 (7.5%) papers. The negative predictive value (NPV) = TN/(TN + FN) was reported in 2 (3.8%) papers. The Jaccard index = TP/(TP + FN + FP) was reported in 2 papers. The Cohen κ was also reported in 2 papers. Finally, the Youden index = sensitivity + specificity – 1 was reported in 1 (1.9%) paper.</p>
        <p>Herein, we conducted our analysis of each paper based on the best-performing experiment in case multiple experiments were conducted. In addition, if multiple evaluation metrics were used, we used the primary evaluation metric score that was reported by the authors in the abstract or conclusion as the main focus of the paper, or we used the average score across the diagnostic classes for multiclass classification papers. Of the aforementioned metrics, accuracy, AUC, sensitivity and specificity, and the F1-score were used as the primary evaluation metrics. Around 73% (39/53) of the papers used accuracy as their primary evaluation metric to assess the trained models. The average accuracy value was 86.8%, with a maximum of 98.8% [<xref ref-type="bibr" rid="ref60">60</xref>] and a minimum of 67% [<xref ref-type="bibr" rid="ref10">10</xref>]. The AUC was reported in 9 studies, with an average score of 87.2%; the highest AUC score was 91.7% [<xref ref-type="bibr" rid="ref41">41</xref>], whereas the lowest AUC score was 82.0% [<xref ref-type="bibr" rid="ref26">26</xref>]. Sensitivity and specificity were used in 4 studies, and the F1-score was reported in 1 study. <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref> shows the data characteristics, used model, and evaluation scores for each included study (<xref ref-type="table" rid="table5">Table 5</xref>).</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Primary evaluation metrics and scores reported by included studies (N=53).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="670"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Score</td>
                <td>Reference</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>Accuracy</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>99%</td>
                <td>[<xref ref-type="bibr" rid="ref60">60</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>98%</td>
                <td>[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>96%</td>
                <td>[<xref ref-type="bibr" rid="ref24">24</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>95%</td>
                <td>[<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref61">61</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>94%</td>
                <td>[<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>93%</td>
                <td>[<xref ref-type="bibr" rid="ref16">16</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>92%</td>
                <td>[<xref ref-type="bibr" rid="ref18">18</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>91%</td>
                <td>[<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref62">62</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>90%</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>89%</td>
                <td>[<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>88%</td>
                <td>[<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>87%</td>
                <td>[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>86%</td>
                <td>[<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref58">58</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>84%</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>83%</td>
                <td>[<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>81%</td>
                <td>[<xref ref-type="bibr" rid="ref14">14</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>80%</td>
                <td>[<xref ref-type="bibr" rid="ref19">19</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>77%</td>
                <td>[<xref ref-type="bibr" rid="ref28">28</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>75%</td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref59">59</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>72%</td>
                <td>[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>67%</td>
                <td>[<xref ref-type="bibr" rid="ref10">10</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>AUC<sup>a</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>92%</td>
                <td>[<xref ref-type="bibr" rid="ref41">41</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>91%</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>89%</td>
                <td>[<xref ref-type="bibr" rid="ref32">32</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>87%</td>
                <td>[<xref ref-type="bibr" rid="ref46">46</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>85%</td>
                <td>[<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>84%</td>
                <td>[<xref ref-type="bibr" rid="ref30">30</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>82%</td>
                <td>[<xref ref-type="bibr" rid="ref26">26</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Sensitivity</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>96%</td>
                <td>[<xref ref-type="bibr" rid="ref31">31</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>90%</td>
                <td>[<xref ref-type="bibr" rid="ref15">15</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>83%</td>
                <td>[<xref ref-type="bibr" rid="ref12">12</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>77%</td>
                <td>[<xref ref-type="bibr" rid="ref29">29</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Specificity</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>96%</td>
                <td>[<xref ref-type="bibr" rid="ref15">15</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>90%</td>
                <td>[<xref ref-type="bibr" rid="ref12">12</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>89%</td>
                <td>[<xref ref-type="bibr" rid="ref31">31</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>70%</td>
                <td>[<xref ref-type="bibr" rid="ref29">29</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>F1-score</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>83%</td>
                <td>[<xref ref-type="bibr" rid="ref45">45</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>AUC: area under the curve.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Main Findings</title>
        <p>We studied multiple characteristic types for the 53 selected studies. First, we included the study characteristics. Most studies were published in 2019, the majority of the studies were published in Southern Asia, and most studies were published in journals. Second, we discussed the data characteristics. For training and testing, most of the studies used medium-size data sets, the majority of the studies built binary classifiers, and dermoscopic images were used the most. Third, we categorized the adopted AI models into shallow and deep. Most shallow models were SVM based, whereas most deep models were CNN-based neural networks. Generally, deep models were adopted more than shallow models. Fourth, we listed the evaluation metrics used along with the reported scores to assess the performance of the models. In total, 11 different evaluation metrics were used, where accuracy was the most commonly used metric, so we focused on accuracy.</p>
      </sec>
      <sec>
        <title>Performance Factors</title>
        <p>After analyzing the reported performance scores, we concluded that there is a correlation between the performance and the number of classes used. In addition, another factor that affects the performance is the data set size. Next, we study this hypothesis with respect to accuracy since most of the studies (39/53, 73.6%) used it as the primary evaluation metric, although it might not be the best-suited evaluation metric to assess such a task, especially in the case of imbalanced data. We believe that having a confusion matrix or the number of TPs, FPs, TNs, and FNs would avoid bias and give a clearer evaluation of how the model behaves with regard to each of the diagnostic classes. From the studies, the top accuracy scores were ~98% [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. In studies leading to this accuracy, the authors built a two-class classification (benign vs malignant) model using data sets of 200, 356, and 200 images, respectively. The top 10 accuracy scores (99%-92%) also built two-class classifiers using an average of around 800 images. In addition, 26 studies built two-class classifiers with an average accuracy score of around 88% using an average data set size of around 1000 images, while 17 studies built multiclass classifiers with an average accuracy score of 85%; they used around 15,000 images on average. The second-lowest accuracy score was 72% [<xref ref-type="bibr" rid="ref23">23</xref>], in which the authors developed a multiclass classifier using 9 different diagnostic classes and 129,450 images, which is the highest number of classes and the biggest data set size included in this study. <xref rid="figure6" ref-type="fig">Figure 6</xref> plots the logarithmic data set size over accuracy, using colors to indicate the number of diagnostic classes. As can be seen, accuracy increases as the number of diagnostic classes and data set size decrease. 
Specifically, above the 90% accuracy threshold, we can see that the majority of the studies built two-class classifiers. The factors that might be behind such a pattern are further discussed next.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Effect of the number of diagnostic classes and data set size on accuracy.</p>
          </caption>
          <graphic xlink:href="jmir_v23i11e22934_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Classification Type Factor</title>
        <p>Binary classifiers tend to have better performance when compared to multiclass classifiers. This seems intuitively right since binary classifiers are less expressive. Instead of distinguishing between several classes, binary classifiers have “less to learn.” To illustrate this point, let us compare limits on the probability of each class for a binary and a five-class classifier. For the five-class classifier, there must be at least one class with a probability of ≤20% (according to the <italic>pigeonhole principle</italic> [<xref ref-type="bibr" rid="ref64">64</xref>]). Predicting this low probability class is, therefore, typically harder than in the case of a binary classifier, for which we know that there exists exactly (and, thus, at most) one class with a probability of ≤50%. Another way of looking at it is to consider an algorithm that performs a random choice assuming perfectly balanced data. In the binary case, the error rate of this algorithm would be 50%, whereas for the five-class classifier, it increases to 80%, a 1.6-fold increase. The problem may be further exacerbated by imbalanced data, which often arises naturally due to differences in the prevalence rates of medical conditions. Therefore, it is also not surprising that binary classifiers work well, given less data for training, since the model may still be fed sufficient numbers of examples for each class.</p>
      </sec>
      <sec>
        <title>Data Set Size Factor</title>
        <p>However, what is surprising is that <xref rid="figure6" ref-type="fig">Figure 6</xref> suggests that the performance increased with decreasing training data. To this end, we would like to note that the two methods with the best performance used shallow techniques that tend to be far less hungry for data than deep methods, since manual feature engineering is often part of the pipeline. Furthermore, Afifi et al [<xref ref-type="bibr" rid="ref21">21</xref>] used clinical image data, which may be of superior quality. In addition, depending on the testing setup, it cannot be ruled out that methods relying on less data lack the generality of models that have been trained using large volumes of data. In such scenarios, the models would be closer to data retrieval machines due to overfitting than general detectors and classifiers. To fully assess apparent issues such as this, it is important not to rely on a single performance metric when reporting results. In particular, sensitivity and specificity can be as important as accuracy in this context since they model FN and FP rates. All considered, we would, therefore, like to reiterate our earlier statement that we believe it is important for any AI to undergo rigorous clinical studies and testing before being deployed in a clinical environment.</p>
      </sec>
      <sec>
        <title>Technique Type Factor</title>
        <p>With regard to the techniques described in the studies included in this review, deep and shallow models (regardless of the number of layers) have similar performances. For example, within the shallow models, the top five skin cancer detectors were built using an SVM with accuracy scores of 93%-99% using relatively small data sets. The SVM was the most commonly used method among the shallow models. Similarly, within the deep models, the top five CNN-based skin cancer detectors had 94%-96% accuracy using medium-size data sets. CNNs were also the most commonly used method among the deep models. Theoretically, deep neural networks tend to have better performance with regard to image classifications [<xref ref-type="bibr" rid="ref65">65</xref>]. One reason is that shallow models are often limited to less expressive functional spaces when compared to deep networks. From a technical perspective, this may well explain their lower performance due to a lack of the ability to fully capture the complex nature of images during training. In contrast, deep networks and CNNs can learn features at multiple scales and complexity to provide fast diagnoses [<xref ref-type="bibr" rid="ref66">66</xref>]. Therefore, they not only detect, select, and extract features from medical images but also contribute by enhancing and constructing new features from the medical images [<xref ref-type="bibr" rid="ref67">67</xref>]. Such similarities and inconsistencies in the performances of the included studies are due to the diverse evaluation metrics used, the data set size, image types, and the number of diagnostic classes among the studies.</p>
      </sec>
      <sec>
        <title>Publication Year</title>
        <p>Based on the study characteristics, we noticed that the number of published papers has increased since 2016 and that most papers discuss the use of dermoscopic images, making it the most used image modality for the detection and classification of skin cancer. We believe that this is because the International Skin Imaging Collaboration (ISIC) competition started in 2016 [<xref ref-type="bibr" rid="ref8">8</xref>], which offered several medical data sets of dermoscopic images that have ever since been used to build AI-based models. Most of these studies are still in the development stage, and we firmly believe that these models still need to be further validated and tested in hospitals. However, dermatologists and patients are beginning to adapt to the notion of relying on AI to diagnose skin cancer.</p>
      </sec>
      <sec>
        <title>Practical and Research Implications</title>
        <p>In this scoping review, we summarized the findings in the literature related to diagnosing skin cancer by using AI-based technology. We also categorized the papers included in this review based on the methodology used, the type of AI techniques, and their performance, and identified the links between these aspects.</p>
        <p>We noted that although all the papers included in this scoping review discuss the application and performance of a specific AI technology, the reporting is performed heterogeneously. A discussion of the relationship between using one specific AI technique and other aspects, such as data set size, or even a discussion of why the evaluation metric used is reasonable is normally not attempted. This, of course, potentially hampers research in this direction, as it becomes harder for future studies to provide a comprehensive comparison with the existing work that follows scientific rigor. This scoping review filled this gap by performing the necessary characterizations and analyses. This was achieved by grouping each of the used AI technologies into shallow and deep approaches, linking each type to the evaluation metrics used, listing and interpreting the number of diagnostic classes used in each study, and highlighting the dependency of performance on data set size and other factors. To the best of our knowledge, no similar work has been performed to fill this gap. In the Conclusion section, we will highlight our main findings.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>This scoping review examined papers that were published between January 2009 and July 2020, and any published study outside this timeline was excluded, which may have omitted older AI-based methods. In addition, we examined papers written in English; other languages were not included, which may have led to the exclusion of some studies conducted in other parts of the world. Another limitation might be the gap between the time the research was performed and the time the work was submitted, which excluded published papers during that period. Although we applied all due diligence, a small residual chance of accidentally having overlooked papers in an academic database cannot be fully ruled out. In addition, although we tried to discuss all findings in the literature, it is beyond the scope of this review to detail every single finding of the papers. Similarly, an investigation into data biases in the literature (imbalanced data with respect to diagnostic classes, patient ethnicity and skin color, gender, etc) is left as a direction for future studies.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>The use of AI has high potential to facilitate the way skin cancer is diagnosed. Two main branches of AI are used to detect and classify skin cancer, namely shallow and deep techniques. However, the reliability of such AI tools is questionable since different data set sizes, image types, and number of diagnostic classes are being used and evaluated with different evaluation metrics. Accuracy is the metric used most as a primary evaluation metric but does not allow for independently assessing FN and FP rates. This study found that higher accuracy scores are reported when fewer diagnostic classes are included. Interestingly and counterintuitively, our analysis also suggests that higher accuracy scores are reported when smaller sample sizes are included, which may be due to factors such as the type of images and the techniques used. Furthermore, only independent, external validation using a large, diverse, and unbiased database is fit to demonstrate the generality and reliability of any AI technology prior to clinical deployment.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Search query.</p>
        <media xlink:href="jmir_v23i11e22934_app1.docx" xlink:title="DOCX File , 16 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Data extraction form.</p>
        <media xlink:href="jmir_v23i11e22934_app2.docx" xlink:title="DOCX File , 14 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Study characteristics.</p>
        <media xlink:href="jmir_v23i11e22934_app3.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Data and deployment characteristics.</p>
        <media xlink:href="jmir_v23i11e22934_app4.docx" xlink:title="DOCX File , 21 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>Technical details.</p>
        <media xlink:href="jmir_v23i11e22934_app5.docx" xlink:title="DOCX File , 32 KB"/>
      </supplementary-material>
      <supplementary-material id="app6">
        <label>Multimedia Appendix 6</label>
        <p>Data, model, and evaluation.</p>
        <media xlink:href="jmir_v23i11e22934_app6.docx" xlink:title="DOCX File , 32 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">ACM DL</term>
          <def>
            <p>Association for Computing Machinery Digital Library</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">FN</term>
          <def>
            <p>false negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">FP</term>
          <def>
            <p>false positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">IEEE</term>
          <def>
            <p>Institute of Electrical and Electronics Engineers</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">ISIC</term>
          <def>
            <p>International Skin Imaging Collaboration</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">kNN</term>
          <def>
            <p>k-nearest neighbor</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">LR</term>
          <def>
            <p>logistic regression</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">NB</term>
          <def>
            <p>naive Bayes</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">NPV</term>
          <def>
            <p>negative predictive value</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">PPV</term>
          <def>
            <p>positive predictive value</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">PRISMA-ScR</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">ResNet</term>
          <def>
            <p>residual network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">RF</term>
          <def>
            <p>random forest</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb17">ROC</term>
          <def>
            <p>receiver operating characteristic</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb18">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb19">TN</term>
          <def>
            <p>true negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb20">TP</term>
          <def>
            <p>true positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb21">VGG</term>
          <def>
            <p>Visual Geometry Group</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Al</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Skin lesion classification with deep convolutional neural network: process development and validation</article-title>
          <source>JMIR Dermatol</source>
          <year>2020</year>
          <month>5</month>
          <day>7</day>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>e18438</fpage>
          <pub-id pub-id-type="doi">10.2196/18438</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Carvalho</surname>
              <given-names>TM</given-names>
            </name>
            <name name-style="western">
              <surname>Noels</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wakkee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Udrea</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nijsten</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Development of smartphone apps for skin cancer risk assessment: progress and promise</article-title>
          <source>JMIR Dermatol</source>
          <year>2019</year>
          <month>07</month>
          <day>11</day>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>e13376</fpage>
          <pub-id pub-id-type="doi">10.2196/13376</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Loescher</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Janda</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Soyer</surname>
              <given-names>HP</given-names>
            </name>
            <name name-style="western">
              <surname>Shea</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Curiel-Lewandrowski</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Advances in skin cancer early detection and diagnosis</article-title>
          <source>Semin Oncol Nurs</source>
          <year>2013</year>
          <month>08</month>
          <volume>29</volume>
          <issue>3</issue>
          <fpage>170</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1016/j.soncn.2013.06.003</pub-id>
          <pub-id pub-id-type="medline">23958215</pub-id>
          <pub-id pub-id-type="pii">S0749-2081(13)00033-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lieber</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Majumder</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Ellis</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Billheimer</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Mahadevan-Jansen</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>In vivo nonmelanoma skin cancer diagnosis using Raman microspectroscopy</article-title>
          <source>Lasers Surg Med</source>
          <year>2008</year>
          <month>09</month>
          <volume>40</volume>
          <issue>7</issue>
          <fpage>461</fpage>
          <lpage>467</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/18727020"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/lsm.20653</pub-id>
          <pub-id pub-id-type="medline">18727020</pub-id>
          <pub-id pub-id-type="pmcid">PMC2782422</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <source>Introduction to AI Robotics</source>
          <year>2019</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>MIT Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marsland</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Machine Learning: An Algorithmic Perspective</source>
          <year>2011</year>
          <publisher-loc>Boca Raton, FL</publisher-loc>
          <publisher-name>CRC Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mitra</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Craswell</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <source>An Introduction to Neural Information Retrieval</source>
          <year>2018</year>
          <publisher-loc>Boston, MA</publisher-loc>
          <publisher-name>Now Foundations and Trends</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <source>ISIC 2018: Skin Lesion Analysis Towards Melanoma Detection</source>
          <access-date>2020-06-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://challenge2018.isic-archive.com/">https://challenge2018.isic-archive.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lillie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zarin</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Colquhoun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Levac</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Horsley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hempel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McGowan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hartling</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aldcroft</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Garritty</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lewin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Macdonald</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Langlois</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Soares-Weiser</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Moriarty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tunçalp</surname>
              <given-names>Ö</given-names>
            </name>
            <name name-style="western">
              <surname>Straus</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>PRISMA Extension for Scoping Reviews (PRISMA-ScR): checklist and explanation</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <month>10</month>
          <day>02</day>
          <volume>169</volume>
          <issue>7</issue>
          <fpage>467</fpage>
          <lpage>473</lpage>
          <pub-id pub-id-type="doi">10.7326/m18-0850</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ramlakhan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A mobile automated skin lesion classification system</article-title>
          <year>2011</year>
          <conf-name>IEEE 23rd International Conference on Tools with Artificial Intelligence</conf-name>
          <conf-date>2011</conf-date>
          <conf-loc>Boca Raton, FL</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ictai.2011.29</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Thiers</surname>
              <given-names>BH</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>JZ</given-names>
            </name>
          </person-group>
          <article-title>Automatic diagnosis of melanoma using machine learning methods on a spectroscopic system</article-title>
          <source>BMC Med Imaging</source>
          <year>2014</year>
          <month>10</month>
          <day>13</day>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>36</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedimaging.biomedcentral.com/articles/10.1186/1471-2342-14-36"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/1471-2342-14-36</pub-id>
          <pub-id pub-id-type="medline">25311811</pub-id>
          <pub-id pub-id-type="pii">1471-2342-14-36</pub-id>
          <pub-id pub-id-type="pmcid">PMC4204387</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sabouri</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>GholamHosseini</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Larsson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A cascade classifier for diagnosis of melanoma in clinical images</article-title>
          <year>2014</year>
          <conf-name>2014 36th Annual International Conference of the IEEE Engineering in Medicine and Biology Society</conf-name>
          <conf-date>26-30 Aug. 2014</conf-date>
          <conf-loc>Chicago, IL</conf-loc>
          <fpage>6751</fpage>
          <pub-id pub-id-type="doi">10.1109/embc.2014.6945177</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaur</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Albano</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Cole</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Hagerty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>LeAnder</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Moss</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Stoecker</surname>
              <given-names>WV</given-names>
            </name>
          </person-group>
          <article-title>Real-time supervised detection of pink areas in dermoscopic images of melanoma: importance of color shades, texture and location</article-title>
          <source>Skin Res Technol</source>
          <year>2015</year>
          <month>11</month>
          <day>22</day>
          <volume>21</volume>
          <issue>4</issue>
          <fpage>466</fpage>
          <lpage>73</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25809473"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/srt.12216</pub-id>
          <pub-id pub-id-type="medline">25809473</pub-id>
          <pub-id pub-id-type="pmcid">PMC4578974</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasr-Esfahani</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Samavi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Karimi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Soroushmehr</surname>
              <given-names>SMR</given-names>
            </name>
            <name name-style="western">
              <surname>Jafari</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Ward</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Najarian</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Melanoma detection by analysis of clinical images using convolutional neural network</article-title>
          <year>2016</year>
          <conf-name>38th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2016</conf-date>
          <conf-loc>Orlando, FL</conf-loc>
          <pub-id pub-id-type="doi">10.1109/embc.2016.7590963</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaworek-Korjakowska</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Computer-aided diagnosis of micro-malignant melanoma lesions applying support vector machines</article-title>
          <source>Biomed Res Int</source>
          <year>2016</year>
          <volume>2016</volume>
          <fpage>4381972</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2016/4381972"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2016/4381972</pub-id>
          <pub-id pub-id-type="medline">27382567</pub-id>
          <pub-id pub-id-type="pmcid">PMC4921724</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jaworek-Korjakowska</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kłeczek</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Automatic classification of specific melanocytic lesions using artificial intelligence</article-title>
          <source>Biomed Res Int</source>
          <year>2016</year>
          <volume>2016</volume>
          <fpage>8934242</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2016/8934242"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2016/8934242</pub-id>
          <pub-id pub-id-type="medline">26885520</pub-id>
          <pub-id pub-id-type="pmcid">PMC4739011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sabbaghi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aldeen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Garnavi</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A deep bag-of-features model for the classification of melanomas in dermoscopy images</article-title>
          <year>2016</year>
          <conf-name>2016 38th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2016</conf-date>
          <conf-loc>Orlando, FL</conf-loc>
          <fpage>2016</fpage>
          <pub-id pub-id-type="doi">10.1109/embc.2016.7590962</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Premaladha</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ravichandran</surname>
              <given-names>KS</given-names>
            </name>
          </person-group>
          <article-title>Novel approaches for diagnosing melanoma skin lesions through supervised and deep learning algorithms</article-title>
          <source>J Med Syst</source>
          <year>2016</year>
          <month>04</month>
          <day>12</day>
          <volume>40</volume>
          <issue>4</issue>
          <fpage>96</fpage>
          <pub-id pub-id-type="doi">10.1007/s10916-016-0460-2</pub-id>
          <pub-id pub-id-type="medline">26872778</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-016-0460-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mustafa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dauda</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Dauda</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Image processing and SVM classification for melanoma detection</article-title>
          <year>2017</year>
          <conf-name>2017 International Conference on Computing Networking and Informatics (ICCNI)</conf-name>
          <conf-date>2017</conf-date>
          <conf-loc>Lagos, Nigeria</conf-loc>
          <fpage>1</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1109/iccni.2017.8123777</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bovik</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Melanoma classification on dermoscopy images using a neural network ensemble model</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2017</year>
          <month>03</month>
          <volume>36</volume>
          <issue>3</issue>
          <fpage>849</fpage>
          <lpage>858</lpage>
          <pub-id pub-id-type="doi">10.1109/tmi.2016.2633551</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Afifi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>GholamHosseini</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sinha</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>SVM classifier on chip for melanoma detection</article-title>
          <year>2017</year>
          <conf-name>39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2017</conf-date>
          <conf-loc>Jeju, Korea (South)</conf-loc>
          <pub-id pub-id-type="doi">10.1109/embc.2017.8036814</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Dou</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Heng</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Automated melanoma recognition in dermoscopy images via very deep residual networks</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2017</year>
          <month>04</month>
          <volume>36</volume>
          <issue>4</issue>
          <fpage>994</fpage>
          <lpage>1004</lpage>
          <pub-id pub-id-type="doi">10.1109/tmi.2016.2642839</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esteva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kuprel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Novoa</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Swetter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Blau</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Thrun</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>
          <source>Nature</source>
          <year>2017</year>
          <month>01</month>
          <day>25</day>
          <volume>542</volume>
          <issue>7639</issue>
          <fpage>115</fpage>
          <lpage>118</lpage>
          <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mandache</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dalimier</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Durkin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Boccara</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Olivo-Marin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Meas-Yedid</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Basal cell carcinoma detection in full field OCT images using convolutional neural networks</article-title>
          <year>2018</year>
          <conf-name>IEEE 15th International Symposium on Biomedical Imaging (ISBI)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Washington, DC</conf-loc>
          <pub-id pub-id-type="doi">10.1109/isbi.2018.8363689</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Linsangan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Adtoon</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Skin cancer detection and classification for moles using k-nearest neighbor algorithm</article-title>
          <year>2018</year>
          <conf-name>5th International Conference on Bioinformatics Research and Applications</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3309129.3309141</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marchetti</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Codella</surname>
              <given-names>NC</given-names>
            </name>
            <name name-style="western">
              <surname>Dusza</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Gutman</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Helba</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kalloo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Carrera</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Celebi</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>DeFazio</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Jaimes</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Marghoob</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Quigley</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Scope</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yélamos</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Halpern</surname>
              <given-names>AC</given-names>
            </name>
            <collab>International Skin Imaging Collaboration</collab>
          </person-group>
          <article-title>Results of the 2016 International Skin Imaging Collaboration International Symposium on Biomedical Imaging challenge: comparison of the accuracy of computer algorithms to dermatologists for the diagnosis of melanoma from dermoscopic images</article-title>
          <source>J Am Acad Dermatol</source>
          <year>2018</year>
          <month>02</month>
          <volume>78</volume>
          <issue>2</issue>
          <fpage>270</fpage>
          <lpage>277.e1</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28969863"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jaad.2017.08.016</pub-id>
          <pub-id pub-id-type="medline">28969863</pub-id>
          <pub-id pub-id-type="pii">S0190-9622(17)32202-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC5768444</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasir</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Attique Khan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sharif</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lali</surname>
              <given-names>IU</given-names>
            </name>
            <name name-style="western">
              <surname>Saba</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>An improved strategy for skin lesion detection and classification using uniform segmentation and feature selection based approach</article-title>
          <source>Microsc Res Tech</source>
          <year>2018</year>
          <month>06</month>
          <day>21</day>
          <volume>81</volume>
          <issue>6</issue>
          <fpage>528</fpage>
          <lpage>543</lpage>
          <pub-id pub-id-type="doi">10.1002/jemt.23009</pub-id>
          <pub-id pub-id-type="medline">29464868</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gautam</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Meena</surname>
              <given-names>YK</given-names>
            </name>
            <name name-style="western">
              <surname>Ul Haq</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Machine learning-based diagnosis of melanoma using macro images</article-title>
          <source>Int J Numer Method Biomed Eng</source>
          <year>2018</year>
          <month>05</month>
          <day>20</day>
          <volume>34</volume>
          <issue>5</issue>
          <fpage>e2953</fpage>
          <pub-id pub-id-type="doi">10.1002/cnm.2953</pub-id>
          <pub-id pub-id-type="medline">29266819</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salem</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Azar</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tokajian</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>An image processing and genetic algorithm-based approach for the detection of melanoma in patients</article-title>
          <source>Methods Inf Med</source>
          <year>2018</year>
          <fpage>231</fpage>
          <lpage>286</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Acral melanoma detection using a convolutional neural network for dermoscopy images</article-title>
          <source>PLoS ONE</source>
          <year>2018</year>
          <month>03</month>
          <day>7</day>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>e0193321</fpage>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0193321</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Putten</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Kambod</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kambod</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Deep residual neural networks for automated basal cell carcinoma detection</article-title>
          <year>2018</year>
          <conf-name>IEEE EMBS International Conference on Biomedical &amp; Health Informatics (BHI)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bhi.2018.8333437</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yeo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Skin lesion analysis by multi-target deep neural networks</article-title>
          <year>2018</year>
          <conf-name>40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>2018</fpage>
          <pub-id pub-id-type="doi">10.1109/embc.2018.8512488</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Skin lesion analysis towards melanoma detection using deep learning network</article-title>
          <source>Sensors (Basel)</source>
          <year>2018</year>
          <month>02</month>
          <day>11</day>
          <volume>18</volume>
          <issue>2</issue>
          <fpage>556</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s18020556"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s18020556</pub-id>
          <pub-id pub-id-type="medline">29439500</pub-id>
          <pub-id pub-id-type="pii">s18020556</pub-id>
          <pub-id pub-id-type="pmcid">PMC5855504</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaymak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Esmaili</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Serener</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for two-step classification of malignant pigmented skin lesions</article-title>
          <year>2018</year>
          <conf-name>14th Symposium on Neural Networks and Applications (NEUREL)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Belgrade, Serbia</conf-loc>
          <fpage>1</fpage>
          <pub-id pub-id-type="doi">10.1109/neurel.2018.8587019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hameed</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Shabut</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hossain</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Multi-class skin diseases classification using deep convolutional neural network and support vector machine</article-title>
          <year>2018</year>
          <conf-name>12th International Conference on Software, Knowledge, Information Management &amp; Applications (SKIMA)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Phnom Penh, Cambodia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/skima.2018.8631525</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shahin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kamal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Elattar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Deep ensemble learning for skin lesion classification from dermoscopic images</article-title>
          <year>2018</year>
          <conf-name>9th Cairo International Biomedical Engineering Conference (CIBEC)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Cairo, Egypt</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cibec.2018.8641815</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harangi</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Baran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hajdu</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Classification of skin lesions using an ensemble of deep neural networks</article-title>
          <year>2018</year>
          <conf-name>2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>2018</fpage>
          <pub-id pub-id-type="doi">10.1109/embc.2018.8512800</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mahbod</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schaefer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ecker</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ellinger</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Skin lesion classification using hybrid deep neural networks</article-title>
          <year>2019</year>
          <conf-name>ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Brighton, UK</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icassp.2019.8683352</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shihadeh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ansari</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ogunfunmi</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Deep learning based image classification for remote medical diagnosis</article-title>
          <year>2018</year>
          <conf-name>IEEE Global Humanitarian Technology Conference (GHTC)</conf-name>
          <conf-date>2018</conf-date>
          <conf-loc>San Jose, CA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ghtc.2018.8601558</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nida</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Irtaza</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Javed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yousaf</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmood</surname>
              <given-names>MT</given-names>
            </name>
          </person-group>
          <article-title>Melanoma lesion detection and segmentation using deep region based convolutional neural network and fuzzy C-means clustering</article-title>
          <source>Int J Med Inform</source>
          <year>2019</year>
          <month>04</month>
          <volume>124</volume>
          <fpage>37</fpage>
          <lpage>48</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2019.01.005</pub-id>
          <pub-id pub-id-type="medline">30784425</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(18)30747-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Attention residual learning for skin lesion classification</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2019</year>
          <month>9</month>
          <volume>38</volume>
          <issue>9</issue>
          <fpage>2092</fpage>
          <lpage>2103</lpage>
          <pub-id pub-id-type="doi">10.1109/tmi.2019.2893944</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Demir</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kose</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Early detection of skin cancer using deep learning architectures: ResNet-101 and Inception-v3</article-title>
          <year>2019</year>
          <conf-name>Medical Technologies Congress (TIPTEKNO)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Izmir, Turkey</conf-loc>
          <pub-id pub-id-type="doi">10.1109/tiptekno47231.2019.8972045</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gavrilov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lazarenko</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zakirov</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>AI recognition in skin pathologies detection</article-title>
          <year>2019</year>
          <conf-name>2019 International Conference on Artificial Intelligence: Applications and Innovations (IC-AIAI)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Belgrade, Serbia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ic-aiai48757.2019.00017</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aggarwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sreedevi</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Attention-guided deep convolutional neural networks for skin cancer classification</article-title>
          <year>2019</year>
          <conf-name>Ninth International Conference on Image Processing Theory, Tools and Applications (IPTA)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Istanbul, Turkey</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ipta.2019.8936100</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The application of deep learning on fast skin cancer diagnosis</article-title>
          <year>2019</year>
          <conf-name>2019 International Conference on Information Technology and Computer Application (ITCA)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Guangzhou, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/itca49981.2019.00034</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Multi-pooling attention learning for melanoma recognition</article-title>
          <year>2019</year>
          <conf-name>2019 Digital Image Computing: Techniques and Applications (DICTA)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Perth, WA, Australia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/dicta47822.2019.8945868</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Spasić</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Chapman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Andres</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Machine learning on mobile: an on-device inference app for skin cancer detection</article-title>
          <year>2019</year>
          <conf-name>Fourth International Conference on Fog and Mobile Edge Computing (FMEC)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Rome, Italy</conf-loc>
          <fpage>A</fpage>
          <pub-id pub-id-type="doi">10.1109/fmec.2019.8795362</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mahbod</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schaefer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ellinger</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ecker</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pitiot</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Fusing fine-tuned deep features for skin lesion classification</article-title>
          <source>Comput Med Imaging Graph</source>
          <year>2019</year>
          <month>01</month>
          <volume>71</volume>
          <fpage>19</fpage>
          <lpage>29</lpage>
          <pub-id pub-id-type="doi">10.1016/j.compmedimag.2018.10.007</pub-id>
          <pub-id pub-id-type="medline">30458354</pub-id>
          <pub-id pub-id-type="pii">S0895-6111(18)30605-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wodzinski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Skalski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Witkowski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pellacani</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ludzik</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Convolutional neural network approach to classify skin lesions using reflectance confocal microscopy</article-title>
          <year>2019</year>
          <conf-name>2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Berlin, Germany</conf-loc>
          <pub-id pub-id-type="doi">10.1109/embc.2019.8856731</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brinker</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hekler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Enk</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>von Kalle</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Enhanced classifier training to improve precision of a convolutional neural network to identify images of skin lesions</article-title>
          <source>PLoS One</source>
          <year>2019</year>
          <month>6</month>
          <day>24</day>
          <volume>14</volume>
          <issue>6</issue>
          <fpage>e0218713</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0218713"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0218713</pub-id>
          <pub-id pub-id-type="medline">31233565</pub-id>
          <pub-id pub-id-type="pii">PONE-D-18-30501</pub-id>
          <pub-id pub-id-type="pmcid">PMC6590821</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ech-Cherif</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Misbhauddin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ech-Cherif</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Deep neural network based mobile dermoscopy application for triaging skin cancer detection</article-title>
          <year>2019</year>
          <conf-name>2nd International Conference on Computer Applications &amp; Information Security (ICCAIS)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Riyadh, Saudi Arabia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cais.2019.8769517</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guha</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Rafizul Haque</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Convolutional neural network based skin lesion analysis for classifying melanoma</article-title>
          <year>2019</year>
          <conf-name>International Conference on Sustainable Technologies for Industry 4.0 (STI)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Dhaka, Bangladesh</conf-loc>
          <pub-id pub-id-type="doi">10.1109/sti47673.2019.9067979</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kassani</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Kassani</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Wesolowski</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Schneider</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Deters</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Depthwise separable convolutional neural network for skin lesion classification</article-title>
          <year>2019</year>
          <conf-name>IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Ajman, United Arab Emirates</conf-loc>
          <pub-id pub-id-type="doi">10.1109/isspit47144.2019.9001790</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Budhiman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Suyanto</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Arifianto</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Melanoma cancer classification using ResNet with data augmentation</article-title>
          <year>2019</year>
          <conf-name>2019 International Seminar on Research of Information Technology and Intelligent Systems (ISRITI)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Yogyakarta, Indonesia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/isriti48646.2019.9034624</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sae-Lim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wettayaprasit</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Aiyarak</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Convolutional neural networks using MobileNet for skin lesion classification</article-title>
          <year>2019</year>
          <conf-name>16th International Joint Conference on Computer Science and Software Engineering (JCSSE)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Chonburi, Thailand</conf-loc>
          <pub-id pub-id-type="doi">10.1109/jcsse.2019.8864155</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Purnama</surname>
              <given-names>IKE</given-names>
            </name>
            <name name-style="western">
              <surname>Hernanda</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Ratna</surname>
              <given-names>AAP</given-names>
            </name>
            <name name-style="western">
              <surname>Nurtanio</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hidayati</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Purnomo</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Nugroho</surname>
              <given-names>SMS</given-names>
            </name>
            <name name-style="western">
              <surname>Rachmadi</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Disease classification based on dermoscopic skin images using convolutional neural network in teledermatology system</article-title>
          <year>2019</year>
          <conf-name>International Conference on Computer Engineering, Network, and Intelligent Multimedia (CENIM)</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Surabaya, Indonesia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cenim48368.2019.8973303</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hasan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Barman</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reza</surname>
              <given-names>AW</given-names>
            </name>
          </person-group>
          <article-title>Skin cancer detection using convolutional neural network</article-title>
          <year>2019</year>
          <conf-name>5th International Conference on Computing and Artificial Intelligence, New York, NY, USA</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Bali, Indonesia</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3330482.3330525</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Automatic skin cancer detection in dermoscopy images based on ensemble lightweight deep learning network</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>99633</fpage>
          <lpage>99647</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2020.2997710</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasiri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Helsper</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fathi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>DePicT Melanoma Deep-CLASS: a deep convolutional neural networks approach to classify skin lesion images</article-title>
          <source>BMC Bioinformatics</source>
          <year>2020</year>
          <month>03</month>
          <day>11</day>
          <volume>21</volume>
          <issue>Suppl 2</issue>
          <fpage>84</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-020-3351-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12859-020-3351-y</pub-id>
          <pub-id pub-id-type="medline">32164530</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12859-020-3351-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC7068864</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Poovizhi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ganesh Babu</surname>
              <given-names>TR</given-names>
            </name>
          </person-group>
          <article-title>An efficient skin cancer diagnostic system using Bendlet Transform and support vector machine</article-title>
          <source>An Acad Bras Cienc</source>
          <year>2020</year>
          <volume>92</volume>
          <issue>1</issue>
          <fpage>e20190554</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.scielo.br/scielo.php?script=sci_arttext&#38;pid=S0001-37652020000100803&#38;lng=en&#38;nrm=iso&#38;tlng=en"/>
          </comment>
          <pub-id pub-id-type="doi">10.1590/0001-3765202020190554</pub-id>
          <pub-id pub-id-type="medline">32491128</pub-id>
          <pub-id pub-id-type="pii">S0001-37652020000100803</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adegun</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Viriri</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based system for automatic melanoma detection</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>7160</fpage>
          <lpage>7172</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2019.2962812</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sanketh</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Madhu Bala</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Narendra Reddy</surname>
              <given-names>PV</given-names>
            </name>
            <name name-style="western">
              <surname>Phani Kumar</surname>
              <given-names>GVS</given-names>
            </name>
          </person-group>
          <article-title>Melanoma disease detection using convolutional neural networks</article-title>
          <year>2020</year>
          <conf-name>2020 4th International Conference on Intelligent Computing and Control Systems (ICICCS), 2020</conf-name>
          <conf-date>2020</conf-date>
          <conf-loc>Madurai, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/iciccs48265.2020.9121075</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodfellow</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Deep Learning</source>
          <year>2016</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>MIT Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Herstein</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <source>Topics in Algebra</source>
          <year>1964</year>
          <publisher-loc>Waltham, MA</publisher-loc>
          <publisher-name>Blaisdell</publisher-name>
          <fpage>90</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Pei</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hao</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Shallow classification or deep learning: an experimental study</article-title>
          <year>2014</year>
          <conf-name>2014 22nd International Conference on Pattern Recognition</conf-name>
          <conf-date>2014</conf-date>
          <conf-loc>Stockholm, Sweden</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icpr.2014.333</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brinker</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hekler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Utikal</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Grabe</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Schadendorf</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Klode</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Berking</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Steeb</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Enk</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>von Kalle</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Skin cancer classification using convolutional neural networks: systematic review</article-title>
          <source>J Med Internet Res</source>
          <year>2018</year>
          <month>10</month>
          <day>17</day>
          <volume>20</volume>
          <issue>10</issue>
          <fpage>e11936</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2018/10/e11936/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/11936</pub-id>
          <pub-id pub-id-type="medline">30333097</pub-id>
          <pub-id pub-id-type="pii">v20i10e11936</pub-id>
          <pub-id pub-id-type="pmcid">PMC6231861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Razzak</surname>
              <given-names>MI</given-names>
            </name>
            <name name-style="western">
              <surname>Zaib</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for medical image processing: overview, challenges and the future</article-title>
          <source>Classification in BioApps: Automation of Decision Making</source>
          <year>2018</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer International</publisher-name>
          <fpage>323</fpage>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
