<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e57723</article-id>
      <article-id pub-id-type="pmid">39879621</article-id>
      <article-id pub-id-type="doi">10.2196/57723</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Transformers for Neuroimage Segmentation: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Coristine</surname>
            <given-names>Andrew</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mistry</surname>
            <given-names>Jinal</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Shen</surname>
            <given-names>Yiqing</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mary</surname>
            <given-names>Stella</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Iratni</surname>
            <given-names>Maya</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3567-7885</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Abdullah</surname>
            <given-names>Amira</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9896-5676</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Aldhaheri</surname>
            <given-names>Mariam</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0005-8901-4242</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Elharrouss</surname>
            <given-names>Omar</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5341-5440</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Abd-alrazaq</surname>
            <given-names>Alaa</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7695-4626</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Rustamov</surname>
            <given-names>Zahiriddin</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4977-1781</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Zaki</surname>
            <given-names>Nazar</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6259-9843</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Damseh</surname>
            <given-names>Rafat</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Computer Science and Software Engineering</institution>
            <institution>United Arab Emirates University</institution>
            <addr-line>Sheik Khalifa Bin Zayed St - 'Asharij - Shiebat Al Oud - Abu Dhabi</addr-line>
            <addr-line>Al Ain, 15551</addr-line>
            <country>United Arab Emirates</country>
            <phone>971 37135586</phone>
            <email>rdamseh@uaeu.ac.ae</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6797-0448</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Computer Science and Software Engineering</institution>
        <institution>United Arab Emirates University</institution>
        <addr-line>Al Ain</addr-line>
        <country>United Arab Emirates</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Weill Cornell Medicine-Qatar</institution>
        <addr-line>Doha</addr-line>
        <country>Qatar</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Rafat Damseh <email>rdamseh@uaeu.ac.ae</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>29</day>
        <month>1</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e57723</elocation-id>
      <history>
        <date date-type="received">
          <day>25</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>2</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>23</day>
          <month>8</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>19</day>
          <month>11</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Maya Iratni, Amira Abdullah, Mariam Aldhaheri, Omar Elharrouss, Alaa Abd-alrazaq, Zahiriddin Rustamov, Nazar Zaki, Rafat Damseh. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 29.01.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e57723" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Neuroimaging segmentation is increasingly important for diagnosing and planning treatments for neurological diseases. Manual segmentation is time-consuming, apart from being prone to human error and variability. Transformers are a promising deep learning approach for automated medical image segmentation.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This scoping review will synthesize current literature and assess the use of various transformer models for neuroimaging segmentation.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A systematic search in major databases, including Scopus, IEEE Xplore, PubMed, and ACM Digital Library, was carried out for studies applying transformers to neuroimaging segmentation problems from 2019 through 2023. The inclusion criteria allow only for peer-reviewed journal papers and conference papers focused on transformer-based segmentation of human brain imaging data. Excluded are the studies dealing with nonneuroimaging data or raw brain signals and electroencephalogram data. Data extraction was performed to identify key study details, including image modalities, datasets, neurological conditions, transformer models, and evaluation metrics. Results were synthesized using a narrative approach.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Of the 1246 publications identified, 67 (5.38%) met the inclusion criteria. Half of all included studies were published in 2022, and more than two-thirds used transformers for segmenting brain tumors. The most common imaging modality was magnetic resonance imaging (n=59, 88.06%), while the most frequently used dataset was brain tumor segmentation dataset (n=39, 58.21%). 3D transformer models (n=42, 62.69%) were more prevalent than their 2D counterparts. Most of the models developed used hybrid convolutional neural network-transformer architectures (n=57, 85.07%), with the vision transformer being the most frequently used type of transformer (n=37, 55.22%). The most frequent evaluation metric was the Dice score (n=63, 94.03%). Studies generally reported increased segmentation accuracy and the ability to model both local and global features in brain images.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This review reflects the recent increase in the adoption of transformers for neuroimaging segmentation, particularly for brain tumor detection. Currently, hybrid convolutional neural network-transformer architectures achieve state-of-the-art performances on benchmark datasets over standalone models. Nevertheless, their applicability remains highly limited by high computational costs and potential overfitting on small datasets. The heavy reliance of the field on the brain tumor segmentation dataset suggests the need for a more diverse set of datasets to validate the performances of models on a variety of neurological diseases. Further research is needed to define the optimal transformer architectures and training methods for clinical applications. Continuing development may make transformers the state-of-the-art for fast, accurate, and reliable brain magnetic resonance imaging segmentation, which could lead to improved clinical tools for diagnosing and evaluating neurological disorders.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>3D segmentation</kwd>
        <kwd>brain tumor segmentation</kwd>
        <kwd>deep learning</kwd>
        <kwd>neuroimaging</kwd>
        <kwd>transformer</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Neuroimaging refers to the visualization of the structure and function of the brain. It is one of the most important tools in the understanding of different neurological disorders. Generally, neuroimages can be obtained using 3 principal imaging modalities, where each modality shows the complexities of the brain from a different perspective. Of the 3, magnetic resonance imaging (MRI) is still the most frequently used due to high contrasting ability of brain tissues, high spatial resolution, and no risk of radiation exposure [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. For different brain regions to be viewed, multiple MRI sequences are needed, such as T1, T1ce, T2, and fluid-attenuated inversion recovery, as presented in <xref rid="figure1" ref-type="fig">Figure 1</xref> [<xref ref-type="bibr" rid="ref4">4</xref>]. The second neuroimaging modality is computed tomography (CT), which can produce high-resolution images. On the other hand, it has limited soft tissue characterization, and its radiation risk makes it unsuitable for repetitive use [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. The third neuroimaging modality is positron emission tomography (PET), which integrates nuclear medicine to visualize metabolic activity [<xref ref-type="bibr" rid="ref2">2</xref>]. PET has high sensitivity, making it effective in detecting metastases, finding abnormalities, and imaging deep structures. However, it has limited resolution, and repeated use causes radiation risk [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Finding changes in brain tissue through neuroimaging analysis is critical for detecting and monitoring neurological disorders [<xref ref-type="bibr" rid="ref6">6</xref>] and brain tumors [<xref ref-type="bibr" rid="ref7">7</xref>]. 
Segmentation is a useful process in outlining regions of interest in medical images [<xref ref-type="bibr" rid="ref8">8</xref>], which enables the quantitative assessment of atrophy, growths, and anatomical differences that depict conditions like Alzheimer disease, schizophrenia, and brain tumors among other neurodegenerative diseases [<xref ref-type="bibr" rid="ref2">2</xref>]. Because of this, segmentation is applied broadly in different medical applications in diagnosis, tissue classification, radiotherapy treatment, and surgical planning [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref9">9</xref>].</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Magnetic resonance imaging modalities of brain tumor. FLAIR: fluid-attenuated inversion recovery; T1: T1-weighted imaging; T2: T2-weighted imaging; T1ce: T1-weighted contrast-enhanced imaging.</p>
        </caption>
        <graphic xlink:href="jmir_v27i1e57723_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>Segmentation techniques can be classified into 3 categories: manual, semiautomated, and fully automated. Manual segmentation is the standard for segmentation because it is believed to be the most accurate [<xref ref-type="bibr" rid="ref10">10</xref>]. The technique, however, is laborious, time-consuming, and subjective, since it depends on human judgment, and this may result in variation in the results because of the different interpretations. Due to this, there has been a great deal of research into automated segmentation techniques to replicate the results from manual segmentation but with a higher level of efficiency and consistency [<xref ref-type="bibr" rid="ref2">2</xref>]. To do this, 2 early paradigms were used: intensity-based approaches, which include thresholding, edge-detection, and region-based [<xref ref-type="bibr" rid="ref8">8</xref>], and traditional machine learning paradigms, including support vector machine, k-nearest neighbor clustering, and random forest [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. Each of these methods has been applied in 1 or more ways, but their applicability and performance within the task of image segmentation remain limited [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Since then, deep learning (DL) methods have transformed medical imaging applications and became a strong alternative to classical techniques.</p>
      <p>DL is a subclass of machine learning that involves artificial neural networks with multiple layers. These networks are designed to progressively learn hierarchical representations and features of data, which both eliminates the need for manual feature engineering [<xref ref-type="bibr" rid="ref2">2</xref>] and enables the extraction of complicated patterns from large datasets [<xref ref-type="bibr" rid="ref13">13</xref>]. Different DL architectures have been used for medical image segmentation, but the most widely used and popular one is convolutional neural networks (CNNs), which have achieved state-of-the-art performances in different medical imaging tasks, including segmentation [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. U-Net [<xref ref-type="bibr" rid="ref15">15</xref>] is another notable model that was specially designed for biomedical image segmentation and has produced very good results in its field [<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. Some other notable models include SegNet [<xref ref-type="bibr" rid="ref19">19</xref>], ResNet [<xref ref-type="bibr" rid="ref20">20</xref>], DenseNet [<xref ref-type="bibr" rid="ref21">21</xref>], 3D-ConvNet [<xref ref-type="bibr" rid="ref22">22</xref>], and DeepLab [<xref ref-type="bibr" rid="ref23">23</xref>]. These models have served as a solid foundation for the imaging field and have resulted in a plethora of variants, each developed for specific imaging modalities, anatomical structures, and segmentation tasks. Transformers [<xref ref-type="bibr" rid="ref24">24</xref>] are a type of neural network architectures that mainly rely on self-attention mechanisms. They were first proposed in 2017 and have since yielded state-of-the-art results in the field of natural language processing [<xref ref-type="bibr" rid="ref25">25</xref>]. 
More recently, transformers have also shown success when applied to a wide array of computer vision tasks, one of which is segmentation [<xref ref-type="bibr" rid="ref26">26</xref>]. Although CNNs have achieved impressive performances in image-related tasks, they may not capture global and long-range dependencies well due to the small kernel size [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>].</p>
      <p>Transformers have recently gained popularity in imaging due to their self-attention mechanism, which can model these long-range dependencies—especially useful in brain segmentation [<xref ref-type="bibr" rid="ref27">27</xref>]. The great success of transformers has motivated the construction of vision transformers (ViT) [<xref ref-type="bibr" rid="ref28">28</xref>], which forego the use of convolutional layers and rely instead on a multihead self-attention mechanism [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. This architecture divides an image into fixed-size patches, linearly embeds them, and processes them through a transformer network, thereby allowing it to model long-range dependencies with reduced inductive bias [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. Recently, ViT architectures specifically designed for medical image segmentation have been explored and resulted in models like TransUnet [<xref ref-type="bibr" rid="ref31">31</xref>] and Swin-UNet [<xref ref-type="bibr" rid="ref32">32</xref>] for general-purpose use and models like TransBTS [<xref ref-type="bibr" rid="ref33">33</xref>] and Swin-UNETR [<xref ref-type="bibr" rid="ref34">34</xref>] with the backbone for brain tumor segmentation [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. Transformer use in neuroimaging deserves special study, as the structures of the brain are complicated. Neural networks based on transformers can model long-range dependencies and spatial relationships of the brain images [<xref ref-type="bibr" rid="ref27">27</xref>], which is very important in brain segmentation.</p>
      <p>Although transformers have shown very promising results in many medical imaging tasks, their use in neuroimaging segmentation remains an evolving field that has not been systematically reviewed. Existing literature reviews have either examined the use of transformers for general medical image segmentation without focusing specifically on brain segmentation [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>] or have reviewed brain segmentation techniques using various DL methods without emphasizing the role of transformers [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. One more difference that exists between this review and others is the focus on applying transformers to neuroimage segmentation, which is a central task in neurological disorder diagnosis and treatment. For example, compared with more general surveys such as those by Shamshad et al [<xref ref-type="bibr" rid="ref29">29</xref>] and Xiao et al [<xref ref-type="bibr" rid="ref26">26</xref>], which address a wide range of tasks or organ systems, our work specifically focuses on the unique challenges and developments within brain image segmentation. Thus, this scoping review will seek to fill the gap by focusing solely on transformer applications in neuroimage segmentation, an area of paramount importance for the diagnosis and treatment of neurological disorders. A scoping review on this topic is appropriate because the application of transformers in this area is relatively new and fast-developing; hence, it allows for comprehensively mapping the current research landscape and identifying knowledge gaps.</p>
      <p>The main purpose of this scoping review is to synthesize and critically evaluate the existing literature on the use of different transformer models for neuroimaging segmentation. This review aims at summarizing the types of transformer models applied, their performance, applications in various neurological conditions and imaging modalities, limitations of the current literature, and highlighting the existing gaps in research.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Design</title>
        <p>The approach of this scoping review follows the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) guidelines (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) [<xref ref-type="bibr" rid="ref35">35</xref>]. Our primary research question was “What are the current applications, performance, and limitations of transformer models in neuroimaging segmentation?”</p>
        <p>The goal was the extraction of key themes within recent literature related to transformer use in neuroimaging segmentation that will guide future research and clinical applications. Only the literature starting from 2019 was considered, since the rapid evolution in the development of transformer models for medical imaging is a key recent development in this field.</p>
        <p>We defined transformer models as DL architectures relying on self-attention mechanisms, capable of processing sequential data and capturing long-range dependencies. From the neuroimaging perspective, we considered those studies where these models were applied to different modalities of brain imaging, focusing on MRI due to its prevalence in neurological diagnostics.</p>
        <p>Our review process followed a systematic search in 4 major databases: Scopus, IEEE Xplore, PubMed, and ACM Digital Library. We present a comprehensive review of methodologies, results, strengths, and limitations of the included studies to derive useful insights that bridge technical developments with their implications in the clinical domain of neuroimage segmentation.</p>
      </sec>
      <sec>
        <title>Search Strategy</title>
        <p>Studies were retrieved on May 22, 2023, through searching the following databases: IEEE Xplore, ACM Digital Library, Scopus, and PubMed. The search was limited to 5 years, from 2019 to 2023, to prioritize recent research and consisted of search queries related to transformers such as “transformer,” “deep learning,” and “self-attention”; queries related to neuroimaging such as “brain,” “neuroimaging,” “MRI,” “CT,” and “PET”; and queries related to the medical field such as “health care,” “medical,” “health.”</p>
      </sec>
      <sec>
        <title>Study Eligibility Criteria</title>
        <p>This review only included papers whose primary focus was the use of transformers for the segmentation of neuroimages. Our search included journal papers, conference papers, and dissertations that focused on applying transformer models to imaging scans (eg, MRI and CT) of the human brain. We excluded all studies that (1) used transformers for the segmentation of nonneuroimaging, raw brain signals, or electroencephalogram data; (2) were not in English or were review papers, conference abstracts, preprints, or protocols; (3) focused on neuroimaging tasks other than segmentation (eg, classification and prediction); and (4) were published before 2019.</p>
      </sec>
      <sec>
        <title>Study Selection</title>
        <p>For this review, we used a 3-step study selection process. First, we used EndNote (Clarivate) to remove duplicate studies returned by our initial search. Next, 3 independent reviewers (MI, AA, and MA) screened the titles and abstracts of the remaining papers to exclude irrelevant studies. We then obtained full texts of the studies that passed the initial screening, and the same 3 reviewers (MI, AA, and MA) examined them against our predefined inclusion criteria. Any disagreements between reviewers during the screening processes were resolved through in-person discussions until a consensus was reached.</p>
      </sec>
      <sec>
        <title>Data Collection</title>
        <p>The data extraction for this review was done in Microsoft Excel by 2 independent groups of 2 reviewers (MI and AA, and MA and OE) to share the workload for extraction and resolve conflicts between the groups. Disagreements in data extraction were resolved through consensus during face-to-face discussions. Data extractions fall into 3 broad categories: study characteristics, neuroimaging acquisition, and transformer features.</p>
        <p>Synthesis of included data was done using a narrative approach. Descriptive texts, tables, and figures describe and show the summary and characteristics of the data. Microsoft Excel was used to manage and synthesize the data. First, we depict the characteristics of each included study concerning publication year, type of publication, and country of origin. Then, we describe the neuroimaging acquisition of these studies by including the imaging modality, dataset, dataset accessibility, and neurological condition. Finally, we describe the transformer architecture of the included studies: the number of parameters, transformer type, hybrid component, and the training and evaluation methodology used along with loss function, optimizer, and metrics.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This scoping review synthesized and analyzed publicly available research studies. No direct human participant research was conducted; therefore, approval from an institutional review board or a research ethics committee was unnecessary. There was no collection, use, or dissemination of personal data from human participants in this study. Data extracted and analyzed in this review had been sourced from published studies, previously subjected to ethical review processes as part of their original publication. The review followed ethics in research practices, in that the representation of the included studies was true to form, and the methodology for the selection of studies and extraction was transparent. No individual participant data were accessed or reported in this review; hence, privacy and confidentiality were ensured. Since this study did not involve direct contact with human participants, issues regarding informed consent and protection of privacy and compensation of participants were therefore not relevant in this study. No images or supplementary material showing identifiable information of any individual were used in this review.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>A total of 1246 publications were retrieved from the initial search of the selected databases. In the first round of screening, 261 duplicates were identified and removed through the use of EndNote X9, leaving 985 publications remaining. In the second round, 761 publications were excluded through the analysis of their titles and abstracts against our predefined inclusion or exclusion criteria. The remaining 224 publications continued to the third round of screening, which included a detailed full-text read-through and resulted in the exclusion of 157 publications. Of the 1246 initial publications, only 67 studies met our criteria and were thereby included in this review. <xref rid="figure2" ref-type="fig">Figure 2</xref> depicts the full screening process in more detail.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews) flowchart of the study selection process.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e57723_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Characteristics of the Included Studies</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> depicts the characteristics and metadata of each included study, including the publication year, country, and type. Included studies ranged between 2019 and 2023. Over half of the included studies were published in 2022, followed by 32.84% (n=22) in 2023. Studies included peer-reviewed papers (n=48, 71.64%) and conference papers (n=19, 28.36%). The included studies spanned a total of 13 countries, with China being by far the largest contributor in this domain, representing 68.66% (n=46) of the total studies. Following China, we can find the United States (n=5, 7.46%), the United Kingdom (n=4, 5.97%), and India (n=3, 4.48%), with other countries contributing 1 paper apiece.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Characteristics of the studies used in this review, including the year, type, and country of publication.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="220"/>
            <col width="150"/>
            <col width="600"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Features</td>
                <td>Studies, n (%)</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Year of publication</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2023</td>
                <td>22 (32.84)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref57">57</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2022</td>
                <td>34 (50.75)</td>
                <td>[<xref ref-type="bibr" rid="ref58">58</xref>-<xref ref-type="bibr" rid="ref91">91</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2021</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref92">92</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2019</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Type of publication</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Journal paper</td>
                <td>48 (71.64)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref96">96</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Conference paper</td>
                <td>19 (28.36)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref77">77</xref>-<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>-<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>-<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Country of publication</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>China</td>
                <td>46 (68.66)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref54">54</xref>-<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>-<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>-<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>-<xref ref-type="bibr" rid="ref94">94</xref>,<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>United States</td>
                <td>5 (7.46)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>United Kingdom</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref97">97</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>India</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref98">98</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>9 (13.43)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Neuroimaging Acquisition and Neurological Condition</title>
        <p><xref ref-type="table" rid="table2">Table 2</xref> depicts the different imaging modalities used, the datasets used, and the different neurological conditions across the included studies. The included studies included a range of 6 different modalities, with MRI being by far the most common with 88.06% (n=59), followed by CT with 10.45% (n=7), and the remaining modalities with 1 each. Over half of the included studies used only 1 dataset (n=40, 59.70%) for training and evaluation purposes, followed by 23.88% (n=16) using 2 datasets. Of the 44 unique datasets used across the included studies, 70.45% (n=31) are public or open-source datasets, and 29.55% (n=13) are private datasets obtained directly from medical institutions. Regarding the public dataset category, the brain tumor segmentation dataset (BraTS dataset) is by far the most widely used, with 58.21% (n=39) of total studies using its variants (including BraTS 2015, 2017, 2018, 2019, 2020, and 2021), followed by Medical Segmentation Decathlon and low-grade glioma-Kaggle with 5.97% (n=4) each. The main neurological condition of the included studies was the segmentation of brain tumors, with 71.64% (n=48) of studies conducting research specifically in this area.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Description of features used in the included studies, including modalities, datasets, dataset types, and neurological conditions.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="230"/>
            <col width="140"/>
            <col width="600"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Features</td>
                <td>Studies, n (%)</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Imaging modality</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MRI<sup>a</sup></td>
                <td>59 (88.06)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>-<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref71">71</xref>-<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>CT<sup>b</sup></td>
                <td>7 (10.45)</td>
                <td>[<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>PET<sup>c</sup></td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref73">73</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Interventional ultrasound</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref70">70</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Electron microscopy</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Digital subtraction angiography</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref91">91</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Number of datasets used</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>1</td>
                <td>40 (59.70)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref63">63</xref>-<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref83">83</xref>-<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>-<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2</td>
                <td>16 (23.88)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref96">96</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>3+</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not mentioned</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref52">52</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Dataset accessibility</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Public</td>
                <td>31 (70.45)</td>
                <td>—<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Private</td>
                <td>13 (29.55)</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Dataset</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Public: BraTS<sup>e</sup></td>
                <td>39 (58.21)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref60">60</xref>-<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>-<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>-<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MSD<sup>f</sup></td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>iseg-2017</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref96">96</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ADNI<sup>g</sup></td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref73">73</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MRBrainS<sup>h</sup></td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref96">96</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LGG<sup>i</sup>-Kaggle</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref84">84</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ISLES<sup>j</sup></td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref75">75</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>WMH<sup>k</sup></td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>12 (17.91)</td>
                <td>[<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Private</td>
                <td>9 (13.43)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref52">52</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Neurological condition</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Brain tumor</td>
                <td>48 (71.64)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref60">60</xref>-<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>-<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ischemic stroke</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Alzheimer disease</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Parkinson disease</td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Intracerebral hemorrhage</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Intracranial aneurysms</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref91">91</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Autism</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref55">55</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Brain lesions</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref78">78</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Healthy brain</td>
                <td>6 (8.96)</td>
                <td>[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>MRI: magnetic resonance imaging.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>CT: computed tomography.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>PET: positron emission tomography.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>Not available.</p>
            </fn>
            <fn id="table2fn5">
              <p><sup>e</sup>BraTS: brain tumor segmentation dataset.</p>
            </fn>
            <fn id="table2fn6">
              <p><sup>f</sup>MSD: Medical Segmentation Decathlon.</p>
            </fn>
            <fn id="table2fn7">
              <p><sup>g</sup>ADNI: Alzheimer’s Disease Neuroimaging Initiative.</p>
            </fn>
            <fn id="table2fn8">
              <p><sup>h</sup>MRBrainS: magnetic resonance brain image segmentation.</p>
            </fn>
            <fn id="table2fn9">
              <p><sup>i</sup>LGG: low-grade glioma.</p>
            </fn>
            <fn id="table2fn10">
              <p><sup>j</sup>ISLES: ischemic stroke lesion segmentation.</p>
            </fn>
            <fn id="table2fn11">
              <p><sup>k</sup>WMH: white matter hyperintensities.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Transformer-Based Techniques Types, Training Parameters, and Evaluation</title>
        <p>The proposed neuroimage segmentation techniques used various artificial intelligence (AI) techniques. In this review, we focused on the deep transformer–based techniques that have gained more attention recently. From the proposed models, we can find transformer-based, CNN with transformer-based, and generative adversarial network with transformer-based techniques. At the same time, the methods based on TransBTS, TransUNet, SwinUnet, and U-Net with transformer are the most used models for neuroimage segmentation. <xref rid="figure3" ref-type="fig">Figure 3</xref> illustrates these models in terms of architecture. <xref ref-type="table" rid="table3">Table 3</xref> depicts the characteristics of transformer models used within the included studies. From <xref ref-type="table" rid="table3">Table 3</xref>, we can find that 58.21% (n=39) of the included studies did not explicitly report the number of parameters of their proposed models. Of the studies that did, however, the majority of the transformer models proposed had between 20 and 40 million parameters (n=10, 14.93%), followed by 1 and 19 million (n=8, 11.94%). A majority of studies implemented a 3D segmentation network (n=42, 62.69%), with 37.31% (n=25) being 2D. An overwhelming 85.07% (n=57) of included studies proposed transformer models that are hybrid, with only 14.93% (n=10) of them being standalone transformer models. ViT was the most used transformer architecture, with 55.22% (n=37) of studies using it as its main component. Another significant transformer model is the Swin transformer, with 20.89% (n=14), followed by TransUnet, with 5.97% (n=4). Of the 57 hybrid transformer models, 55 (96.49%) studies opted for a combination of CNN with their transformer, and of those 55 CNN-transformer models, 56.36% (n=31) were U-Net based, and 9.09% (n=5) were ResNet based. Both generative adversarial network (n=2, 3.51%) and autoencoders (n=2, 3.51%) were also combined with transformers.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Various transformer-based architectures used for neuroimage segmentation. (A) U-Net+transformer, (B) TransBTS, (C) TransUNet, (D) SwinUnet, (E) UNETR, and (F) transformer.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e57723_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Proposed methods based on the weight of the models, type of data used, and type of transformer technique used.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="130"/>
            <col width="130"/>
            <col width="710"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Features</td>
                <td>Studies, n (%)</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Number</bold>
                  <bold>of</bold>
                  <bold>parameters</bold>
                  <bold>(in millions)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>1-19</td>
                <td>8 (11.94)</td>
                <td>[<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref82">82</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>20-39</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref88">88</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>40-59</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref86">86</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>60-100</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>100-120</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref97">97</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>120+</td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not mentioned</td>
                <td>39 (58.21)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref70">70</xref>-<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref98">98</xref>-<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Dimensionality</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2D</td>
                <td>25 (37.31)</td>
                <td>[<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref94">94</xref>,<xref ref-type="bibr" rid="ref99">99</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>3D</td>
                <td>42 (62.69)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref59">59</xref>-<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref71">71</xref>-<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>-<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>-<xref ref-type="bibr" rid="ref98">98</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Transformer model</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Standalone</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hybrid</td>
                <td>57 (85.07)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref56">56</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>-<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref83">83</xref>-<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Type</bold>
                  <bold>of</bold>
                  <bold>transformer</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ViT<sup>a</sup></td>
                <td>37 (55.22)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Swin</td>
                <td>14 (20.89)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref67">67</xref>-<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>SwinUnet</td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>TransUnet</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>TransBTS</td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref92">92</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>8 (11.94)</td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Type</bold>
                  <bold>of</bold>
                  <bold>hybrid</bold>
                  <bold>component</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>CNN<sup>b</sup></td>
                <td>55 (96.49)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref56">56</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref72">72</xref>-<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref83">83</xref>-<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>U-Net</td>
                <td>31 (56.36)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref83">83</xref>-<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>-<xref ref-type="bibr" rid="ref94">94</xref>,<xref ref-type="bibr" rid="ref97">97</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ResNet</td>
                <td>5 (9.09)</td>
                <td>[<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GAN<sup>c</sup></td>
                <td>2 (3.51)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Autoencoder</td>
                <td>2 (3.51)</td>
                <td>[<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>ViT: vision transformer.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>GAN: generative adversarial network.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p><xref ref-type="table" rid="table4">Table 4</xref> depicts the loss function used, the optimizer used, and the different evaluation methods used across each included study. The loss function was not mentioned in 11.94% (n=8) of the included studies. Of the studies that mentioned it, the most popular loss function is a combination of cross-entropy and Dice loss with 40.30% (n=27) of included studies, followed by Dice loss with 19.40% (n=13). Adam is the most used optimizer, with 47.76% (n=32) of included studies using it, followed by AdamW at 14.93% (n=10). However, the optimizer was not mentioned in 22.39% (n=15) of studies. In terms of evaluation, over half of the included studies used at least two evaluation metrics (n=34, 50.75%), followed by one metric (n=11, 16.42%). Of these evaluation metrics, the Dice score is by far the most used, with 94.03% (n=63) of all studies using it, followed by HD95, 52.24% (n=35), and sensitivity, 28.36% (n=19).</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Experimental setups and evaluation for the proposed transformer-based techniques.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="200"/>
            <col width="180"/>
            <col width="590"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Features</td>
                <td>Studies, n (%)</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Loss</bold>
                  <bold>function</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dice loss</td>
                <td>13 (19.4)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref87">87</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cross-entropy</td>
                <td>9 (13.43)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref96">96</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dice cross-entropy</td>
                <td>27 (40.3)</td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref93">93</xref>-<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not mentioned</td>
                <td>8 (11.94)</td>
                <td>[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Optimizer</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Adam</td>
                <td>32 (47.76)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref60">60</xref>-<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref65">65</xref>-<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref86">86</xref>-<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref91">91</xref>-<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AdamW</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>SGD<sup>a</sup></td>
                <td>7 (10.45)</td>
                <td>[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref94">94</xref>,<xref ref-type="bibr" rid="ref97">97</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ranger</td>
                <td>2 (2.99)</td>
                <td>[<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>RMSprop<sup>b</sup></td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref63">63</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Apollo</td>
                <td>1 (1.49)</td>
                <td>[<xref ref-type="bibr" rid="ref64">64</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not mentioned</td>
                <td>15 (22.39)</td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Evaluation</bold>
                  <bold>metrics</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dice score</td>
                <td>63 (94.03)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref68">68</xref>-<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>HD95<sup>c</sup></td>
                <td>35 (52.24)</td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref56">56</xref>-<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref97">97</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Recall or sensitivity</td>
                <td>19 (28.36)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>IoU<sup>d</sup></td>
                <td>12 (17.91)</td>
                <td>[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref52">52</xref>-<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref91">91</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Precision</td>
                <td>10 (14.93)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref90">90</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td>6 (8.96)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref86">86</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Specificity</td>
                <td>5 (7.46)</td>
                <td>[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AUC<sup>e</sup></td>
                <td>5 (7.46)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>F-measure</td>
                <td>3 (4.48)</td>
                <td>[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Jaccard index</td>
                <td>4 (5.97)</td>
                <td>[<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref90">90</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>5 (7.46)</td>
                <td>[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref91">91</xref>]</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>SGD: stochastic gradient descent.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>RMSprop: root mean square propagation.</p>
            </fn>
            <fn id="table4fn3">
              <p><sup>c</sup>HD95: Hausdorff distance at the 95th percentile.</p>
            </fn>
            <fn id="table4fn4">
              <p><sup>d</sup>IoU: Intersection Over Union.</p>
            </fn>
            <fn id="table4fn5">
              <p><sup>e</sup>AUC: area under the curve.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Strengths and Limitations of Transformer-Based Techniques</title>
        <p>Transformers have revolutionized the area of neuroimage segmentation by offering unparalleled capabilities in modeling complex features in medical imaging. They have the ability to model both local and global information, which substantially improves the accuracy of segmentation and therefore makes them very useful in various neurological applications. As shown in <xref ref-type="table" rid="table5">Table 5</xref>, the common strengths of transformer-based techniques include a high mean Dice score, effective fusion of multimodal MRI, and robust performance across diverse and complex datasets. However, these models also have substantial limitations in terms of high computational and memory costs, sensitivity to small areas of tumors, and possible overfitting on smaller datasets.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Strengths and limitations of common transformer-based techniques.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="70"/>
            <col width="480"/>
            <col width="420"/>
            <thead>
              <tr valign="top">
                <td colspan="2">References</td>
                <td>Strengths</td>
                <td>Limitations</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>ViT<sup>a</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>, <xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>, <xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref55">55</xref>, <xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref65">65</xref>, <xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref71">71</xref>,<xref ref-type="bibr" rid="ref73">73</xref>, <xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref83">83</xref>, <xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref88">88</xref>, <xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref93">93</xref>, <xref ref-type="bibr" rid="ref95">95</xref>-<xref ref-type="bibr" rid="ref99">99</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Effectively models local and global features in 3D MRI<sup>b</sup> data.</p>
                    </list-item>
                    <list-item>
                      <p>High mean Dice score.</p>
                    </list-item>
                    <list-item>
                      <p>Demonstrates consistent improvements in segmentation performance.</p>
                    </list-item>
                    <list-item>
                      <p>Effective in emphasizing informative brain regions.</p>
                    </list-item>
                    <list-item>
                      <p>Uses symmetry of brain structures for improved feature learning.</p>
                    </list-item>
                    <list-item>
                      <p>Outperforms state-of-the-art SSL<sup>c</sup> methods and medical image segmentation models on benchmarks.</p>
                    </list-item>
                    <list-item>
                      <p>Incorporates gradient-based scoring for attentive reconstruction.</p>
                    </list-item>
                    <list-item>
                      <p>Effective multimodal MRI fusion.</p>
                    </list-item>
                    <list-item>
                      <p>Enhanced long-term dependencies within individual modalities.</p>
                    </list-item>
                    <list-item>
                      <p>Complementary contextual information among modalities.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Computationally intensive.</p>
                    </list-item>
                    <list-item>
                      <p>High computational and memory cost.</p>
                    </list-item>
                    <list-item>
                      <p>Memory constraints when lowering patch resolution.</p>
                    </list-item>
                    <list-item>
                      <p>Complexity in integrating CNN<sup>d</sup> and transformer features.</p>
                    </list-item>
                    <list-item>
                      <p>Sensitivity to small tumor areas in LGG<sup>e</sup>.</p>
                    </list-item>
                    <list-item>
                      <p>Overlap in feature dimensions between CNN and transformer branches.</p>
                    </list-item>
                    <list-item>
                      <p>Misclassification of voxels in LGG.</p>
                    </list-item>
                    <list-item>
                      <p>Imbalance in dataset affecting performance.</p>
                    </list-item>
                    <list-item>
                      <p>Need for extensive validation on more diverse datasets.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Swin</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>, <xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref62">62</xref>, <xref ref-type="bibr" rid="ref67">67</xref>-<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref72">72</xref>, <xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref77">77</xref>, <xref ref-type="bibr" rid="ref89">89</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Capable of learning multiscale contextual information, enhancing performance across various tasks.</p>
                    </list-item>
                    <list-item>
                      <p>Combines advantages of ViT and CNNs, balancing both local and global feature learning.</p>
                    </list-item>
                    <list-item>
                      <p>Maintains high-resolution features, crucial for precise segmentation tasks.</p>
                    </list-item>
                    <list-item>
                      <p>Performs efficient tri-level preprocessing, including noise removal improving input quality for better results.</p>
                    </list-item>
                    <list-item>
                      <p>Incorporates advantages of 3D Swin transformer, improving performance in 3D medical image analysis.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Slight decrease in performance for specific areas like tumor core segmentation in some instances.</p>
                    </list-item>
                    <list-item>
                      <p>ViTs have many parameters and structures, making them complex and resource-intensive.</p>
                    </list-item>
                    <list-item>
                      <p>Potential risk of overfitting on smaller datasets due to high model complexity.</p>
                    </list-item>
                    <list-item>
                      <p>Existing neural network algorithms may often extract redundant features, reducing overall efficiency.</p>
                    </list-item>
                    <list-item>
                      <p>Limited exploration in preprocessing and postprocessing techniques, which might enhance model performance further.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>SwinUnet</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Improves the efficiency of using limited labeled data.</p>
                    </list-item>
                    <list-item>
                      <p>Competitive performance in Dice score, Hausdorff distance, and other segmentation metrics.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Requires verification of the improvement with diverse and larger datasets.</p>
                    </list-item>
                    <list-item>
                      <p>Lower recall and F1-scores compared to other CNN-based methods.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>TransUnet</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref84">84</xref>, <xref ref-type="bibr" rid="ref94">94</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>High effectiveness in model design.</p>
                    </list-item>
                    <list-item>
                      <p>Combines the strengths of U-Net and transformer models.</p>
                    </list-item>
                    <list-item>
                      <p>Achieves higher Dice scores compared to U-Net and transformer.</p>
                    </list-item>
                    <list-item>
                      <p>Effectively learns global context features in images.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Some models within the study showed poor performance.</p>
                    </list-item>
                    <list-item>
                      <p>Cross-application limitations in certain scenarios.</p>
                    </list-item>
                    <list-item>
                      <p>High data requirements for effective training.</p>
                    </list-item>
                    <list-item>
                      <p>Limited dataset size affecting generalization.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>TransBTS</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref92">92</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Residual basis blocks reduce feature loss and enhance feature extraction.</p>
                    </list-item>
                    <list-item>
                      <p>Combines CNN and transformer for improved segmentation performance, leveraging both local and global information.</p>
                    </list-item>
                    <list-item>
                      <p>Attention mechanisms enhance the model’s ability to focus on relevant features, improving accuracy.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Increased network depth leads to higher parameter counts, increasing computational requirements.</p>
                    </list-item>
                    <list-item>
                      <p>May struggle with unseen patterns in the testing phase, affecting performance robustness.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Other transformer types</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref48">48</xref>, <xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref100">100</xref>]</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Effectively capture both local and global context in medical images.</p>
                    </list-item>
                    <list-item>
                      <p>Enhance segmentation accuracy.</p>
                    </list-item>
                    <list-item>
                      <p>Robust across diverse and complex datasets.</p>
                    </list-item>
                    <list-item>
                      <p>Improve semantic information representation.</p>
                    </list-item>
                    <list-item>
                      <p>Combine multiresolution information effectively.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>High computational complexity.</p>
                    </list-item>
                    <list-item>
                      <p>Significant memory use.</p>
                    </list-item>
                    <list-item>
                      <p>Requires substantial hardware resources.</p>
                    </list-item>
                    <list-item>
                      <p>Extensive data preprocessing is needed.</p>
                    </list-item>
                    <list-item>
                      <p>Limited scalability to very large datasets.</p>
                    </list-item>
                    <list-item>
                      <p>Sensitive to variations in input data quality.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>ViT: vision transformer.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>MRI: magnetic resonance imaging.</p>
            </fn>
            <fn id="table5fn3">
              <p><sup>c</sup>SSL: self-supervised learning.</p>
            </fn>
            <fn id="table5fn4">
              <p><sup>d</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table5fn5">
              <p><sup>e</sup>LGG: low-grade glioma.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The main purpose of this scoping review is to conduct a thorough investigation into the use of different transformer models in the field of neuroimaging, specifically segmentation. From the gathered data, it is clear that the use of transformers in neuroimaging experienced a great boost in research from 2021 to 2022, with over half of the included studies being published in 2022 compared to only 10 studies in 2021. It is also important to note that for the year 2023, only the studies up to May 22 were included; yet, this constitutes a total percentage of the included studies of 32.84% (n=22) and could very well be even higher when the whole year is considered.</p>
        <p>From the studies included in this review, it is clear that MRI is by far the most popular image modality for applying transformer models to neuroimaging segmentation. This can be attributed to how common the use of MRI is in the diagnosis of neurological illnesses, especially for brain tumors [<xref ref-type="bibr" rid="ref27">27</xref>], wherein it is able to provide functional, structural, and metabolic information [<xref ref-type="bibr" rid="ref27">27</xref>] through the use of its different modalities (T1, T2, T1ce, and fluid-attenuated inversion recovery). MRI is particularly suitable for neuroimaging segmentation purposes because of the high spatial resolution and soft tissue contrast it exhibits, both being critical for any form of precise segmentation, since it is able to show detailed visualization of brain structures and clear distinction between different brain tissues [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>Another reason for the popularity of MRI in the included studies is the availability of brain MRI scans sourced from the widely used BraTS datasets [<xref ref-type="bibr" rid="ref101">101</xref>]. This yearly and open-source dataset contains a wide variety of different MRI modalities that are manually annotated, making it a very important resource for developing and benchmarking segmentation methods based on different transformer models. This is why it is no surprise that it is by far the most used dataset in the included studies.</p>
        <p>When it comes to neurological conditions, a majority of included studies in this review focused on the use of transformers in brain tumor segmentation. This can be attributed to multiple factors, including the availability of MRI scans from the BraTS dataset that are specifically for brain tumor segmentation. Brain tumors are also highly prevalent among all ages and have a high fatality rate [<xref ref-type="bibr" rid="ref102">102</xref>], making it a prime area for research into new methods of diagnosis and treatment. In addition, brain tumors are fairly complex and irregular in both location and shape [<xref ref-type="bibr" rid="ref27">27</xref>], which makes manual segmentation a very tedious and time-consuming process that would benefit greatly from increased research into more automated methods for segmentation. The BraTS dataset is also a factor, as it provides a large variety of MRI scans that are specifically for brain tumor segmentation. Transformers are particularly useful for brain tumor segmentation due to their self-attention mechanism, which allows them to account for different variations in tumor characteristics, such as size and shape, during the segmentation process [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        <p>Most included studies proposed and developed models with 3D segmentation networks, specifically for 3D imaging data. In terms of neuroimaging, 3D scans are more common in part due to the 3D nature of MRI scans. Since MRI is the most common imaging modality used in neuroimaging, it makes sense that it is preferable to develop models for 3D imaging data in order to avoid the loss of information. Even though 3D models are typically more accurate for 3D imaging segmentation, they are computationally expensive [<xref ref-type="bibr" rid="ref26">26</xref>], which is why some proposed models in the included studies chose to instead extract 2D slices from 3D imaging data. While this technique is suitable, reducing a 3D scan into 2D slices can lead to a degradation of volume and spatial characteristics native to 3D data [<xref ref-type="bibr" rid="ref103">103</xref>].</p>
        <p>CNN-transformer hybrid models were used far more than standalone transformer models in the included studies, specifically in the form of a U-Net and transformer combination. These combinations capitalize on the strengths of both CNN and transformers while minimizing their weaknesses. CNN is particularly useful in extracting local features and spatial information from the provided scans; however, it often struggles to capture long-range dependencies due to its small kernel size [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. On the other hand, transformers are able to model these long-range dependencies due to their self-attention module, making them very useful for neuroimaging segmentation, especially in the case of brain tumors [<xref ref-type="bibr" rid="ref27">27</xref>]. This is why most included studies opted to use CNN to capture local features and transformers to capture global features to increase the performance of their models in the task of segmentation [<xref ref-type="bibr" rid="ref102">102</xref>].</p>
      </sec>
      <sec>
        <title>Research and Practical Implications</title>
        <p>This scoping review provides an overview of the available research regarding the use of transformers in the context of neuroimaging segmentation. These findings underline important implications for future research and applications in this area.</p>
        <p>It is a notable finding of this review that many studies apply transformers, specifically to brain tumor segmentation, which might hint at the potential of transformers in assisting diagnosis and treatment planning in this field. As shown here, transformers are well-suited for this task. However, further research is needed to assess the real-world clinical usefulness of transformers for brain tumor segmentation. While brain tumors are an important challenge, the focus on this single application at this level would seem indicative of the current lack of large and good-quality datasets in many other big neurological diseases and conditions, such as Alzheimer and Parkinson disease, and strokes. Making publicly available manually annotated datasets of different neurological conditions would motivate new research and developments on the application of transformers in this field. On top of this, the heavy reliance of studies on the BraTS dataset shows that there is a need to diversify datasets in order to validate different models correctly. Most of the included studies favored hybrid use by combining CNN and transformers, which illustrates the complementary strengths of these architectures for neuroimaging segmentation. Success in hybrid techniques shows that further exploration of novel integrations between transformers, CNNs, and other modules could become a promising direction to achieve better performances on more complex medical image analysis problems. Improved accuracy in neuroimaging segmentation, through the ability of transformer models to extract local and global features, allows for more accurate identification of neurological conditions such as brain tumors. This will provide earlier diagnosis and treatment. Moreover, automation with these models will save much time of the clinicians in performing manual segmentation so that they can concentrate on the care of patients and other important tasks. 
Treatment planning may also be improved with transformer models, where the potential for more accurate and consistent segmentation results helps a lot in this respect. Moreover, these models can also potentially be integrated into clinical workflows without much hassle by developing user-friendly interfaces and collaboration between AI researchers and clinicians to ensure these tools are adopted and effectively used in practice.</p>
      </sec>
      <sec>
        <title>Strengths and Limitations</title>
        <p>This scoping review has numerous key strengths with regard to the analysis of transformer applications in neuroimaging segmentation. First, it gives a broad overview of the fast-evolving field by capturing recent works from 2019 through 2023. Second, it allows focusing on current research so that the review reflects the state-of-the-art in transformer applications for medical imaging. It is a systematic approach, covering 4 major databases; hence, wide and comprehensive coverage of the literature reviewed. The inclusion of journal papers and conference papers facilitates a wide view of both consolidated and emergent research. Third, this review gives elaborate insights into various aspects of transformer use in neuroimaging: imaging modality, dataset, neurological condition, and metric for performance evaluation. This level of analysis provides rich information relevant to both researchers and practitioners within the field. Finally, the review’s focus on brain tumor segmentation, while a limitation in some respects, also serves as a strength by providing an in-depth look at transformer applications in a critical area of medical imaging with significant clinical implications.</p>
        <p>While this scoping review offers a number of strengths, its limitations need to be acknowledged so as to strike a balance. First, the review was on transformers in neuroimaging segmentation alone, excluding other medical imaging tasks or organs. This narrow focus allows for an in-depth analysis of transformer applications in brain imaging but may not be representative of the full spectrum of use that transformers have seen in medical imaging. This limitation could be reduced by expanding the scope of future reviews to multiple organ systems or imaging tasks, giving a wider look at transformer applications in medical imaging.</p>
        <p>Second, the review was focused on studies published in the English language from 2019 up to 2023. This narrowing was necessary, as most current works are favored in this novel area of transformer use in medical imaging. However, this criterion may have left out important non-English language publications or early applications of transformers. This is likely a limitation in the representation of research trends worldwide. In this respect, future studies can be designed to include more languages, also extending the date range to capture more diverse sets of publications and track the evolution of transformer use in medical imaging over an extended period.</p>
        <p>Third, the fact that 58.21% (n=39) of the works included in this review were based on the BraTS dataset introduces a certain bias in the domain toward the segmentation of brain tumors. Though it is a very critical area, it might not be useful to represent transformers completely for other neurological conditions. Future research needs to give more emphasis to developing and publicly releasing manually annotated datasets about more neurological conditions to address this limitation. This will further encourage diverse applications of transformers in neuroimaging and provide a wider understanding of the capability of transformers across different pathologies.</p>
        <p>The review demonstrated a high dominance by studies from China, with 46 (68.66%) studies of the total (see <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> for detailed analysis). This aligns with broader publication patterns in AI research where Chinese institutions contribute approximately 40% of global publications. While this distribution reflects documented trends in international research output, future reviews might benefit from more diversified search strategies to ensure comprehensive coverage of global research activities in this field.</p>
        <p>Finally, no formal quality or risk-of-bias assessment of the included studies was performed. Although this represents a common approach when it comes to scoping reviews, this limits the degree to which strong conclusions can be drawn about the relative effectiveness of various approaches to transformers. Future systematic reviews or meta-analyses may involve quality assessments to support more robust evidence in terms of the efficacy of transformer models in neuroimaging segmentation.</p>
      </sec>
      <sec>
        <title>Future Directions</title>
        <p>These findings point to a variety of promising directions for future research on the application of transformers to neuroimaging segmentation. First, future studies should develop novel integrations between transformers, CNNs, and other advanced modules that will further improve performance for complex medical image analysis tasks. This might be achieved by investigating various hybrid models leveraging the strengths of transformers and more traditional DL methods. Second, the extension of transformer applications to more neurological conditions other than brain tumors, which would allow a wider grasp of the potential capability of transformers across different pathologies. More clinical applications are likely to follow from here. Third, the development of new transformer-based methods or their combination with emerging techniques like diffusion models could further improve efficiency and robustness for both 2D and 3D brain segmentation. Fourth, future studies shall be done to bridge the current limitations in dataset diversity. This may be in creating and publishing manually annotated datasets for a wider range of neurological conditions that can enable transformers to apply to neuroimaging in more diversified ways. Finally, the translation of research findings into clinical practice remains a high unmet need. This transition will require extensive validation of transformer models on diverse, real-world datasets and close collaboration between AI researchers and clinicians. Such collaboration could result in the development of more clinically relevant models and user-friendly interfaces, which would expedite the translation of these advanced technologies into routine clinical practice.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This scoping review has thoroughly investigated the applications of transformers in neuroimaging segmentation and revealed a rapidly evolving field with great potential. The results of this paper have shown that transformer models, especially when combined with CNNs in hybrid architectures, are very promising for the task of brain MRI segmentation. Some of the major advantages of transformers include the modeling of long-range dependencies in images through self-attention mechanisms while still being able to perform local feature extraction. Such a combination uniquely allows for more accurate and detailed segmentation in highly complex neurological pathologies, such as brain tumors.</p>
        <p>There is clearly a trend toward 3D transformer models and hybrid CNN-transformer architectures, dominated by ViT as the variant of transformer used most frequently. These approaches also obtain superior performance on benchmark datasets, such as brain tumor segmentation tasks. However, reliance on the BraTS dataset highlights a requirement for more diverse data sources to ensure that performance can be validated across multiple neurological conditions.</p>
        <p>While this is promising, there are still important issues in the field: high computational costs associated with transformer models, overfitting on smaller datasets, and the need for validation in larger clinical settings. Another issue is the geographical concentration of research output, which highlights the need for greater diversity in the origins of studies worldwide to improve the generalizability of findings.</p>
        <p>The future development of transformer models will unlock the potential that neuroimaging segmentation demands. By refining both architectures and training methods and integrating these models into clinical workflows, transformers may provide state-of-the-art performance for fast, accurate, and reproducible brain MRI segmentation, hence advancing clinical diagnosis and evaluation techniques for better outcomes in patients with neurological disorders.</p>
        <p>Although transformers have shown great improvement in neuroimaging segmentation, much of their potential is yet to be realized. Future work will need to focus on addressing present limitations, extending applications across a wider range of neurological conditions, and narrowing the gap between research and clinical practice to ensure that transformers become a valuable and impactful technology in medical imaging analysis.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews) checklist.</p>
        <media xlink:href="jmir_v27i1e57723_app1.docx" xlink:title="DOCX File , 84 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Significant prevalence of studies published in China.</p>
        <media xlink:href="jmir_v27i1e57723_app2.docx" xlink:title="DOCX File , 104 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BraTS dataset</term>
          <def>
            <p>brain tumor segmentation dataset</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CT</term>
          <def>
            <p>computed tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MRI</term>
          <def>
            <p>magnetic resonance imaging</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">PET</term>
          <def>
            <p>positron emission tomography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PRISMA-ScR</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses Extension for Scoping Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">ViT</term>
          <def>
            <p>vision transformer</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was supported in part by the United Arab Emirates University (UAEU grant 12T037) and in part by the Big Data Analytics Center (UAEU grant 12R239).</p>
    </ack>
    <notes>
      <title>Data Availability</title>
      <p>All data generated or analyzed during this study are included in this published paper and <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abd-Ellah</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Awad</surname>
              <given-names>AI</given-names>
            </name>
            <name name-style="western">
              <surname>Khalaf</surname>
              <given-names>AAM</given-names>
            </name>
            <name name-style="western">
              <surname>Hamed</surname>
              <given-names>HFA</given-names>
            </name>
          </person-group>
          <article-title>A review on brain tumor diagnosis from MRI images: practical implications, key achievements, and lessons learned</article-title>
          <source>Magn Reson Imaging</source>
          <year>2019</year>
          <volume>61</volume>
          <fpage>300</fpage>
          <lpage>318</lpage>
          <pub-id pub-id-type="doi">10.1016/j.mri.2019.05.028</pub-id>
          <pub-id pub-id-type="medline">31173851</pub-id>
          <pub-id pub-id-type="pii">S0730-725X(18)30430-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akkus</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Galimzianova</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hoogi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rubin</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Erickson</surname>
              <given-names>BJ</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for brain MRI segmentation: state of the art and future directions</article-title>
          <source>J Digit Imaging</source>
          <year>2017</year>
          <volume>30</volume>
          <issue>4</issue>
          <fpage>449</fpage>
          <lpage>459</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28577131"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10278-017-9983-4</pub-id>
          <pub-id pub-id-type="medline">28577131</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10278-017-9983-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC5537095</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shoeibi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khodatars</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jafari</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Moridian</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Alizadehsani</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Khosravi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alinejad-Rokny</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Fuller-Tyszkiewicz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Acharya</surname>
              <given-names>UR</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gorriz</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Diagnosis of brain diseases in fusion of neuroimaging modalities using deep learning: a review</article-title>
          <source>Inf Fusion</source>
          <year>2023</year>
          <volume>93</volume>
          <fpage>85</fpage>
          <lpage>117</lpage>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2022.12.010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hasan</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Meziane</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Aspin</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jalab</surname>
              <given-names>HA</given-names>
            </name>
          </person-group>
          <article-title>Segmentation of brain tumors in MRI images using three-dimensional active contour without edge</article-title>
          <source>Symmetry</source>
          <year>2016</year>
          <volume>8</volume>
          <issue>11</issue>
          <fpage>132</fpage>
          <pub-id pub-id-type="doi">10.3390/sym8110132</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Domingues</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pereira</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Martins</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Duarte</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Santos</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Abreu</surname>
              <given-names>PH</given-names>
            </name>
          </person-group>
          <article-title>Using deep learning techniques in medical imaging: a systematic review of applications on CT and PET</article-title>
          <source>Artif Intell Rev</source>
          <year>2019</year>
          <volume>53</volume>
          <issue>6</issue>
          <fpage>4093</fpage>
          <lpage>4160</lpage>
          <pub-id pub-id-type="doi">10.1007/s10462-019-09788-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Noor</surname>
              <given-names>MBT</given-names>
            </name>
            <name name-style="western">
              <surname>Zenia</surname>
              <given-names>NZ</given-names>
            </name>
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Mamun</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmud</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Application of deep learning in detecting neurological disorders from magnetic resonance images: a survey on the detection of Alzheimer's disease, Parkinson's disease and schizophrenia</article-title>
          <source>Brain Inform</source>
          <year>2020</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>11</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1186/s40708-020-00112-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s40708-020-00112-2</pub-id>
          <pub-id pub-id-type="medline">33034769</pub-id>
          <pub-id pub-id-type="pii">10.1186/s40708-020-00112-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC7547060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghaffari</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sowmya</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Oliver</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Automated brain tumor segmentation using multimodal brain scans: a survey based on models submitted to the BraTS 2012-2018 challenges</article-title>
          <source>IEEE Rev Biomed Eng</source>
          <year>2020</year>
          <volume>13</volume>
          <fpage>156</fpage>
          <lpage>168</lpage>
          <pub-id pub-id-type="doi">10.1109/RBME.2019.2946868</pub-id>
          <pub-id pub-id-type="medline">31613783</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fawzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Achuthan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Belaton</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Brain image segmentation in recent years: a narrative review</article-title>
          <source>Brain Sci</source>
          <year>2021</year>
          <volume>11</volume>
          <issue>8</issue>
          <fpage>1055</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=brainsci11081055"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/brainsci11081055</pub-id>
          <pub-id pub-id-type="medline">34439674</pub-id>
          <pub-id pub-id-type="pii">brainsci11081055</pub-id>
          <pub-id pub-id-type="pmcid">PMC8392552</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wadhwa</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bhardwaj</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Singh Verma</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>A review on brain tumor segmentation of MRI images</article-title>
          <source>Magn Reson Imaging</source>
          <year>2019</year>
          <volume>61</volume>
          <fpage>247</fpage>
          <lpage>259</lpage>
          <pub-id pub-id-type="doi">10.1016/j.mri.2019.05.043</pub-id>
          <pub-id pub-id-type="medline">31200024</pub-id>
          <pub-id pub-id-type="pii">S0730-725X(19)30034-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gau</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>CSM</given-names>
            </name>
            <name name-style="western">
              <surname>Urbach</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zentner</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schulze-Bonhage</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kaller</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Foit</surname>
              <given-names>NA</given-names>
            </name>
          </person-group>
          <article-title>Accuracy and practical aspects of semi- and fully automatic segmentation methods for resected brain areas</article-title>
          <source>Neuroradiology</source>
          <year>2020</year>
          <volume>62</volume>
          <issue>12</issue>
          <fpage>1637</fpage>
          <lpage>1648</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32691076"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00234-020-02481-1</pub-id>
          <pub-id pub-id-type="medline">32691076</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00234-020-02481-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC7666677</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lei</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nandi</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>Medical image segmentation using deep learning: a survey</article-title>
          <source>IET Image Process</source>
          <year>2022</year>
          <volume>16</volume>
          <issue>5</issue>
          <fpage>1243</fpage>
          <lpage>1267</lpage>
          <pub-id pub-id-type="doi">10.1049/ipr2.12419</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seo</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Badiei Khuzani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vasudevan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Machine learning techniques for biomedical image segmentation: an overview of technical aspects and introduction to state-of-art applications</article-title>
          <source>Med Phys</source>
          <year>2020</year>
          <volume>47</volume>
          <issue>5</issue>
          <fpage>e148</fpage>
          <lpage>e167</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32418337"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mp.13649</pub-id>
          <pub-id pub-id-type="medline">32418337</pub-id>
          <pub-id pub-id-type="pmcid">PMC7338207</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chuah</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>CO</given-names>
            </name>
            <name name-style="western">
              <surname>Gochoo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dhanalakshmi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Conventional machine learning and deep learning in Alzheimer's disease diagnosis using neuroimaging: a review</article-title>
          <source>Front Comput Neurosci</source>
          <year>2023</year>
          <volume>17</volume>
          <fpage>1038636</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36814932"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fncom.2023.1038636</pub-id>
          <pub-id pub-id-type="medline">36814932</pub-id>
          <pub-id pub-id-type="pmcid">PMC9939698</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Balwant</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <article-title>A review on convolutional neural networks for brain tumor segmentation: methods, datasets, libraries, and future directions</article-title>
          <source>IRBM</source>
          <year>2022</year>
          <volume>43</volume>
          <issue>6</issue>
          <fpage>521</fpage>
          <lpage>537</lpage>
          <pub-id pub-id-type="doi">10.1016/j.irbm.2022.05.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ronneberger</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brox</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>U-net: convolutional networks for biomedical image segmentation</article-title>
          <year>2015</year>
          <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention, Proceedings, Part III</conf-name>
          <conf-date>October 5-9, 2015</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <fpage>18</fpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-24574-4_28</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mall</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Srivastav</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Narayan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Paprzycki</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jaworska</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ganzha</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A comprehensive review of deep neural networks for medical image processing: recent developments and future opportunities</article-title>
          <source>Healthc Anal</source>
          <year>2023</year>
          <volume>4</volume>
          <fpage>100216</fpage>
          <pub-id pub-id-type="doi">10.1016/j.health.2023.100216</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gul</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Bibi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khandakar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ayari</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Chowdhury</surname>
              <given-names>MEH</given-names>
            </name>
          </person-group>
          <article-title>Deep learning techniques for liver and liver tumor segmentation: a review</article-title>
          <source>Comput Biol Med</source>
          <year>2022</year>
          <volume>147</volume>
          <fpage>105620</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105620</pub-id>
          <pub-id pub-id-type="medline">35667155</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(22)00412-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yousef</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Siddiqui</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Albahlal</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Alajlan</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Haq</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>U-net-based models towards optimal MR brain image segmentation</article-title>
          <source>Diagnostics (Basel)</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>9</issue>
          <fpage>1624</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=diagnostics13091624"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/diagnostics13091624</pub-id>
          <pub-id pub-id-type="medline">37175015</pub-id>
          <pub-id pub-id-type="pii">diagnostics13091624</pub-id>
          <pub-id pub-id-type="pmcid">PMC10178263</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Badrinarayanan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Kendall</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cipolla</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>SegNet: a deep convolutional encoder-decoder architecture for image segmentation</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2017</year>
          <volume>39</volume>
          <issue>12</issue>
          <fpage>2481</fpage>
          <lpage>2495</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2016.2644615</pub-id>
          <pub-id pub-id-type="medline">28060704</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <year>2016</year>
          <conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>June 27-30, 2016</conf-date>
          <conf-loc>Las Vegas, NV, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.90</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Van Der Maaten</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Weinberger</surname>
              <given-names>KQ</given-names>
            </name>
          </person-group>
          <article-title>Densely connected convolutional networks</article-title>
          <year>2017</year>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>July 21-26, 2017</conf-date>
          <conf-loc>Honolulu, HI, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2017.243</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bourdev</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Fergus</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Torresani</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Paluri</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Learning spatiotemporal features with 3D convolutional networks</article-title>
          <year>2015</year>
          <conf-name>2015 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>December 7-13, 2015</conf-date>
          <conf-loc>Santiago, Chile</conf-loc>
          <pub-id pub-id-type="doi">10.1109/iccv.2015.510</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Papandreou</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Kokkinos</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yuille</surname>
              <given-names>AL</given-names>
            </name>
          </person-group>
          <article-title>DeepLab: semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2018</year>
          <volume>40</volume>
          <issue>4</issue>
          <fpage>834</fpage>
          <lpage>848</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2017.2699184</pub-id>
          <pub-id pub-id-type="medline">28463186</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vaswani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shazeer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Parmar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Uszkoreit</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gomez</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Polosukhin</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Attention is all you need</article-title>
          <year>2017</year>
          <conf-name>NIPS'17: Proceedings of the 31st International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 4-9, 2017</conf-date>
          <conf-loc>Long Beach, CA, United States</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Rekik</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Transformers in medical image analysis</article-title>
          <source>Intell Med</source>
          <year>2023</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>59</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.1016/j.imed.2022.07.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Transformers in medical image segmentation: a review</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2023</year>
          <volume>84</volume>
          <fpage>104791</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2023.104791</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akinyelu</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Zaccagna</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grist</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Castelli</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rundo</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Brain tumor diagnosis using machine learning, convolutional neural networks, capsule neural networks and vision transformers, applied to MRI: a survey</article-title>
          <source>J Imaging</source>
          <year>2022</year>
          <volume>8</volume>
          <issue>8</issue>
          <fpage>205</fpage>
          <pub-id pub-id-type="doi">10.3390/jimaging8080205</pub-id>
          <pub-id pub-id-type="medline">35893083</pub-id>
          <pub-id pub-id-type="pii">jimaging8080205</pub-id>
          <pub-id pub-id-type="pmcid">PMC9331677</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dosovitskiy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Beyer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kolesnikov</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Weissenborn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhai</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Unterthiner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Dehghani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Minderer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Heigold</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gelly</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Uszkoreit</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Houlsby</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>An image is worth 16x16 words: transformers for image recognition at scale</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on June 3, 2021</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2010.11929"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shamshad</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zamir</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Hayat</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>FS</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Transformers in medical imaging: a survey</article-title>
          <source>Med Image Anal</source>
          <year>2023</year>
          <volume>88</volume>
          <fpage>102802</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2023.102802</pub-id>
          <pub-id pub-id-type="medline">37315483</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(23)00063-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thisanke</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Deshan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chamith</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Seneviratne</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vidanaarachchi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Herath</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Semantic segmentation using vision transformers: a survey</article-title>
          <source>Eng Appl Artif Intell</source>
          <year>2023</year>
          <volume>126</volume>
          <fpage>106669</fpage>
          <pub-id pub-id-type="doi">10.1016/j.engappai.2023.106669</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Adeli</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Yuille</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>TransUNet: transformers make strong encoders for medical image segmentation</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on February 8, 2021</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2102.04306"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Swin-Unet: Unet-like pure transformer for medical image segmentation</article-title>
          <year>2022</year>
          <conf-name>Computer Vision—ECCV 2022 Workshops</conf-name>
          <conf-date>October 23-27, 2022</conf-date>
          <conf-loc>Tel Aviv, Israel</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-25066-8_9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zha</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>TransBTS: multimodal brain tumor segmentation using transformer</article-title>
          <year>2021</year>
          <conf-name>Medical Image Computing and Computer Assisted Intervention—MICCAI 2021</conf-name>
          <conf-date>September 27-October 1, 2021</conf-date>
          <conf-loc>Strasbourg, France</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-030-87193-2_11</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hatamizadeh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nath</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Roth</surname>
              <given-names>HR</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Swin UNETR: swin transformers for semantic segmentation of brain tumors in MRI images</article-title>
          <year>2021</year>
          <conf-name>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Virtual Event</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-08999-2_22</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lillie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zarin</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Colquhoun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Levac</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MDJ</given-names>
            </name>
            <name name-style="western">
              <surname>Horsley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hempel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McGowan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hartling</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aldcroft</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Garritty</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lewin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Macdonald</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Langlois</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Soares-Weiser</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Moriarty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tunçalp</surname>
              <given-names>Ö</given-names>
            </name>
            <name name-style="western">
              <surname>Straus</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>PRISMA Extension for Scoping Reviews (PRISMA-ScR): checklist and explanation</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <volume>169</volume>
          <issue>7</issue>
          <fpage>467</fpage>
          <lpage>473</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/abs/10.7326/M18-0850?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-0850</pub-id>
          <pub-id pub-id-type="medline">30178033</pub-id>
          <pub-id pub-id-type="pii">2700389</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hahm</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Youn</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Ahn</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Comparative validation of AI and non-AI methods in MRI volumetry to diagnose Parkinsonian syndromes</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>3439</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-30381-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-30381-w</pub-id>
          <pub-id pub-id-type="medline">36859498</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-30381-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC10156821</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Long</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Swin Unet3D: a three-dimensional medical image segmentation network combining vision transformer and convolution</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>33</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-023-02129-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-023-02129-z</pub-id>
          <pub-id pub-id-type="medline">36788560</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-023-02129-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC9926542</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>ZC</given-names>
            </name>
          </person-group>
          <article-title>Multi-task learning for concurrent survival prediction and semi-supervised segmentation of gliomas in brain MRI</article-title>
          <source>Displays</source>
          <year>2023</year>
          <volume>78</volume>
          <fpage>102402</fpage>
          <pub-id pub-id-type="doi">10.1016/j.displa.2023.102402</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>JQ</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>3D medical image segmentation using parallel transformers</article-title>
          <source>Pattern Recognit</source>
          <year>2023</year>
          <volume>138</volume>
          <fpage>109432</fpage>
          <pub-id pub-id-type="doi">10.1016/j.patcog.2023.109432</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>She</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Xuan</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>TransMVU: multi-view 2D U-Nets with transformer for brain tumour segmentation</article-title>
          <source>IET Image Process</source>
          <year>2023</year>
          <volume>17</volume>
          <issue>6</issue>
          <fpage>1874</fpage>
          <lpage>1882</lpage>
          <pub-id pub-id-type="doi">10.1049/ipr2.12762</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>GMetaNet: multi-scale ghost convolutional neural network with auxiliary MetaFormer decoding path for brain tumor segmentation</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2023</year>
          <volume>83</volume>
          <fpage>104694</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2023.104694</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>High-resolution Swin transformer for automatic medical image segmentation</article-title>
          <source>Sensors (Basel)</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>7</issue>
          <fpage>3420</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s23073420"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s23073420</pub-id>
          <pub-id pub-id-type="medline">37050479</pub-id>
          <pub-id pub-id-type="pii">s23073420</pub-id>
          <pub-id pub-id-type="pmcid">PMC10099222</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gharaibeh</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Ein</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Al-hazaimeh</surname>
              <given-names>OM</given-names>
            </name>
            <name name-style="western">
              <surname>Nahar</surname>
              <given-names>KMO</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Ain</surname>
              <given-names>WA</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Nawashi</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Swin transformer-based segmentation and multi-scale feature pyramid fusion module for Alzheimer’s disease with machine learning</article-title>
          <source>Int J Onl Eng</source>
          <year>2023</year>
          <volume>19</volume>
          <issue>04</issue>
          <fpage>22</fpage>
          <lpage>50</lpage>
          <pub-id pub-id-type="doi">10.3991/ijoe.v19i04.37677</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Qu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>SDPN: a slight dual-path network with local-global attention guided for medical image segmentation</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2023</year>
          <volume>27</volume>
          <issue>6</issue>
          <fpage>2956</fpage>
          <lpage>2967</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2023.3260026</pub-id>
          <pub-id pub-id-type="medline">37030687</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Anaya-Isaza</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mera-Jiménez</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Fernandez-Quilez</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>CrossTransUnet: a new computationally inexpensive tumor segmentation model for brain MRI</article-title>
          <source>IEEE Access</source>
          <year>2023</year>
          <volume>11</volume>
          <fpage>27066</fpage>
          <lpage>27085</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2023.3257767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Qiu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>CKD-TransBTS: clinical knowledge-driven hybrid transformer with modality-correlated cross-attention for brain tumor segmentation</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2023</year>
          <volume>42</volume>
          <issue>8</issue>
          <fpage>2451</fpage>
          <lpage>2461</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2023.3250474</pub-id>
          <pub-id pub-id-type="medline">37027751</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Long</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based multiclass brain tissue segmentation in fetal MRIs</article-title>
          <source>Sensors (Basel)</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>2</issue>
          <fpage>655</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s23020655"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s23020655</pub-id>
          <pub-id pub-id-type="medline">36679449</pub-id>
          <pub-id pub-id-type="pii">s23020655</pub-id>
          <pub-id pub-id-type="pmcid">PMC9862805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sui</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>An efficient R-Transformer network with dual encoders for brain glioma segmentation in MR images</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2023</year>
          <volume>79</volume>
          <fpage>104034</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2022.104034</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dhamija</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>S</given-names>
            </name>
            <collab>Anjum</collab>
            <name name-style="western">
              <surname>Katarya</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Semantic segmentation in medical images through transfused convolution and transformer networks</article-title>
          <source>Appl Intell (Dordr)</source>
          <year>2023</year>
          <volume>53</volume>
          <issue>1</issue>
          <fpage>1132</fpage>
          <lpage>1148</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35498554"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10489-022-03642-w</pub-id>
          <pub-id pub-id-type="medline">35498554</pub-id>
          <pub-id pub-id-type="pii">3642</pub-id>
          <pub-id pub-id-type="pmcid">PMC9035506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Miao</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Deep mutual learning for brain tumor segmentation with the fusion network</article-title>
          <source>Neurocomputing</source>
          <year>2023</year>
          <volume>521</volume>
          <fpage>213</fpage>
          <lpage>220</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2022.11.038</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ashtari</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sima</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>De Lathauwer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sappey-Marinier</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Maes</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Van Huffel</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Factorizer: a scalable interpretable approach to context modeling for medical image segmentation</article-title>
          <source>Med Image Anal</source>
          <year>2023</year>
          <volume>84</volume>
          <fpage>102706</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2022.102706</pub-id>
          <pub-id pub-id-type="medline">36516557</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(22)00334-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marcus</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bentley</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rueckert</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Concurrent ischemic lesion age estimation and segmentation of CT brain using a transformer-based network</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2023</year>
          <volume>42</volume>
          <issue>12</issue>
          <fpage>3464</fpage>
          <lpage>3473</lpage>
          <pub-id pub-id-type="doi">10.1109/TMI.2023.3287361</pub-id>
          <pub-id pub-id-type="medline">37335797</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Piao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>YH</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yoo</surname>
              <given-names>SJ</given-names>
            </name>
          </person-group>
          <article-title>Intracerebral hemorrhage CT scan image segmentation with HarDNet based transformer</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>7208</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-33775-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-33775-y</pub-id>
          <pub-id pub-id-type="medline">37137921</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-33775-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC10156735</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Flexible fusion network for multi-modal brain tumor segmentation</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2023</year>
          <volume>27</volume>
          <issue>7</issue>
          <fpage>3349</fpage>
          <lpage>3359</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2023.3271808</pub-id>
          <pub-id pub-id-type="medline">37126623</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lv</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ge</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Qiang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ge</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Accurate corresponding fiber tract segmentation via FiberGeoMap learner with application to autism</article-title>
          <source>Cereb Cortex</source>
          <year>2023</year>
          <volume>33</volume>
          <issue>13</issue>
          <fpage>8405</fpage>
          <lpage>8420</lpage>
          <pub-id pub-id-type="doi">10.1093/cercor/bhad125</pub-id>
          <pub-id pub-id-type="medline">37083279</pub-id>
          <pub-id pub-id-type="pii">7133663</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>BTSwin-unet: 3D U-shaped symmetrical swin transformer-based network for brain tumor segmentation with self-supervised pre-training</article-title>
          <source>Neural Process Lett</source>
          <year>2022</year>
          <volume>55</volume>
          <issue>4</issue>
          <fpage>3695</fpage>
          <lpage>3713</lpage>
          <pub-id pub-id-type="doi">10.1007/s11063-022-10919-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rui-Qiang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao-Dong</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ren-Zhe</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cai-Zi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dou-Dou</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lin-Xia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wei-Xin</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Automatic localization of target point for subthalamic nucleus-deep brain stimulation via hierarchical attention-UNet based MRI segmentation</article-title>
          <source>Med Phys</source>
          <year>2023</year>
          <volume>50</volume>
          <issue>1</issue>
          <fpage>50</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1002/mp.15956</pub-id>
          <pub-id pub-id-type="medline">36053005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>TW-Net: transformer weighted network for neonatal brain MRI segmentation</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2023</year>
          <volume>27</volume>
          <issue>2</issue>
          <fpage>1072</fpage>
          <lpage>1083</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2022.3225475</pub-id>
          <pub-id pub-id-type="medline">36446007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khaled</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ghaleb</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>Learning to detect boundary information for brain image segmentation</article-title>
          <source>BMC Bioinformatics</source>
          <year>2022</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>332</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-022-04882-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12859-022-04882-w</pub-id>
          <pub-id pub-id-type="medline">35953776</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12859-022-04882-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC9367147</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A transformer-based generative adversarial network for brain tumor segmentation</article-title>
          <source>Front Neurosci</source>
          <year>2022</year>
          <volume>16</volume>
          <fpage>1054948</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36532274"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnins.2022.1054948</pub-id>
          <pub-id pub-id-type="medline">36532274</pub-id>
          <pub-id pub-id-type="pmcid">PMC9750177</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>JX</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>LETCP: a label-efficient transformer-based contrastive pre-training method for brain tumor segmentation</article-title>
          <source>Appl Sci</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>21</issue>
          <fpage>11016</fpage>
          <pub-id pub-id-type="doi">10.3390/app122111016</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>3D PSwinBTS: an efficient transformer-based Unet using 3D parallel shifted windows for brain tumor segmentation</article-title>
          <source>Digital Signal Process</source>
          <year>2022</year>
          <volume>131</volume>
          <fpage>103784</fpage>
          <pub-id pub-id-type="doi">10.1016/j.dsp.2022.103784</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>SWTRU: star-shaped window transformer reinforced U-net for medical image segmentation</article-title>
          <source>Comput Biol Med</source>
          <year>2022</year>
          <volume>150</volume>
          <fpage>105954</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105954</pub-id>
          <pub-id pub-id-type="medline">36122443</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(22)00688-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Segmentation method of magnetoelectric brain image based on the transformer and the CNN</article-title>
          <source>Information</source>
          <year>2022</year>
          <volume>13</volume>
          <issue>10</issue>
          <fpage>445</fpage>
          <pub-id pub-id-type="doi">10.3390/info13100445</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Qi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A medical image segmentation method based on multi-dimensional statistical features</article-title>
          <source>Front Neurosci</source>
          <year>2022</year>
          <volume>16</volume>
          <fpage>1009581</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36188458"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnins.2022.1009581</pub-id>
          <pub-id pub-id-type="medline">36188458</pub-id>
          <pub-id pub-id-type="pmcid">PMC9521364</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gai</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Min</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhong</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>RMTF-Net: residual mix transformer fusion net for 2D brain tumor segmentation</article-title>
          <source>Brain Sci</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>9</issue>
          <fpage>1145</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=brainsci12091145"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/brainsci12091145</pub-id>
          <pub-id pub-id-type="medline">36138880</pub-id>
          <pub-id pub-id-type="pii">brainsci12091145</pub-id>
          <pub-id pub-id-type="pmcid">PMC9497369</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Qi</surname>
              <given-names>XR</given-names>
            </name>
          </person-group>
          <article-title>Swin transformer improves the IDH mutation status prediction of gliomas free of MRI-based tumor segmentation</article-title>
          <source>J Clin Med</source>
          <year>2022</year>
          <volume>11</volume>
          <issue>15</issue>
          <fpage>4625</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm11154625"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm11154625</pub-id>
          <pub-id pub-id-type="medline">35956236</pub-id>
          <pub-id pub-id-type="pii">jcm11154625</pub-id>
          <pub-id pub-id-type="pmcid">PMC9369996</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ser</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Swin transformer for fast MRI</article-title>
          <source>Neurocomputing</source>
          <year>2022</year>
          <volume>493</volume>
          <fpage>281</fpage>
          <lpage>304</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2022.04.051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>CSU-Net: a CNN-transformer parallel network for multimodal brain tumour segmentation</article-title>
          <source>Electronics</source>
          <year>2022</year>
          <volume>11</volume>
          <issue>14</issue>
          <fpage>2226</fpage>
          <pub-id pub-id-type="doi">10.3390/electronics11142226</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zeineldin</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Pollok</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mangliers</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Karar</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Mathis-Ullrich</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Burgert</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Deep automatic segmentation of brain tumours in interventional ultrasound data</article-title>
          <source>Curr Dir Biomed Eng</source>
          <year>2022</year>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>133</fpage>
          <lpage>137</lpage>
          <pub-id pub-id-type="doi">10.1515/cdbme-2022-0034</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pinaya</surname>
              <given-names>WHL</given-names>
            </name>
            <name name-style="western">
              <surname>Tudosiu</surname>
              <given-names>PD</given-names>
            </name>
            <name name-style="western">
              <surname>Gray</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rees</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Nachev</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ourselin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cardoso</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised brain imaging 3D anomaly detection and segmentation with transformers</article-title>
          <source>Med Image Anal</source>
          <year>2022</year>
          <volume>79</volume>
          <fpage>102475</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1361-8415(22)00122-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2022.102475</pub-id>
          <pub-id pub-id-type="medline">35598520</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(22)00122-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10108352</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>SwinBTS: a method for 3D multimodal brain tumor segmentation using swin transformer</article-title>
          <source>Brain Sci</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>6</issue>
          <fpage>797</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=brainsci12060797"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/brainsci12060797</pub-id>
          <pub-id pub-id-type="medline">35741682</pub-id>
          <pub-id pub-id-type="pii">brainsci12060797</pub-id>
          <pub-id pub-id-type="pmcid">PMC9221215</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kadri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bouaziz</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tmar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gargouri</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Multimodal deep learning based on the combination of EfficientNetV2 and ViT for Alzheimer's disease early diagnosis enhanced by SAGAN data augmentation</article-title>
          <source>Int J Comput Inf Syst Ind Manag Appl</source>
          <year>2022</year>
          <volume>14</volume>
          <fpage>313</fpage>
          <lpage>325</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mirlabs.org/ijcisim/regular_papers_2022/IJCISIM_27.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jiao</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Transition Net: 2D backbone to segment 3D brain tumor</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2022</year>
          <volume>75</volume>
          <fpage>103622</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2022.103622</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>METrans: multi-encoder transformer for ischemic stroke segmentation</article-title>
          <source>Electron Lett</source>
          <year>2022</year>
          <volume>58</volume>
          <issue>9</issue>
          <fpage>340</fpage>
          <lpage>342</lpage>
          <pub-id pub-id-type="doi">10.1049/ell2.12444</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>TransConver: transformer and convolution parallel network for developing automatic brain tumor segmentation in MRI images</article-title>
          <source>Quant Imaging Med Surg</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>2397</fpage>
          <lpage>2415</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35371952"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/qims-21-919</pub-id>
          <pub-id pub-id-type="medline">35371952</pub-id>
          <pub-id pub-id-type="pii">qims-12-04-2397</pub-id>
          <pub-id pub-id-type="pmcid">PMC8923874</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>STC-Net: fusing Swin transformer and convolution neural network for 2D medical image segmentation</article-title>
          <year>2022</year>
          <conf-name>2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)</conf-name>
          <conf-date>October 28-30, 2022</conf-date>
          <conf-loc>Yan'an, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/eiect58010.2022.00069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Viteri</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Piguave</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Pelaez</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Loayza</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Automatic brain white matter hyperintensities segmentation with Swin U-net</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE ANDESCON</conf-name>
          <conf-date>November 16-19, 2022</conf-date>
          <conf-loc>Barranquilla, Colombia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/andescon56260.2022.9989775</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>AST-Net: lightweight hybrid transformer for multi-modal brain tumor segmentation</article-title>
          <year>2022</year>
          <conf-name>2022 26th International Conference on Pattern Recognition (ICPR)</conf-name>
          <conf-date>August 21-25, 2022</conf-date>
          <conf-loc>Montreal, QC, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icpr56361.2022.9956705</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>mmFormer: multi-modal medical transformer for incomplete multimodal learning of brain tumor segmentation</article-title>
          <year>2022</year>
          <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name>
          <conf-date>September 18-22, 2022</conf-date>
          <conf-loc>Singapore</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-16443-9_11</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Attentive symmetric autoencoder for brain MRI segmentation</article-title>
          <year>2022</year>
          <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name>
          <conf-date>September 18-22, 2022</conf-date>
          <conf-loc>Singapore</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-16443-9_20</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>NestedFormer: nested modality-aware transformer for brain tumor segmentation</article-title>
          <year>2022</year>
          <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name>
          <conf-date>September 18-22, 2022</conf-date>
          <conf-loc>Singapore</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-16443-9_14</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>TSEUnet: A 3D neural network with fused Transformer and SE-Attention for brain tumor segmentation</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)</conf-name>
          <conf-date>July 21-23, 2022</conf-date>
          <conf-loc>Shenzhen, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cbms55023.2022.00030</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>TransUNet with attention mechanism for brain tumor segmentation on MR images</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE International Conference on Artificial Intelligence and Computer Applications (ICAICA)</conf-name>
          <conf-date>June 24-26, 2022</conf-date>
          <conf-loc>Dalian, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icaica54878.2022.9844551</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hatamizadeh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Nath</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Myronenko</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Landman</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>UNETR: transformers for 3D medical image segmentation</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)</conf-name>
          <conf-date>January 3-8, 2022</conf-date>
          <conf-loc>Waikoloa, HI, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1109/wacv51458.2022.00181</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nijiati</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tuersun</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Abulizi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tuoheti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abulaiti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zou</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>A symmetric prior knowledge based deep learning model for intracerebral hemorrhage lesion segmentation</article-title>
          <source>Front Physiol</source>
          <year>2022</year>
          <volume>13</volume>
          <fpage>977427</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36505076"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fphys.2022.977427</pub-id>
          <pub-id pub-id-type="medline">36505076</pub-id>
          <pub-id pub-id-type="pii">977427</pub-id>
          <pub-id pub-id-type="pmcid">PMC9727183</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laiton-Bonadiez</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Sanchez-Torres</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Branch-Bedoya</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep 3D neural network for brain structures segmentation using self-attention modules in MRI images</article-title>
          <source>Sensors (Basel)</source>
          <year>2022</year>
          <volume>22</volume>
          <issue>7</issue>
          <fpage>2559</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s22072559"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s22072559</pub-id>
          <pub-id pub-id-type="medline">35408173</pub-id>
          <pub-id pub-id-type="pii">s22072559</pub-id>
          <pub-id pub-id-type="pmcid">PMC9002763</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>More than encoder: introducing transformer decoder to upsample</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>
          <conf-date>December 6-8, 2022</conf-date>
          <conf-loc>Las Vegas, NV, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bibm55620.2022.9995378</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rasoulian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Salari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Weakly supervised intracranial hemorrhage segmentation using hierarchical combination of attention maps from a Swin transformer</article-title>
          <year>2022</year>
          <conf-name>International Workshop on Machine Learning in Clinical Neuroimaging</conf-name>
          <conf-date>September 18, 2022</conf-date>
          <conf-loc>Singapore</conf-loc>
          <fpage>63</fpage>
          <lpage>72</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-031-17899-3_7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayivi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yussif</surname>
              <given-names>SB</given-names>
            </name>
            <name name-style="western">
              <surname>Browne</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Agbesi</surname>
              <given-names>VK</given-names>
            </name>
            <name name-style="western">
              <surname>Sam</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>McGrath</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Segmentation of glioblastoma multiforme via attention neural network</article-title>
          <year>2022</year>
          <conf-name>33rd Irish Signals and Systems Conference (ISSC)</conf-name>
          <conf-date>June 9-10, 2022</conf-date>
          <conf-loc>Cork, Ireland</conf-loc>
          <pub-id pub-id-type="doi">10.1109/issc55427.2022.9826163</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chong</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Si</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>CZ</given-names>
            </name>
          </person-group>
          <article-title>A deep learning-based automatic system for intracranial aneurysms diagnosis on three-dimensional digital subtraction angiographic images</article-title>
          <source>Med Phys</source>
          <year>2022</year>
          <volume>49</volume>
          <issue>11</issue>
          <fpage>7038</fpage>
          <lpage>7053</lpage>
          <pub-id pub-id-type="doi">10.1002/mp.15846</pub-id>
          <pub-id pub-id-type="medline">35792717</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Shu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>BiTr-Unet: a CNN-transformer combined network for MRI brain tumor segmentation</article-title>
          <year>2021</year>
          <conf-name>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Virtual Event</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36005929"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Hierarchical and global modality interaction for brain tumor segmentation</article-title>
          <year>2022</year>
          <conf-name>Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Quebec City, Canada</conf-loc>
          <fpage>441</fpage>
          <lpage>450</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-031-08999-2_38</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Weng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>UCATR: based on CNN and transformer encoding and cross-attention decoding for lesion segmentation of acute ischemic stroke in non-contrast computed tomography images</article-title>
          <year>2021</year>
          <conf-name>43rd Annual International Conference of the IEEE Engineering in Medicine &#38; Biology Society (EMBC)</conf-name>
          <conf-date>November 1-5, 2021</conf-date>
          <conf-loc>Mexico</conf-loc>
          <pub-id pub-id-type="doi">10.1109/embc46164.2021.9630336</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sagar</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ViTBIS: vision transformer for biomedical image segmentation</article-title>
          <year>2021</year>
          <conf-name>MICCAI Workshop on Distributed and Collaborative Learning</conf-name>
          <conf-date>October 1, 2021</conf-date>
          <conf-loc>Strasbourg, France</conf-loc>
          <fpage>34</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-90874-4_4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>HybridCTrm: bridging CNN and transformer for multimodal brain image segmentation</article-title>
          <source>J Healthc Eng</source>
          <year>2021</year>
          <volume>2021</volume>
          <fpage>7467261</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2021/7467261"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2021/7467261</pub-id>
          <pub-id pub-id-type="medline">34630994</pub-id>
          <pub-id pub-id-type="pmcid">PMC8500745</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fidon</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Shit</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ezhov</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Paetzold</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Ourselin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vercauteren</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Generalized Wasserstein dice loss, test-time augmentation, and transformers for the BraTS 2021 challenge</article-title>
          <year>2022</year>
          <conf-name>International MICCAI Brainlesion Workshop</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Singapore</conf-loc>
          <fpage>187</fpage>
          <lpage>196</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-031-09002-8_17</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sagar</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>EMSViT: efficient multi scale vision transformer for biomedical image segmentation</article-title>
          <year>2021</year>
          <conf-name>International MICCAI Brainlesion Workshop</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Quebec City, Canada</conf-loc>
          <fpage>39</fpage>
          <lpage>51</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref99">
        <label>99</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Combining global information with topological prior for brain tumor segmentation</article-title>
          <year>2021</year>
          <conf-name>International MICCAI Brainlesion Workshop</conf-name>
          <conf-date>September 27, 2021</conf-date>
          <conf-loc>Quebec City, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-031-08999-2_16</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref100">
        <label>100</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Davidson</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dense transformer networks for brain electron microscopy image segmentation</article-title>
          <year>2019</year>
          <conf-name>Proceedings of the 28th International Joint Conference on Artificial Intelligence</conf-name>
          <conf-date>August 10-16, 2019</conf-date>
          <conf-loc>Macao, China</conf-loc>
          <pub-id pub-id-type="doi">10.24963/ijcai.2019/401</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref101">
        <label>101</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Menze</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Jakab</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bauer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kalpathy-Cramer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Farahani</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kirby</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Burren</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Porz</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Slotboom</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wiest</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lanczi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gerstner</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Weber</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Arbel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Avants</surname>
              <given-names>BB</given-names>
            </name>
            <name name-style="western">
              <surname>Ayache</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Buendia</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Cordier</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Corso</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Criminisi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Delingette</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Demiralp</surname>
              <given-names>Ç</given-names>
            </name>
            <name name-style="western">
              <surname>Durst</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Dojat</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Doyle</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Festa</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Forbes</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Geremia</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Glocker</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Golland</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hamamci</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Iftekharuddin</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Jena</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>John</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Konukoglu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lashkari</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mariz</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Meier</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pereira</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Precup</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Price</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Raviv</surname>
              <given-names>TR</given-names>
            </name>
            <name name-style="western">
              <surname>Reza</surname>
              <given-names>SMS</given-names>
            </name>
            <name name-style="western">
              <surname>Ryan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sarikaya</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Shotton</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Silva</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Sousa</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Subbanna</surname>
              <given-names>NK</given-names>
            </name>
            <name name-style="western">
              <surname>Szekely</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>OM</given-names>
            </name>
            <name name-style="western">
              <surname>Tustison</surname>
              <given-names>NJ</given-names>
            </name>
            <name name-style="western">
              <surname>Unal</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vasseur</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wintermark</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zikic</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Prastawa</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Reyes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Van Leemput</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The Multimodal Brain Tumor Image Segmentation Benchmark (BRATS)</article-title>
          <source>IEEE Trans Med Imaging</source>
          <year>2015</year>
          <volume>34</volume>
          <issue>10</issue>
          <fpage>1993</fpage>
          <lpage>2024</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/25494501"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TMI.2014.2377694</pub-id>
          <pub-id pub-id-type="medline">25494501</pub-id>
          <pub-id pub-id-type="pmcid">PMC4833122</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref102">
        <label>102</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Vision transformers in multi-modal brain tumor MRI segmentation: a review</article-title>
          <source>Meta-Radiology</source>
          <year>2023</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>100004</fpage>
          <pub-id pub-id-type="doi">10.1016/j.metrad.2023.100004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref103">
        <label>103</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>RF</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Transformers in medical image segmentation: a narrative review</article-title>
          <source>Quant Imaging Med Surg</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>12</issue>
          <fpage>8747</fpage>
          <lpage>8767</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38106306"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/qims-23-542</pub-id>
          <pub-id pub-id-type="medline">38106306</pub-id>
          <pub-id pub-id-type="pii">qims-13-12-8747</pub-id>
          <pub-id pub-id-type="pmcid">PMC10722011</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
