<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e78377</article-id><article-id pub-id-type="doi">10.2196/78377</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>The Performance of Artificial Intelligence in Classifying Molecular Markers in Adult-Type Gliomas Using Histopathological Images: Systematic Review</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Almaabreh</surname><given-names>Obada</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Al-Dafi</surname><given-names>Rukaya</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Tabassum</surname><given-names>Aliya</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Othman</surname><given-names>Ahmad</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Abd-alrazaq</surname><given-names>Alaa</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib></contrib-group><aff id="aff1"><institution>Faculty of Medicine, Yarmouk University</institution><addr-line>Irbid</addr-line><country>Jordan</country></aff><aff id="aff2"><institution>Department of Computer Science and Engineering, College of Engineering, Qatar University</institution><addr-line>Doha</addr-line><country>Qatar</country></aff><aff id="aff3"><institution>AI Center for Precision Health, Weill Cornell Medical College in Qatar</institution><addr-line>103 Ezdan Oasis, Alwakrah</addr-line><addr-line>Doha</addr-line><country>Qatar</country></aff><aff id="aff4"><institution>Department of Biomedical Sciences, College of Health Sciences, Qatar University</institution><addr-line>Doha</addr-line><country>Qatar</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Schwartz</surname><given-names>Amy</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Ibrahim</surname><given-names>Babul Salam</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Dhou</surname><given-names>Khaldoon</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Lu</surname><given-names>Yi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Alaa Abd-alrazaq, PhD, AI Center for Precision Health, Weill Cornell Medical College in Qatar, 103 Ezdan Oasis, Alwakrah, Doha, Qatar, 974 17849573; 
<email>aaa4027@qatar-med.cornell.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>13</day><month>3</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e78377</elocation-id><history><date date-type="received"><day>01</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>06</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Obada Almaabreh, Rukaya Al-Dafi, Aliya Tabassum, Ahmad Othman, Alaa Abd-alrazaq. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 13.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e78377"/><abstract><sec><title>Background</title><p>Adult-type gliomas are among the most prevalent and lethal primary central nervous system tumors, where prompt and accurate diagnosis is essential for maximizing survival prospects. 
Molecular classification, particularly the detection of isocitrate dehydrogenase (IDH) mutations and 1p/19q codeletions, has become crucial for accurate diagnosis and prognosis. Artificial intelligence (AI) has emerged as a promising adjunct in enhancing diagnostic accuracy using histopathological images. Existing reviews mostly focused on radiology rather than histopathology, and no comprehensive systematic review has specifically evaluated AI performance exclusively from histopathological images for detecting these two molecular markers.</p></sec><sec><title>Objective</title><p>This study aims to systematically evaluate the performance of AI models in detecting and classifying IDH mutation status and 1p/19q gene codeletion in adult-type gliomas using histopathological images.</p></sec><sec sec-type="methods"><title>Methods</title><p>A systematic review was conducted in accordance with PRISMA-DTA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;Extension for Diagnostic Test Accuracy) guidelines. Seven databases (MEDLINE, PsycINFO, Embase, IEEE Xplore, ACM Digital Library, Scopus, and Google Scholar) were searched for studies published between 2015 and 2025. Eligible studies used AI models on histopathological images for molecular classification of adult-type gliomas and reported performance metrics. Study selection, data extraction, and risk of bias assessment using a modified QUADAS-2 (Quality Assessment of Diagnostic Accuracy Studies 2) tool were conducted independently by two reviewers. Extracted data were synthesized narratively.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 2453 reports were identified, with 22 studies meeting the inclusion criteria. The pooled average accuracy, sensitivity, specificity, and area under the curve (AUC) across studies were 85.46%, 84.55%, 86.03%, and 86.53%, respectively. Hybrid models demonstrated the highest diagnostic performance (accuracy 92.80% and sensitivity 89.62%). 
In general, AI models that used multimodal data outperformed those that used unimodal data in terms of sensitivity (90.15% vs 84.31%) and AUC (88.93% vs 86.29%). Furthermore, models had a better overall performance in identifying IDH mutations than 1p/19q codeletions, with higher accuracy (86.13% vs 81.63%), specificity (86.61% vs 78.11%), and AUC (86.74% vs 85.15%). Unexpectedly, AI models designed for binary classification exhibited lower performance than those for multiclass classification in terms of both accuracy (84.02% vs 91.98%) and sensitivity (80.18% vs 93.41%). However, these differences should be interpreted as descriptive trends rather than statistically validated superiority, as formal between-group comparisons were not feasible.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>AI models show strong potential as complementary tools for the molecular classification of adult-type gliomas using histopathology images, particularly for IDH mutation detection. However, these findings are constrained by the limited number of studies, the focus on adult-type gliomas, lack of meta-analysis, and restriction to English-language publications. While AI offers valuable diagnostic support, it must be integrated with expert clinical judgment. 
Future research should prioritize larger, more diverse datasets and multimodal AI frameworks and extend to other brain tumor types for broader applicability.</p></sec><sec><title>Trial Registration</title><p>PROSPERO CRD420250653668; https://www.crd.york.ac.uk/PROSPERO/view/CRD420250653668</p></sec></abstract><kwd-group><kwd>glioma</kwd><kwd>brain tumors</kwd><kwd>molecular markers</kwd><kwd>artificial intelligence</kwd><kwd>histopathology</kwd><kwd>isocitrate dehydrogenase</kwd><kwd>IDH mutation</kwd><kwd>1p/19q codeletion</kwd><kwd>systematic review</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Gliomas are the most frequent primary tumors affecting the central nervous system (CNS) in adults, with a variable prognosis depending on the specific subtype and histological grade [<xref ref-type="bibr" rid="ref1">1</xref>]. The global prevalence of CNS neoplasms in 5 years is approximately 771,110 cases. Primary brain tumors account for 1.7% of all cancers, with a global incidence of 3.9 cases per 100,000 people each year. Among these, gliomas are the most common malignant type, accounting for 75% of all malignant CNS tumors, with an incidence of 6 cases per 100,000 people annually [<xref ref-type="bibr" rid="ref2">2</xref>]. Gliomas are among the tumors that are difficult to treat, with a 5-year overall survival of less than 35% [<xref ref-type="bibr" rid="ref3">3</xref>]. Patients with brain tumors present with variable clinical symptoms according to the part of the brain affected. However, they usually share general symptoms that are nonspecific to anatomic location (eg, seizures, headaches).</p><p>Gliomas are neuroectodermal tumors arising from glial cells or their precursors and encompass astrocytomas, oligodendrogliomas, and ependymomas [<xref ref-type="bibr" rid="ref3">3</xref>]. 
Mutations in isocitrate dehydrogenase 1 and 2 (IDH1 and IDH2) are considered early events in gliomagenesis and occur more frequently in lower-grade gliomas [<xref ref-type="bibr" rid="ref3">3</xref>]. Diffuse gliomas harboring IDH1/2 mutations are associated with significantly improved prognosis compared with IDH&#x2013;wild-type diffuse gliomas [<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>Codeletion of chromosomal arms 1p and 19q results from an unbalanced centromeric translocation, t(1;19)(q10;p10) [<xref ref-type="bibr" rid="ref3">3</xref>]. In combination with IDH mutation, 1p/19q codeletion is a defining molecular feature required for the diagnosis of oligodendroglioma, IDH mutant, and 1p/19q codeleted. This molecular alteration is associated with a favorable prognosis among diffuse gliomas and predicts enhanced responsiveness to alkylating chemotherapy [<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>Conventional classification of gliomas relied on histologic features as a gold standard. Initially, pathologists utilized microscopic analysis of histochemically stained (eg, hematoxylin and eosin) sections for the diagnosis of gliomas [<xref ref-type="bibr" rid="ref4">4</xref>]. However, ancillary immunohistochemistry has been increasingly used over the past few decades to enhance diagnostic accuracy [<xref ref-type="bibr" rid="ref4">4</xref>]. With either method, pathologists followed a diagnostic algorithm to rule out nonneoplastic lesions (eg, infarcts) or other types of malignancies (eg, metastatic) based on histologic features. Clinical information (eg, age) and radiologic findings (eg, location) are also valuable clues in the diagnosis [<xref ref-type="bibr" rid="ref4">4</xref>]. Nonetheless, the histologic-based classification system has a few limitations, particularly interobserver variability, in addition to insufficient or nonrepresentative tissue sampling [<xref ref-type="bibr" rid="ref5">5</xref>]. 
Although histologic-based classification can be accurate for prototypic tumors, classification of tumors encountered in clinical practice is often more challenging due to a degree of mixed features in the same sample [<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>In order to address these limitations, molecular-based diagnosis has emerged as a possible solution. Recent advancements in genetics have helped to identify specific gene alterations involved in the development of different types of gliomas. Detection of these gene alterations is the basis of molecular-based diagnosis. This method offers improved accuracy and makes it possible to distinguish between primary and secondary gliomas, which was not possible with the previous histologic-based method [<xref ref-type="bibr" rid="ref6">6</xref>]. Therefore, the World Health Organization (WHO) integrated molecular features into its most recent classification of CNS tumors, published in 2021, based on the presence or absence of IDH1 and IDH2 mutations, in addition to chromosome 1p/19q codeletion [<xref ref-type="bibr" rid="ref7">7</xref>]. It divided adult-type gliomas into three categories: astrocytoma with IDH mutation, oligodendroglioma with IDH mutation and 1p/19q codeletion, and glioblastoma with IDH wild-type [<xref ref-type="bibr" rid="ref7">7</xref>]. Furthermore, a combined approach using molecular and histologic features is used for the grading of IDH-mutant astrocytomas as CNS WHO grade 2, 3, or 4. The new WHO classification also led to a more accurate prognosis of gliomas, as the absence of IDH mutation is often a sign of more aggressive tumors [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Neuroimaging provides noninvasive insight into the molecular landscape of diffuse gliomas. 
Advanced magnetic resonance imaging (MRI) features can suggest IDH mutation status, as IDH-mutant tumors typically exhibit lower perfusion and less aggressive radiological behavior than IDH&#x2013;wild-type gliomas [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. Magnetic resonance spectroscopy allows in vivo detection of the IDH-specific oncometabolite D-2-hydroxyglutarate, enabling preoperative identification of IDH-mutant tumors [<xref ref-type="bibr" rid="ref7">7</xref>]. In addition, oligodendrogliomas defined by combined IDH mutation and 1p/19q codeletion display characteristic imaging features, including cortical localization, calcifications, and relatively increased perfusion, reflecting distinct tumor biology [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. These radiogenomic associations support the integration of imaging into molecular stratification of adult-type diffuse gliomas [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>As gliomas pose diagnostic challenges due to their complex histological and molecular features, artificial intelligence (AI) has emerged as a promising tool to enhance diagnostic accuracy and efficiency. AI models were able to offer rapid and accurate results with accuracy exceeding 90%, which can further improve the access and speed of molecular diagnosis [<xref ref-type="bibr" rid="ref8">8</xref>]. In general, AI implementation in health care can lead to improved data synthesis (eg, patient data, medical literature) [<xref ref-type="bibr" rid="ref9">9</xref>]. Additionally, AI can process multimodal data (two or more different data sources such as histopathological images, radiological data, clinical variables, genomic markers, and other relevant data types) to provide more accurate diagnosis and predictions. 
Moreover, it offers means for augmenting human performance, as human limitations can prevent processing large quantities of information and making the optimal judgment at all times. Thus, AI can improve care consistency, increase precision, accelerate discoveries, and minimize disparities [<xref ref-type="bibr" rid="ref9">9</xref>].</p></sec><sec id="s1-2"><title>Research Gaps and Aim</title><p>The use of AI in the diagnosis of brain tumors and gliomas has been widely studied, with many reviews summarizing the results of numerous studies. However, existing reviews exhibit several limitations. For instance, many reviews focused on AI implementation in other diagnostic methods of gliomas, such as radiology-based [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], surgery-based [<xref ref-type="bibr" rid="ref12">12</xref>], and DNA-based diagnoses [<xref ref-type="bibr" rid="ref13">13</xref>]. As for reviews focusing on AI implementation in the molecular diagnosis of gliomas, they fall into one of the following categories: not systematic [<xref ref-type="bibr" rid="ref14">14</xref>], outdated [<xref ref-type="bibr" rid="ref15">15</xref>], restricted by databases or search terms [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref16">16</xref>], lacking subtyping of gliomas [<xref ref-type="bibr" rid="ref17">17</xref>], not focused on genetic markers (IDH mutations and 1p/19q codeletion) [<xref ref-type="bibr" rid="ref11">11</xref>], or limited to a specific subfield of AI (eg, deep learning or machine learning) [<xref ref-type="bibr" rid="ref16">16</xref>]. 
To address the limitations of the previous reviews, this study aims to systematically assess the performance of AI in detecting and classifying IDH mutation status and 1p/19q gene codeletion in adult-type gliomas using histopathological images.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><p>This review was conducted in accordance with the PRISMA-DTA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;Extension for Diagnostic Test Accuracy) [<xref ref-type="bibr" rid="ref18">18</xref>] (see <xref ref-type="supplementary-material" rid="app8">Checklist 1</xref>) and registered with the international Prospective Register of Systematic Reviews (PROSPERO) under the number CRD420250653668.</p><sec id="s2-1"><title>Search Strategy</title><p>The literature search was performed on November 15, 2024, using the following digital databases: MEDLINE, PsycINFO, Embase, IEEE Xplore, ACM Digital Library, Scopus, and Google Scholar. A biweekly automated search was set for 4 months, concluding on March 15, 2025. Given the large number of results from Google Scholar, which are ranked by relevance, we reviewed only the first 100 entries (10 pages). Our query utilized a combination of proper keywords and Boolean operators, including but not limited to ((&#x201C;Artificial Intelligence&#x201D; OR &#x201C;Machine Learning&#x201D; OR &#x201C;Deep Learning&#x201D;) AND (&#x201C;Histopatholog*&#x201D; OR &#x201C;Whole-Slide Imag*&#x201D;) AND (&#x201C;Glioma*&#x201D; OR &#x201C;Astrocytoma*&#x201D; OR &#x201C;Oligodendroglioma*&#x201D; OR &#x201C;Glioblastoma*&#x201D;)). The search was restricted to studies published between 2015 and 2025. The search query for each database is shown in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. 
To identify additional studies, we screened the reference lists of included studies (ie, backward reference list checking) and checked studies that cited the included studies (ie, forward reference list checking).</p></sec><sec id="s2-2"><title>Study Selection</title><p>The selection process of relevant studies consisted of three steps. The first step involved removing duplicates using EndNote. In the second step, the studies were assigned to two authors, who screened the papers independently by title and abstract. The final step involved each author reading the remaining papers in full text independently. Discussions were held to resolve any differences or ambiguities in steps two and three.</p></sec><sec id="s2-3"><title>Study Eligibility Criteria</title><p>Studies fulfilling the following criteria were included: (1) original articles, theses, dissertations, and conference papers written in English, (2) studies published from 2015 onward, (3) studies focused on patients with adult-type glioma with no restrictions on age, (4) studies that used histopathological images gathered from tissue biopsies, tumor excision, or digital histopathological images, (5) studies that assessed the performance of AI algorithms in the detection and classification of the molecular markers, specifically the IDH mutation status and the 1p/19q gene codeletion status using histopathology data, and (6) studies that reported results related to the performance of the developed AI models. No restriction was applied regarding study settings, study design, and country of publication. 
Studies were excluded if they (1) used data modalities other than histopathology images (eg, radiological images, stimulated Raman histology), (2) did not employ AI algorithms for molecular marker detection or classification, (3) were not human studies, or (4) were categorized as editorials, letters to editors, posters, conference abstracts, reviews, commentaries, short communications, and brief reports.</p></sec><sec id="s2-4"><title>Data Extraction</title><p>Two authors independently extracted data from the eligible studies using predefined tables in Microsoft Excel sheet. Any discrepancies in the extracted data were resolved through discussion. The extracted data included four main categories: (1) study characteristics, including surname of first author, publication type, publication year, and country of publication; (2) participants and dataset characteristics, including number of participants, mean age, female percentage, dataset size, data source, magnification, slide image resolution, level of analysis, number of classes, and ground truth assessor; (3) AI model characteristics, covering problem-solving approaches, AI classifiers, type of classification, aim of classifier, data input to AI algorithms, features extraction methods, type of validation, and performance metrics; (4) results, including accuracy, sensitivity, specificity, and area under the curve (AUC). The data extraction form is shown in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec><sec id="s2-5"><title>Risk of Bias Assessment</title><p>To evaluate the quality and reliability of the included studies, two reviewers independently conducted a risk of bias assessment using an adapted version of the QUADAS-2 (Quality Assessment of Diagnostic Accuracy Studies 2) tool [<xref ref-type="bibr" rid="ref19">19</xref>]. The original QUADAS-2 tool was modified to better suit the context of our review, which focuses on AI applications. 
Specifically, we incorporated relevant elements from the PROBAST (Prediction Model Risk of Bias Assessment Tool) to ensure that the assessment adequately addressed the methodological nuances of AI-based diagnostic models [<xref ref-type="bibr" rid="ref20">20</xref>]. Each domain included four tailored signaling questions designed to determine the potential for bias and to evaluate applicability to the review&#x2019;s research question. The risk of bias was rated separately for each domain, while concerns regarding applicability were assessed for the first three domains. For example, the &#x201C;Participants&#x201D; domain examined issues such as sample representativeness, exclusions, and subgroup balance. The &#x201C;Index Test&#x201D; domain evaluated the transparency and consistency of the AI model&#x2019;s design and feature use. The &#x201C;Reference Standard&#x201D; focused on the validity and consistency of outcome definitions and assessor qualifications, while the &#x201C;Analysis&#x201D; domain addressed data inclusion, preprocessing, set division, and model performance evaluation. In addition to risk of bias, we also assessed concerns regarding applicability in the first three domains, evaluating whether the participants, AI model, and outcome definitions were aligned with the review&#x2019;s objectives. To refine and validate our modified tool, we conducted a pilot assessment using five studies, which allowed us to fine-tune the criteria before full application. Subsequently, two reviewers independently assessed all included studies using the finalized tool (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). Any discrepancies in assessments were resolved through discussion and consensus.</p></sec><sec id="s2-6"><title>Data Synthesis</title><p>A narrative approach was used to synthesize the data extracted from the included studies. 
Specifically, we used texts and tables to summarize and describe the characteristics of the included studies and results. We could not perform a meta-analysis because fewer than two studies reported the confusion matrices or other necessary details (eg, number of cases and controls in the test set) required to calculate the numerators and denominators for the same performance measures (accuracy, sensitivity, specificity, and AUC), AI algorithms, and measured outcomes (detection of IDH and 1p/19q). Instead of meta-analysis, we calculated the traditional mean for each metric (ie, accuracy, sensitivity, specificity, and AUC).</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Search Results</title><p>As shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>, the literature searches identified a total of 2453 reports from databases. After removing duplicate records (n=583), we screened the titles and abstracts of 1870 reports. This led to the exclusion of 1753 reports. Of the remaining 117 reports, we could not retrieve the full text for 5 reports. Subsequently, we checked the eligibility of the remaining 112 reports. This led to the exclusion of 91 reports for the following reasons: not related to glioma (n=7), not related to molecular classification (n=53), not pertaining to histopathology (n=11), inappropriate publication type (n=19), and not in the English language (n=1). We included an additional study by checking the reference list of the included studies. 
Ultimately, only 22 reports met the inclusion criteria and were included in the review [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref42">42</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Flowchart of the study selection process.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78377_fig01.png"/></fig></sec><sec id="s3-2"><title>Characteristics of the Included Studies</title><p>The included studies were published between 2019 and 2024 (<xref ref-type="table" rid="table1">Table 1</xref>). The largest proportion of studies was published in 2023 (9/22, 40.91%), followed by 2024 (5/22, 22.73%). Only 3 of the 22 studies (13.64%) are conference papers, whereas 16 (72.73%) are journal articles and 3 (13.64%) are preprints. Studies were carried out in different countries, with the United States and China being the most frequent contributors (6/22, 27.27%) and (5/22, 22.73%), respectively. The number of participants in the included studies ranges from 29 to 2845, with an average of 765.1 (SD 815.6). The mean female participation rate is 42.45% with a range between 34.5% and 49.3%. The dataset size (ie, number of whole-slide images or patches) varied widely (47&#x2010;27,000), with an average of 2464 (SD 5851). It is worth mentioning that the unit of analysis used to compute performance metrics was predominantly whole-slide image level (21/22, 95.5%), while it was at the patch level in 1 (4.5%) study. The participants&#x2019; age ranged from 42.2 to 54.5 with an average of 48.03 (SD 4.2; <xref ref-type="table" rid="table1">Table 1</xref>). 
The characteristics of each included study are described in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of included studies.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Features</td><td align="left" valign="bottom">Number of studies</td><td align="left" valign="bottom">References</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Year of publication, n (%)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2024</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2023</td><td align="left" valign="top">9 (40.91)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2022</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]</td></tr><tr><td align="char" char="." 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2021</td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2020</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2019</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Publication type, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Journal article</td><td align="left" valign="top">16 (72.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Conference paper</td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Preprint</td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Country of publication, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>United States</td><td align="left" valign="top">6 (27.27)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>China</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Germany</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Australia</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>India</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Luxembourg</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Canada</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref26">26</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Switzerland</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref30">30</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>South Korea</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref31">31</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Saudi Arabia</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref33">33</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Japan</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">Number of participants<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup>, mean (SD); range (%)</td><td align="left" valign="top">765.1 (815.6); 29&#x2010;2845 (95.45)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">Female percentage<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> (%), mean (SD); range (%)</td><td align="left" valign="top">42.45 (0.03); 34.5&#x2010;49.3 (59.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">Dataset size<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>, mean (SD); range (%)</td><td align="left" valign="top">2464 (5851); 47&#x2010;27,000 (95.45)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">Mean age<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup> (year), mean (SD); range (%)</td><td align="left" valign="top">48.03 (4.2); 42.2&#x2010;54.5 (59.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>One study did not report the number of participants [<xref ref-type="bibr" rid="ref23">23</xref>].</p></fn><fn id="table1fn2"><p><sup>b</sup>Nine studies did not report the female percentage [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>].</p></fn><fn id="table1fn3"><p><sup>c</sup>One study did not report the dataset size [<xref ref-type="bibr" rid="ref21">21</xref>].</p></fn><fn id="table1fn4"><p><sup>d</sup>Nine studies did not report the mean age [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref38">38</xref>].</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Features of Histopathological Images</title><p>As presented in <xref ref-type="table" rid="table2">Table 2</xref>, the histopathological images were captured at various magnification levels, including 2.5&#x00D7;, 5&#x00D7;, 10&#x00D7;, 20&#x00D7;, 40&#x00D7;, and 100&#x00D7;. Among these, 20&#x00D7; magnification was the most commonly used (12/22, 54.55%). While 14 (63.64%) studies focused on architectural-level analysis, 13 (59.09%) studies focused on cellular-level analysis. 
Slide resolutions varied across studies, with 256&#x00D7;256 pixels being the most common (8/22, 36.36%). Datasets were sourced from either open-source databases (16/22, 72.73%) or closed-source datasets (13/22, 59.09%) (ie, data were collected by the study authors or obtained from previous studies). The open datasets used include The Cancer Genome Atlas (15/22, 68.18%) and the Digital Brain Tumor Atlas (1/22, 4.55%), with some studies combining open and closed sources. The histopathological images were labeled into 2 classes in about two-thirds of the studies (14/22, 63.64%). The features of histopathological images in each study are described in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Features of histopathological images<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Features</td><td align="left" valign="bottom">Number of studies, n (%)</td><td align="left" valign="bottom">References</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Magnification</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2.5&#x00D7;</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref34">34</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>5&#x00D7;</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]</td></tr><tr><td align="char" char="." 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>10&#x00D7;</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>20&#x00D7;<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></italic></td><td align="left" valign="top"><italic>12</italic> (54.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>40&#x00D7;</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>100&#x00D7;</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not reported</td><td align="left" valign="top">7 (31.82)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Level of analysis</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>Architecture level</italic></td><td align="left" valign="top"><italic>14</italic> (63.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Cell level</td><td align="left" valign="top">13 (59.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not reported</td><td align="left" valign="top">7 (31.82)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Slide image resolution</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>224&#x00D7;224</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>256&#x00D7;256</italic></td><td align="left" valign="top"><italic>8</italic> (36.36)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>512&#x00D7;512</td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>1024&#x00D7;1024</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">7 (31.82)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not reported</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Data sources</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>Open source</italic></td><td align="left" valign="top"><italic>16</italic> (72.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Closed source</td><td align="left" valign="top">13 (59.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Open dataset name</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>TCGA</italic><sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup></td><td align="left" valign="top"><italic>15</italic> (68.18)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DBTA<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup></td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Not applicable</td><td align="left" valign="top">6 (27.27)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Number of classes</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>Two</italic></td><td align="left" valign="top"><italic>14</italic> (63.64)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Three</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Four</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Five</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Six</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref40">40</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>Studies used both data sources, open and closed [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. 
</p></fn><fn id="table2fn2"><p><sup>b</sup>Values in italics indicate highest score.</p></fn><fn id="table2fn3"><p><sup>c</sup>TCGA: The Cancer Genome Atlas.</p></fn><fn id="table2fn4"><p><sup>d</sup>DBTA: Digital Brain Tumor Atlas. </p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4"><title>Features of AI</title><p>AI was primarily used for binary classification in 86.36% (19/22) of studies, where models differentiate between two outcomes (eg, IDH-mutant vs wild-type), whereas only 22.73% (5/22) employed multiclass classification, which predicts among 3 or more glioma subtypes (<xref ref-type="table" rid="table3">Table 3</xref>). The majority of studies (20/22, 90.91%) used AI for IDH subtyping (ie, distinguishing between IDH mutant and IDH wild-type), whereas only 6 (27.27%) studies applied AI for subtyping IDH mutant tumors in relation to 1p/19q gene codeletion. Convolutional neural networks (CNNs) were the most commonly used classifier and feature extraction method, appearing in 54.55% (12/22) and 90.91% (20/22) of studies, respectively. For specific classifiers, CNNs are the most utilized, appearing in 54.55% (12/22) of studies, followed by multiple instance learning (MIL) (which handles weakly labeled slide-level data by learning from sets of image patches without requiring pixel-level annotations) at 50% (11/22), transformers at 18.18% (4/22), and hybrid models (ie, single end-to-end architectures that integrate convolutional components with attention-based mechanisms [eg, CNN-transformer models] within a unified computational graph and are trained jointly) at 9.09% (2/22). For specific feature extraction models, ResNet50 leads with 45.45% (10/22) usage, followed by densely connected convolutional network 121 (DenseNet121) at 22.73% (5/22) and InceptionV3 at 18.18% (4/22). 
Among the included studies, most models were developed using histopathological images alone (17/22, 77.3%), while a smaller number (5/22, 22.7%) incorporated additional modalities such as MRI scans (2/22, 9.1%), demographic information (3/22, 13.6%), and clinical variables (2/22, 9.1%). The most widely used method for model validation was k-fold cross-validation, used in 50% (11/22) of studies, followed by the train-test split approach in 45.45% (10/22) of studies. AI model performance was assessed using multiple metrics, with accuracy being the most frequently reported (19/22, 86.36%), followed by AUC and sensitivity, each used in 68.18% (15/22) of studies. A detailed summary of the AI models used in each study is provided in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Features of artificial intelligence (AI).</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feature</td><td align="left" valign="bottom">Number of studies, n (%)</td><td align="left" valign="bottom">References</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Type of classification</td></tr><tr><td align="left" valign="top">&#x2003;<italic>Binary<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></italic></td><td align="left" valign="top"><italic>19</italic> (86.36)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Multiclass</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Aim of AI algorithms<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td></tr><tr><td align="left" valign="top">&#x2003;<italic>Subtyping IDH</italic><sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup></td><td align="left" valign="top"><italic>20</italic> (90.91)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Subtyping 1p/19q codeletion</td><td align="left" valign="top">6 (27.27)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Classifier</td></tr><tr><td align="left" valign="top">&#x2003;<italic>Convolutional neural networks</italic></td><td align="left" valign="top"><italic>12</italic> (54.55)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref38">38</xref>-<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Multiple instance learning</td><td align="left" valign="top">11 (50)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Transformers</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Hybrid models</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Ensemble model</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Logistic regression</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Random forest</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Multilayer perceptron</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Specific classifier</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>AttMIL</italic><sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup></td><td align="left" valign="top"><italic>7</italic> (31.82)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Traditional MIL<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup></td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ViT<sup><xref ref-type="table-fn" rid="table3fn6">f</xref></sup></td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DenseNet121<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup></td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup></td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Features extraction methods</td></tr><tr><td align="left" valign="top">&#x2003;<italic>Convolutional neural networks</italic></td><td align="left" valign="top"><italic>20</italic> (90.91)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Transformers</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Hybrid models</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;MIL</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Random forest</td><td align="left" valign="top">1 (4.55)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Specific features extraction model</td></tr><tr><td align="left" valign="top">&#x2003;<italic>ResNet50</italic></td><td align="left" valign="top"><italic>10</italic> (45.45)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref35">35</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;DenseNet121</td><td align="left" valign="top">5 (22.73)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;InceptionV3</td><td align="left" valign="top">4 (18.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;ResNet18</td><td align="left" valign="top">3 (13.64)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;VGG19</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;ViT</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Not reported</td><td align="left" valign="top">2 (9.09)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Data modality</td></tr><tr><td align="left" valign="top">&#x2003;Unimodal data</td><td align="left" valign="top">17 (77.3)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Multimodal data</td><td align="left" valign="top">5 (22.7)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Types of validation</td></tr><tr><td align="left" valign="top">&#x2003;<italic>k-fold cross-validation</italic></td><td align="left" valign="top"><italic>11</italic> (50)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Training-test split</td><td align="left" valign="top">10 (45.45)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;External validation</td><td align="left" valign="top">7 (31.82)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top" colspan="3">Performance measures</td></tr><tr><td align="left" valign="top">&#x2003;<italic>Accuracy</italic></td><td align="left" valign="top"><italic>19</italic> (86.36)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;AUC<sup><xref ref-type="table-fn" rid="table3fn9">i</xref></sup></td><td align="left" valign="top">15 (68.18)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Sensitivity</td><td align="left" valign="top">15 (68.18)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;<italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">9 (40.91)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Precision</td><td align="left" valign="top">8 (36.36)</td><td align="char" char="." 
valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Specificity</td><td align="left" valign="top">8 (36.36)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]</td></tr><tr><td align="left" valign="top">&#x2003;Others</td><td align="left" valign="top">8 (36.36)</td><td align="char" char="." valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>Values in italic indicate highest score.</p></fn><fn id="table3fn2"><p><sup>b</sup>Studies reported results for both IDH mutation status and 1p/19q codeletion status [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>].</p></fn><fn id="table3fn3"><p><sup>c</sup>IDH: isocitrate dehydrogenase.</p></fn><fn id="table3fn4"><p><sup>d</sup>AttMIL: attention-based multiple instance learning.</p></fn><fn id="table3fn5"><p><sup>e</sup>MIL: multiple instance learning.</p></fn><fn id="table3fn6"><p><sup>f</sup>ViT: vision 
transformer.</p></fn><fn id="table3fn7"><p><sup>g</sup>DenseNet121: densely connected convolutional network 121.</p></fn><fn id="table3fn8"><p><sup>h</sup>VGG: visual geometry group.</p></fn><fn id="table3fn9"><p><sup>i</sup>AUC: area under the curve.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-5"><title>Results of the Risk of Bias and Applicability</title><p>In the selection of participants domain, nearly a quarter of the included studies (5/22, 23%) provided sufficient information to confirm the use of an appropriate consecutive or random sampling of eligible participants. Nearly all studies (21/22, 95%) reported a sufficient sample size. Additionally, 8 (36%) studies avoided inappropriate exclusions, while over half (12/22, 55%) demonstrated balanced subgroup representation. Consequently, a small proportion of the studies (2/22, 9%) were assessed as having a low risk of bias, while a larger portion (8/22, 36%) were rated as having an unclear risk of bias in the &#x201C;selection of participants&#x201D; domain, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><p>In terms of matching participants to the predefined requirements of the review question, a low level of concern was identified in 95% (21/22) of the included studies, as shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Results of the assessment of risk of bias in the included studies.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78377_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Results of the assessment of applicability concerns in the included studies.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e78377_fig03.png"/></fig><p>A substantial majority of the included studies comprehensively detailed their AI 
models, with 21 of the 22 (95%) studies providing thorough descriptions. An overwhelming majority, all 22 studies (100%), clearly reported the features (predictors) used. Moreover, all 22 studies (100%) ensured that these features were sourced without prior knowledge of the outcome data. Consistency in feature assessment across participants was observed in 21 of the 22 (95%) studies. Consequently, the potential for bias in the &#x201C;index test&#x201D; domain was assessed as low in 20 of the 22 (91%) studies, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>. In addition, all 22 (100%) studies were found to have minimal concerns regarding the alignment between the model&#x2019;s predictors and the review question&#x2019;s criteria, as illustrated in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><p>In most of the included studies (20/22, 91%), the outcome of interest, histopathology image analysis, was conducted by an expert (eg, pathologist or neuropathologist), ensuring the necessary expertise to accurately classify the outcomes. Nearly all studies (22/22, 100%) applied consistent and uniform outcome definitions across all participants, with outcome classification (ie, image labeling) blinded to the AI model predictions. An overwhelming majority (22/22, 100%) determined the outcome without prior knowledge of the predictor information. Furthermore, in all 22 (100%) studies, the diagnostic process was carried out over an appropriate duration, using the same diagnostic criteria for all participants. As a result, the risk of bias in the &#x201C;reference standard&#x201D; domain was considered low in the majority of studies (20/22, 91%), as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>. 
Additionally, a similar proportion of studies (21/22, 95%) exhibited minimal concerns regarding discrepancies in outcome definition, timing, and determination methods, as shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><p>Finally, all of the studies (22/22, 100%) ensured the inclusion of all enrolled participants in the data analysis. A substantial number of these studies (21/22, 95%) reported no missing values, or if missing values were present, they were handled appropriately (eg, through multiple imputation).</p><p>Similarly, all studies (22/22, 100%) adopted suitable measures to evaluate the performance of their models. The confusion matrix was presented, or more than one evaluation measure was used, and the selected measures were deemed appropriate.</p><p>Nearly all of the studies (21/22, 95%) demonstrated an appropriate split among training, validation, and test sets. The chosen distribution aligned with best practices in the field (eg, 70%&#x2010;80% for training, 10%&#x2010;15% for validation, and 10%&#x2010;20% for testing).</p><p>Consequently, the conduct and interpretation of the analysis did not introduce bias in most studies, with 21/22 (95%) having a low risk of bias in the &#x201C;analysis&#x201D; domain, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>. A detailed breakdown of the &#x201C;risk of bias&#x201D; and &#x201C;applicability concerns&#x201D; for each domain in every study is available in <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>.</p></sec><sec id="s3-6"><title>Results of the Included Studies</title><p>As shown in <xref ref-type="table" rid="table4">Table 4</xref>, the accuracy of AI models in detecting or subtyping molecular markers in adult-type gliomas ranged from 58% to 100%, with a mean accuracy of 85.46%. Sensitivity ranged from 62% to 100%, averaging 84.55%, while specificity ranged from 46% to 100%, with a mean of 86.03%. 
The AUC values ranged from 46% to 99%, with an average of 86.53%. The hybrid model demonstrated the highest accuracy (92.80%) and sensitivity (89.62%), while the CNN model achieved the highest specificity (89.30%). Logistic regression outperformed other models in terms of AUC (93.05%). Among specific CNN architectures, DenseNet121 achieved the highest accuracy (90.82%), sensitivity (89.28%), and AUC (91.05%). Moreover, AI models that used multimodal data outperformed those that used unimodal data in terms of sensitivity (90.15% vs 84.31%) and AUC (88.93% vs 86.29%). In outcome prediction, AI models showed better performance in detecting IDH mutations compared to 1p/19q codeletions, with higher accuracy (86.13% vs 81.63%), specificity (86.61% vs 78.11%), and AUC (86.74% vs 85.15%). AI models also performed better in IDH subtyping for multiclass classification compared to binary classification in terms of accuracy (91.98% vs 84.02%) and sensitivity (93.41% vs 80.18%). 
However, these differences should be interpreted as descriptive trends rather than statistically validated superiority, as formal between-group comparisons were not feasible.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Summary of overall performance of all models based on number of classes, type of data, and artificial intelligence (AI) algorithms in the target variable<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup>.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2">Groups</td><td align="left" valign="bottom" colspan="3">Accuracy</td><td align="left" valign="bottom" colspan="3">Sensitivity</td><td align="left" valign="bottom" colspan="3">Specificity</td><td align="left" valign="bottom" colspan="3">AUC<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td></tr><tr><td align="left" valign="bottom">Number of studies</td><td align="left" valign="bottom">Mean % (SD)</td><td align="left" valign="bottom">Range</td><td align="left" valign="bottom">Number of studies</td><td align="left" valign="bottom">Mean % (SD)</td><td align="left" valign="bottom">Range</td><td align="left" valign="bottom">Number of studies</td><td align="left" valign="bottom">Mean % (SD)</td><td align="left" valign="bottom">Range</td><td align="left" valign="bottom">Number of studies</td><td align="left" valign="bottom">Mean % (SD)</td><td align="left" valign="bottom">Range</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="13">Algorithms</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>CNNs</italic><bold><sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup></bold></td><td align="left" valign="top">8</td><td align="left" valign="top">85.60 (0.09)</td><td align="left" valign="top">0.72&#x2010;1.00</td><td align="left" valign="top">8</td><td align="left" valign="top">84.09 
(0.11)</td><td align="left" valign="top">0.64&#x2010;1.00</td><td align="left" valign="top">7</td><td align="left" valign="top"><italic>89.30</italic><sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup> (0.09)</td><td align="left" valign="top">0.75&#x2010;1.00</td><td align="left" valign="top">8</td><td align="left" valign="top">86.54 (0.06)</td><td align="left" valign="top">0.76&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MIL<sup><xref ref-type="table-fn" rid="table4fn5">e</xref></sup></td><td align="left" valign="top">10</td><td align="left" valign="top">80.83 (0.09)</td><td align="left" valign="top">0.58&#x2010;0.91</td><td align="left" valign="top">8</td><td align="left" valign="top">82.54 (0.13)</td><td align="left" valign="top">0.62&#x2010;0.99</td><td align="left" valign="top">3</td><td align="left" valign="top">68.69 (0.23)</td><td align="left" valign="top">0.46&#x2010;0.94</td><td align="left" valign="top">10</td><td align="left" valign="top">81.73 (0.16)</td><td align="left" valign="top">0.46&#x2010;0.97</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Transformer</td><td align="left" valign="top">4</td><td align="left" valign="top">88.27 (0.03)</td><td align="left" valign="top">0.83&#x2010;0.92</td><td align="left" valign="top">3</td><td align="left" valign="top">81.62 (0.10)</td><td align="left" valign="top">0.69&#x2010;0.92</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">3</td><td align="left" valign="top">92.37 (0.04)</td><td align="left" valign="top">0.89&#x2010;0.96</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>Hybrid model</italic></td><td align="left" 
valign="top">3</td><td align="left" valign="top"><italic>92.80</italic> (0.05)</td><td align="left" valign="top">0.85&#x2010;0.97</td><td align="left" valign="top">3</td><td align="left" valign="top"><italic>89.62</italic> (0.08)</td><td align="left" valign="top">0.81&#x2010;0.97</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">2</td><td align="left" valign="top">92.20 (0.04)</td><td align="left" valign="top">0.89&#x2010;0.95</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Ensemble model</td><td align="left" valign="top">2</td><td align="left" valign="top">86.35 (0.12)</td><td align="left" valign="top">0.78&#x2010;0.95</td><td align="left" valign="top">2</td><td align="left" valign="top">89.60 (0.05)</td><td align="left" valign="top">0.86&#x2010;0.93</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">2</td><td align="left" valign="top">92.00 (0.09)</td><td align="left" valign="top">0.85&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>Logistic regression</italic></td><td align="left" valign="top">2</td><td align="left" valign="top">87.15 (0.01)</td><td align="left" valign="top">0.86&#x2010;0.88</td><td align="left" valign="top">2</td><td align="left" valign="top">86.00 (0.12)</td><td align="left" valign="top">0.78&#x2010;0.94</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">2</td><td align="left" valign="top"><italic>93.05</italic> (0.00)</td><td align="left" valign="top">0.93&#x2010;0.931</td></tr><tr><td align="left" valign="top" colspan="13">CNN 
models</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>ResNet (ResNet,18,50)</italic></td><td align="left" valign="top">7</td><td align="left" valign="top">87.04 (0.09)</td><td align="left" valign="top">0.72&#x2010;1.00</td><td align="left" valign="top">6</td><td align="left" valign="top">87.51 (0.11)</td><td align="left" valign="top">0.69&#x2010;1.00</td><td align="left" valign="top">5</td><td align="left" valign="top"><italic>91.48</italic> (0.07)</td><td align="left" valign="top">0.84&#x2010;1.00</td><td align="left" valign="top">6</td><td align="left" valign="top">90.19 (0.06)</td><td align="left" valign="top">0.81&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG<sup><xref ref-type="table-fn" rid="table4fn6">f</xref></sup> (VGG16,19)</td><td align="left" valign="top">4</td><td align="left" valign="top">89.67 (0.09)</td><td align="left" valign="top">0.79&#x2010;1.00</td><td align="left" valign="top">4</td><td align="left" valign="top">87.03 (0.13)</td><td align="left" valign="top">0.69&#x2010;1.00</td><td align="left" valign="top">2</td><td align="left" valign="top">90.65 (0.13)</td><td align="left" valign="top">0.81&#x2010;1.00</td><td align="left" valign="top">2</td><td align="left" valign="top">81.55 (0.01)</td><td align="left" valign="top">0.81&#x2010;0.82</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><italic>DenseNet121</italic><bold><sup><xref ref-type="table-fn" rid="table4fn7">g</xref></sup></bold></td><td align="left" valign="top">3</td><td align="left" valign="top"><italic>90.82</italic> (0.10)</td><td align="left" valign="top">0.79&#x2010;0.97</td><td align="left" valign="top">3</td><td align="left" valign="top"><italic>89.28</italic> (0.10)</td><td align="left" 
valign="top">0.78&#x2010;0.97</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">2</td><td align="left" valign="top"><italic>91.05</italic> (0.08)</td><td align="left" valign="top">0.86&#x2010;0.97</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">3</td><td align="left" valign="top">80.15 (0.05)</td><td align="left" valign="top">0.77&#x2010;0.86</td><td align="left" valign="top">3</td><td align="left" valign="top">75.82 (0.11)</td><td align="left" valign="top">0.64&#x2010;0.86</td><td align="left" valign="top">&#x2014;-</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">3</td><td align="left" valign="top">83.58 (0.03)</td><td align="left" valign="top">0.80&#x2010;0.87</td></tr><tr><td align="left" valign="top" colspan="13">Type of data</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unimodel data</td><td align="left" valign="top">20</td><td align="left" valign="top"><italic>85.67</italic> (0.09)</td><td align="left" valign="top">0.58&#x2010;1.00</td><td align="left" valign="top">16</td><td align="left" valign="top">84.31 (0.11)</td><td align="left" valign="top">0.62&#x2010;1.00</td><td align="left" valign="top">9</td><td align="left" valign="top"><italic>86.27</italic> (0.15)</td><td align="left" valign="top">0.46&#x2010;1.00</td><td align="left" valign="top">16</td><td align="left" valign="top">86.29 (0.10)</td><td align="left" valign="top">0.46&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multimodel data</td><td align="left" valign="top">5</td><td align="left" valign="top">83.64 
(0.05)</td><td align="left" valign="top">0.78&#x2010;0.90</td><td align="left" valign="top">2</td><td align="left" valign="top"><italic>90.15</italic> (0.06)</td><td align="left" valign="top">0.86&#x2010;0.94</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">4</td><td align="left" valign="top"><italic>88.93</italic> (0.03)</td><td align="left" valign="top">0.85&#x2010;0.93</td></tr><tr><td align="left" valign="top" colspan="13">Outcome prediction</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>1p/19q</td><td align="left" valign="top">6</td><td align="left" valign="top">81.63 (0.12)</td><td align="left" valign="top">0.58&#x2010;1.00</td><td align="left" valign="top">6</td><td align="left" valign="top"><italic>87.64</italic> (0.09)</td><td align="left" valign="top">0.75&#x2010;1.00</td><td align="left" valign="top">3</td><td align="left" valign="top">78.11 (0.28)</td><td align="left" valign="top">0.46&#x2010;1.00</td><td align="left" valign="top">5</td><td align="left" valign="top">85.15 (0.19)</td><td align="left" valign="top">0.46&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>IDH<sup><xref ref-type="table-fn" rid="table4fn8">h</xref></sup></td><td align="left" valign="top">19</td><td align="left" valign="top"><italic>86.13</italic> (0.08)</td><td align="left" valign="top">0.72&#x2010;1.00</td><td align="left" valign="top">14</td><td align="left" valign="top">83.96 (0.11)</td><td align="left" valign="top">0.62&#x2010;1.00</td><td align="left" valign="top">8</td><td align="left" valign="top"><italic>86.61</italic> (0.12)</td><td align="left" valign="top">0.52&#x2010;1.00</td><td align="left" valign="top">16</td><td align="left" valign="top"><italic>86.74</italic> 
(0.08)</td><td align="left" valign="top">0.52&#x2010;0.99</td></tr><tr><td align="left" valign="top" colspan="13">Class prediction</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Binary IDH</td><td align="left" valign="top">16</td><td align="left" valign="top">84.02 (0.07)</td><td align="left" valign="top">0.72&#x2010;1.00</td><td align="left" valign="top">12</td><td align="left" valign="top">80.18 (0.11)</td><td align="left" valign="top">0.62&#x2010;1.00</td><td align="left" valign="top">8</td><td align="left" valign="top"><italic>86.61</italic> (0.12)</td><td align="left" valign="top">0.52&#x2010;1.00</td><td align="left" valign="top">15</td><td align="left" valign="top"><italic>86.87</italic> (0.08)</td><td align="left" valign="top">0.52&#x2010;0.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multiclass IDH</td><td align="left" valign="top">3</td><td align="left" valign="top"><italic>91.98</italic> (0.07)</td><td align="left" valign="top">0.75&#x2010;1.00</td><td align="left" valign="top">2</td><td align="left" valign="top"><italic>93.41</italic> (0.05)</td><td align="left" valign="top">0.85&#x2010;1.00</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"><italic>Overall</italic></td><td align="left" valign="top"><italic>20</italic></td><td align="left" valign="top"><italic>85.46</italic> (0.08)</td><td align="left" valign="top"><italic>0.58&#x2010;1.00</italic></td><td align="left" valign="top"><italic>16</italic></td><td align="left" valign="top"><italic>84.55</italic> (0.01)</td><td align="left" valign="top"><italic>0.62&#x2010;1.00</italic></td><td 
align="left" valign="top"><italic>9</italic></td><td align="left" valign="top"><italic>86.03</italic> (0.15)</td><td align="left" valign="top"><italic>0.46&#x2010;1.00</italic></td><td align="left" valign="top"><italic>17</italic></td><td align="left" valign="top"><italic>86.53</italic> (0.10)</td><td align="left" valign="top"><italic>0.46&#x2010;0.99</italic></td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>Specificity not reported in all studies.</p></fn><fn id="table4fn2"><p><sup>b</sup>AUC: area under the curve.</p></fn><fn id="table4fn3"><p><sup>c</sup>CNNs: convolutional neural networks.</p></fn><fn id="table4fn4"><p><sup>d</sup>Values in italics indicate highest score. </p></fn><fn id="table4fn5"><p><sup>e</sup>MIL: multiple instance learning.</p></fn><fn id="table4fn6"><p><sup>f</sup>VGG: visual geometry group.</p></fn><fn id="table4fn7"><p><sup>g</sup>DenseNet121: densely connected convolutional network 121.</p></fn><fn id="table4fn8"><p><sup>h</sup>IDH: isocitrate dehydrogenase.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This review aimed to systematically evaluate the performance of AI models in detecting and classifying IDH mutation status and 1p/19q codeletion in adult-type gliomas using histopathological images. Our analysis indicates that these models demonstrate promising diagnostic capabilities, achieving pooled mean values of 85.46% for accuracy, 84.55% for sensitivity, 86.03% for specificity, and 86.53% for the area under the receiver operating characteristic curve. As noticed, the performance across all metrics was comparable, suggesting that AI models offer balanced diagnostic capabilities. Despite these promising results, they remain below the commonly cited thresholds for clinical adoption, which often exceed 90% for high-stakes diagnostic tasks [<xref ref-type="bibr" rid="ref42">42</xref>]. 
As such, the current evidence is not sufficient to support stand-alone clinical decision-making. Therefore, the results of this meta-analysis should be interpreted as early indicators of feasibility rather than clinically actionable performance, underscoring the substantial methodological and validation work still required before these models can be safely integrated into clinical workflows.</p><p>These results are generally consistent with prior reviews. Specifically, a systematic review conducted by Farahani et al [<xref ref-type="bibr" rid="ref43">43</xref>] reported a similar performance in IDH prediction using MRI, with pooled sensitivity of 84% and specificity of 87% and an AUC of 0.89. Further, Chen et al [<xref ref-type="bibr" rid="ref44">44</xref>] found pooled sensitivity of 83%, specificity of 84%, and AUC of 0.90 for predicting IDH mutations using machine learning&#x2013;based radiomics. Taken together, these comparisons suggest that our findings are in line with the broader body of research, reinforcing the reliability and clinical promise of AI-based histopathological approaches for glioma molecular classification.</p><p>Individual studies showed considerable variability across performance metrics; for instance, specificity values ranged from 46% to 100%. This variability likely stems from population differences, sample sizes, model architectures, and the inherent challenge of predicting rare molecular alterations (eg, telomerase reverse transcriptase mutations) from histopathology images, compounded by class imbalance, domain shift (ie, performance degradation when models are applied to data from different sources), and the absence of distinct histological correlates. 
While accuracy and sensitivity remain higher for well-characterized targets (eg, IDH, 1p/19q), specificity suffers when the model encounters rare or ambiguous cases in external validation.</p><p>Hybrid models achieved the highest mean accuracy (92.80%) and sensitivity (89.62%), while logistic regression models achieved the top mean AUC just slightly higher than hybrid models (93.05% vs 92.20%). These results suggest that hybrid models deliver the strongest overall performance. Hybrid models are defined as AI frameworks that integrate multiple deep learning architectures to leverage the complementary strengths of each component. In the context of glioma diagnosis, most recent reviews have highlighted the effectiveness of hybrid models combining CNNs with transformer-based architectures [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>]. CNNs excel at extracting local spatial features from histopathological images, such as cellular morphology and texture patterns, while transformers are adept at capturing long-range dependencies and global contextual information through their self-attention mechanisms. By combining these capabilities, hybrid CNN-transformer models can achieve a more comprehensive understanding of complex tissue structures and molecular characteristics within gliomas, leading to improved predictive performance. Our findings align with a recent review by Redlich et al [<xref ref-type="bibr" rid="ref45">45</xref>], which analyzed 83 studies on AI-based methods for whole-slide histopathology image analysis of gliomas. 
The review highlighted that hybrid models combining CNNs with other architectures, including transformers, consistently outperform single-architecture models in tasks such as subtyping, grading, and molecular marker prediction [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>Among pure CNN backbones, DenseNet121 demonstrated the best performance, achieving an accuracy of 90.82%, sensitivity of 89.28%, and an AUC of 91.05%. DenseNet121&#x2019;s superior performance may be due to its unique architectural features, notably dense connectivity, which enhance training dynamics, feature representation, and parameter efficiency.</p><p>In terms of data modalities employed by AI models, our analysis demonstrated that multimodal approaches exhibited superior performance in sensitivity (90.15% vs 84.31%) and AUC (88.93% vs 86.29%) compared to unimodal models. This suggests that multimodal models benefit from richer and more diverse feature representations, thereby enhancing their diagnostic capability. However, unimodal models achieved slightly higher accuracy (85.67% vs 83.64%), which may be attributed to the fact that none of the five multimodal studies included in the accuracy analysis employed hybrid modeling. Instead, they utilized simple model ensembles or comparisons involving architectures such as multilayer perceptron, logistic regression, random forests, and CNNs. Our findings are consistent with existing literature. For instance, a systematic review by Alleman et al [<xref ref-type="bibr" rid="ref48">48</xref>] concluded that multimodal deep learning approaches enhance prognostic accuracy in glioma patients compared to unimodal models, particularly in predicting overall survival. Similarly, d&#x2019;Este et al [<xref ref-type="bibr" rid="ref49">49</xref>] demonstrated that combining multimodal imaging with AI techniques improves visualization of glioma infiltration, surpassing the capabilities of single-modality approaches. 
Furthermore, the GlioMT model, introduced by Byeon et al [<xref ref-type="bibr" rid="ref50">50</xref>], which integrates imaging and clinical data, exhibited superior performance in predicting adult-type diffuse gliomas compared to conventional CNNs and vision transformers.</p><p>Regarding outcome prediction and AI model performance in detecting molecular markers, our analysis revealed that AI models demonstrated stronger performance in identifying IDH mutations compared to 1p/19q codeletions. Specifically, IDH detection yielded higher accuracy (86.13% vs 81.63%), specificity (86.61% vs 78.11%), and AUC (86.74% vs 85.15%). In contrast, AI models achieved slightly better sensitivity for 1p/19q codeletion detection (87.64% vs 83.96%). The discrepancy in performance between IDH mutation and 1p/19q codeletion detection may be attributed to several factors. IDH mutations often result in more distinct histopathological features, making them more amenable to detection by AI models. In contrast, 1p/19q codeletions may not produce as pronounced morphological changes, posing a greater challenge for AI-based detection. The findings of published studies align with our results. For example, the review by Farahani et al [<xref ref-type="bibr" rid="ref43">43</xref>] showed higher performance in identifying IDH mutations compared to 1p/19q codeletions using MRI in terms of sensitivity (84% vs 76%) and specificity (87% vs 85%).</p><p>In our systematic review, CNNs and MIL algorithms were the most commonly used in AI models, likely due to their suitability for histopathological image analysis. CNNs are designed to capture spatial hierarchies in image data through convolutional layers, making them effective at identifying morphological patterns such as cellular structures and tissue architecture that are critical in tumor classification. 
Histopathological slides of gliomas are typically of high resolution and contain complex visual features, which CNNs can learn to recognize with high accuracy. Meanwhile, MIL frameworks address the challenge of weakly labeled data, which is common in medical imaging, where only slide-level labels (eg, tumor type or mutation status) are available, rather than precise annotations at the pixel or region level. MIL allows models to learn from sets of image patches (instances) while associating predictions with the whole slide (bag), making it particularly useful in computational pathology where exhaustive labeling is impractical.</p></sec><sec id="s4-2"><title>Practical and Research Implications</title><p>While our results are encouraging, they remain suboptimal and arguably below what is considered acceptable in clinical practice [<xref ref-type="bibr" rid="ref42">42</xref>]. Thus, our findings should be interpreted with caution due to several limitations. First, 15 of the 22 studies included in the review utilized the same dataset, raising concerns regarding the generalizability of the results. Second, many of the studies were based on small sample sizes, limiting the robustness and reliability of the conclusions. Third, several key findings were derived from fewer than 5 studies, reducing the strength of the evidence. Lastly, the aggregation of results using simple arithmetic means rather than formal meta-analytic techniques may have limited the ability to draw definitive conclusions about the overall efficacy of AI models.</p><p>Given these limitations, AI should not be used as a stand-alone diagnostic tool for the detection or subtyping of adult-type gliomas but rather as a complementary approach alongside conventional methods. Future research should prioritize the development of AI models using larger and more diverse datasets to enhance performance and generalizability. 
Ensuring adequate sample sizes and improving the transparency of reporting, such as including confusion matrices, will also facilitate more rigorous meta-analyses. Notably, the majority of studies reviewed developed AI models exclusively from histopathological images. Developing multimodal AI models that incorporate additional data types (eg, radiological and clinical information) may further enhance diagnostic accuracy. Thus, continued efforts are needed to advance the integration of multimodal data in AI model development for the detection and subtyping of adult-type gliomas.</p><p>In this review, the majority of studies used AI algorithms that were either MIL or CNNs, indicating a research gap in exploring the performance and effectiveness of other types of AI algorithms (eg, transformers, hybrid models, ensemble models [ie, approaches that combine predictions from multiple independently trained models using averaging, voting, or weighted fusion strategies], or logistic regression). Additionally, we observed a lack of research assessing the performance of AI models that use multimodal data, which may have affected the accuracy of the results. Moreover, the number of studies evaluating AI models&#x2019; performance in detecting IDH mutations was noticeably greater than those that used 1p/19q as a predicted outcome. These studies predominantly employed binary IDH classification (ie, IDH mutation vs IDH wild-type), whereas studies using IDH multiclass classification (eg, astrocytoma, oligodendroglioma) were markedly fewer. Future research should address these gaps to produce more accurate and representative findings.</p><p>Our findings that AI hybrid models performed better in detecting adult-type gliomas than other models underscore the need for more advanced approaches to explore their effectiveness and their potential to achieve diagnostic outcomes comparable to those of histopathologists. 
However, their real-world applicability requires consideration of computational and operational demands. These models often require greater processing power (eg, GPU-enabled servers or high-performance cloud computing) and longer inference times compared to conventional CNNs. Such requirements may pose challenges for pathology laboratories lacking advanced digital infrastructure. Nonetheless, many modern clinical centers are increasingly equipped with GPU-accelerated digital pathology ecosystems, and cloud-based deployment can significantly reduce the need for on-site hardware. From a cost-benefit perspective, the additional computational resources required by hybrid models may be justified given their superior accuracy and robustness, particularly for molecular prediction tasks where diagnostic errors carry serious clinical implications. However, successful implementation in routine practice will depend on streamlined model optimization, efficient inference pipelines, and prospective validation of runtime performance to ensure that diagnostic gains meaningfully outweigh operational costs.</p><p>As our findings indicated superior performance of multimodal AI models in sensitivity and AUC, we suggest a complementary approach: multimodal AI models may serve as effective initial diagnostic tools due to their comprehensive data integration, while unimodal models can be employed subsequently for more accurate and specific outcome determination. Moreover, our results reflect a stronger performance of AI models in the detection of IDH mutations compared to 1p/19q codeletion in all metrics except sensitivity. 
Therefore, we suggest that while IDH mutation detection should remain a primary focus for model refinement and deployment due to its balanced high performance, models targeting 1p/19q codeletions may serve as effective initial diagnostic tools in adult-type gliomas due to their higher sensitivity, which is critical in early disease detection.</p><p>Given that the current review focused on IDH mutations and 1p/19q codeletions, we encourage researchers to undertake further reviews covering other glioma molecular markers, including MGMT methylation, ATRX, and telomerase reverse transcriptase mutations. Moreover, further research should extend to other types of brain tumors, such as meningiomas, pituitary adenomas, and schwannomas. These tumors differ significantly in terms of biological behavior, treatment options, and clinical outcomes, warranting a comprehensive evaluation of AI&#x2019;s potential in diagnosing and subtyping each tumor type.</p></sec><sec id="s4-3"><title>Limitations</title><p>This systematic review is subject to several limitations. First, the generalizability of the findings is limited, as approximately 68.18% of the included studies relied on the same dataset, and the number of included studies is relatively small (n=22). This raises concerns about the external validity of the results and their applicability to the broader population of adult-type glioma patients. To address the potential duplication of patients or slides across studies (ie, those relying on The Cancer Genome Atlas), we reviewed the data sources reported by each study to identify where overlap was likely. Because individual-level data were not available, we were unable to verify or remove duplicate cases. Instead, we treated studies drawing from the same dataset as nonindependent samples and interpreted pooled estimates with caution. As a result, our summary metrics should be viewed as descriptive indicators rather than independent, unbiased effect estimates. 
Second, the review focused exclusively on adult-type gliomas, thereby restricting the ability to draw conclusions about the performance of AI in detecting other adult brain tumors or brain tumors in the general population. Third, the analysis was confined to studies utilizing histopathological images, limiting the assessment of AI performance with alternative imaging modalities, such as MRI or computed tomography scans. Fourth, meta-analysis was not feasible due to incomplete reporting of key performance metrics across studies. Instead, mean values were calculated to provide a descriptive summary, though this approach lacks the statistical rigor and precision of formal meta-analysis. Finally, the inclusion of only English-language studies may have introduced language bias and led to the exclusion of relevant research published in other languages, potentially affecting the comprehensiveness of the review.</p></sec><sec id="s4-4"><title>Conclusion</title><p>This systematic review demonstrates that AI models applied to histopathological images show strong potential for the molecular classification of adult-type gliomas, particularly in detecting IDH mutations and 1p/19q codeletions. Our findings indicate that while overall diagnostic performance is promising&#x2014;especially for IDH mutations&#x2014;variability in model performance and methodological limitations across studies temper the generalizability of results. Hybrid models and multimodal approaches emerged as particularly effective strategies, offering enhanced sensitivity and discriminative capability. However, the limited use of diverse datasets, the predominance of unimodal designs, and the lack of standardized reporting remain critical barriers to clinical translation. 
Given these constraints, AI models should currently serve as complementary tools rather than stand-alone diagnostic systems, functioning primarily to assist pathologists in pattern recognition and decision support while final diagnoses remain clinician-led. Future research should prioritize methodological standardization, broader data integration, and validation across larger, heterogeneous populations to support the clinical deployment of AI in glioma diagnostics.</p></sec></sec></body><back><ack><p>The authors declare the use of generative AI in the research and writing process. According to the GAIDeT taxonomy (2025), the following tasks were delegated to generative artificial intelligence (GAI) tools under full human supervision: Proofreading and editing. The GAI tool used was ChatGPT-4. Responsibility for the final manuscript lies entirely with the authors. GAI tools are not listed as authors and do not bear responsibility for the final outcomes. All intellectual content, study design, data collection, analysis, and final interpretations are the sole responsibility of the authors.</p></ack><notes><sec><title>Funding</title><p>The publication of this article was funded by the Weill Cornell Medicine&#x2013;Qatar Health Sciences Library.</p></sec><sec><title>Data Availability</title><p>The datasets generated during and/or analyzed during the current study are available from the corresponding author upon reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Concept for the paper and experimental design: AA, RAD, and OA</p><p>Data collection and extraction: AA, RAD, and OA</p><p>Data analysis: AA and OA</p><p>Risk of bias assessment: AT</p><p>Writing original draft preparation: AO, RAD, and OA</p><p>Writing-review and editing: AA, AO, RAD, and OA</p><p>Supervision: AA</p><p>All authors agreed to submission of the manuscript.</p></fn><fn fn-type="conflict"><p>AA is an associate editor of <italic>JMIR Nursing</italic> at the time of publication. 
All other authors declare no conflict of interest.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb3">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb4">CNS</term><def><p>central nervous system</p></def></def-item><def-item><term id="abb5">DBTA</term><def><p>Digital Brain Tumor Atlas</p></def></def-item><def-item><term id="abb6">DenseNet121</term><def><p>densely connected convolutional network 121</p></def></def-item><def-item><term id="abb7">IDH</term><def><p>isocitrate dehydrogenase</p></def></def-item><def-item><term id="abb8">MIL</term><def><p>multiple instance learning</p></def></def-item><def-item><term id="abb9">MRI</term><def><p>magnetic resonance imaging</p></def></def-item><def-item><term id="abb10">PRISMA-DTA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses&#x2013;Extension for Diagnostic Test Accuracy</p></def></def-item><def-item><term id="abb11">PROBAST</term><def><p>Prediction Model Risk of Bias Assessment Tool</p></def></def-item><def-item><term id="abb12">PROSPERO</term><def><p>International Prospective Register of Systematic Reviews</p></def></def-item><def-item><term id="abb13">QUADAS-2</term><def><p>Quality Assessment of Diagnostic Accuracy Studies 2</p></def></def-item><def-item><term id="abb14">WHO</term><def><p>World Health Organization</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ostrom</surname><given-names>QT</given-names> </name><name name-style="western"><surname>Cioffi</surname><given-names>G</given-names> </name><name 
name-style="western"><surname>Waite</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kruchko</surname><given-names>C</given-names> </name><name name-style="western"><surname>Barnholtz-Sloan</surname><given-names>JS</given-names> </name></person-group><article-title>CBTRUS statistical report: primary brain and other central nervous system tumors diagnosed in the United States in 2014&#x2013;2018</article-title><source>Neuro Oncol</source><year>2021</year><month>10</month><day>5</day><volume>23</volume><issue>12 Suppl 2</issue><fpage>iii1</fpage><lpage>iii105</lpage><pub-id pub-id-type="doi">10.1093/neuonc/noab200</pub-id><pub-id pub-id-type="medline">34608945</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Reynoso-Nover&#x00F3;n</surname><given-names>N</given-names> </name><name name-style="western"><surname>Mohar-Betancourt</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ortiz-Rafael</surname><given-names>J</given-names> </name></person-group><article-title>Epidemiology of brain tumors</article-title><source>Principles of Neuro-Oncology</source><year>2020</year><publisher-name>Springer</publisher-name><fpage>15</fpage><lpage>25</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-54879-7_2</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lapointe</surname><given-names>S</given-names> </name><name name-style="western"><surname>Perry</surname><given-names>A</given-names> </name><name name-style="western"><surname>Butowski</surname><given-names>NA</given-names> </name></person-group><article-title>Primary brain tumours in adults</article-title><source>The 
Lancet</source><year>2018</year><month>08</month><volume>392</volume><issue>10145</issue><fpage>432</fpage><lpage>446</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(18)30990-5</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perry</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wesseling</surname><given-names>P</given-names> </name></person-group><article-title>Histologic classification of gliomas</article-title><source>Handb Clin Neurol</source><year>2016</year><volume>134</volume><fpage>71</fpage><lpage>95</lpage><pub-id pub-id-type="doi">10.1016/B978-0-12-802997-8.00005-0</pub-id><pub-id pub-id-type="medline">26948349</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Smith-Cohn</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Colman</surname><given-names>H</given-names> </name></person-group><article-title>Glioma subclassifications and their clinical significance</article-title><source>Neurotherapeutics</source><year>2017</year><month>04</month><volume>14</volume><issue>2</issue><fpage>284</fpage><lpage>297</lpage><pub-id pub-id-type="doi">10.1007/s13311-017-0519-x</pub-id><pub-id pub-id-type="medline">28281173</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kalidindi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Or</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>Babak</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mason</surname><given-names>W</given-names> </name></person-group><article-title>Molecular classification of diffuse gliomas</article-title><source>Can J Neurol Sci</source><year>2020</year><month>07</month><volume>47</volume><issue>4</issue><fpage>464</fpage><lpage>473</lpage><pub-id pub-id-type="doi">10.1017/cjn.2020.10</pub-id><pub-id pub-id-type="medline">31918786</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Antonelli</surname><given-names>M</given-names> </name><name name-style="western"><surname>Poliani</surname><given-names>PL</given-names> </name></person-group><article-title>Adult type diffuse gliomas in the new 2021 WHO Classification</article-title><source>Pathologica</source><year>2022</year><month>12</month><volume>114</volume><issue>6</issue><fpage>397</fpage><lpage>409</lpage><pub-id pub-id-type="doi">10.32074/1591-951X-823</pub-id><pub-id pub-id-type="medline">36534419</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hollon</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Chowdury</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Artificial-intelligence-based molecular classification of diffuse gliomas using rapid, label-free optical imaging</article-title><source>Nat Med</source><year>2023</year><month>04</month><volume>29</volume><issue>4</issue><fpage>828</fpage><lpage>832</lpage><pub-id pub-id-type="doi">10.1038/s41591-023-02252-4</pub-id><pub-id pub-id-type="medline">36959422</pub-id></nlm-citation></ref><ref 
id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Koski</surname><given-names>E</given-names> </name><name name-style="western"><surname>Murphy</surname><given-names>J</given-names> </name></person-group><article-title>AI in healthcare</article-title><source>Stud Health Technol Inform</source><year>2021</year><month>12</month><day>15</day><volume>284</volume><fpage>295</fpage><lpage>299</lpage><pub-id pub-id-type="doi">10.3233/SHTI210726</pub-id><pub-id pub-id-type="medline">34920529</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Kempen</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Post</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mannil</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Accuracy of machine learning algorithms for the classification of molecular features of gliomas on MRI: a systematic literature review and meta-analysis</article-title><source>Cancers (Basel)</source><year>2021</year><month>05</month><day>26</day><volume>13</volume><issue>11</issue><fpage>2606</fpage><pub-id pub-id-type="doi">10.3390/cancers13112606</pub-id><pub-id pub-id-type="medline">34073309</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farahani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hejazi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Moradizeyveh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Di Ieva</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Fatemizadeh</surname><given-names>E</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name></person-group><article-title>Diagnostic accuracy of deep learning models in predicting glioma molecular markers: a systematic review and meta-analysis</article-title><source>Diagnostics (Basel)</source><year>2025</year><month>03</month><day>21</day><volume>15</volume><issue>7</issue><fpage>797</fpage><pub-id pub-id-type="doi">10.3390/diagnostics15070797</pub-id><pub-id pub-id-type="medline">40218147</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Puustinen</surname><given-names>S</given-names> </name><name name-style="western"><surname>Vrz&#x00E1;kov&#x00E1;</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hyttinen</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Hyperspectral imaging in brain tumor surgery-evidence of machine learning-based performance</article-title><source>World Neurosurg</source><year>2023</year><month>07</month><volume>175</volume><fpage>e614</fpage><lpage>e635</lpage><pub-id pub-id-type="doi">10.1016/j.wneu.2023.03.149</pub-id><pub-id pub-id-type="medline">37030483</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sahu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mohan</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Current status of DNA methylation profiling in neuro-oncology as a diagnostic support tool: a review</article-title><source>Neuro oncol 
Pract</source><year>2023</year><month>12</month><volume>10</volume><issue>6</issue><fpage>518</fpage><lpage>526</lpage><pub-id pub-id-type="doi">10.1093/nop/npad040</pub-id><pub-id pub-id-type="medline">38009119</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>M</given-names> </name></person-group><article-title>Deep learning in precision medicine and focus on glioma</article-title><source>Bioeng Transl Med</source><year>2023</year><month>09</month><volume>8</volume><issue>5</issue><fpage>e10553</fpage><pub-id pub-id-type="doi">10.1002/btm2.10553</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>J</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Song</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy and potential covariates for machine learning to identify IDH mutations in glioma patients: evidence from a meta-analysis</article-title><source>Eur Radiol</source><year>2020</year><month>08</month><volume>30</volume><issue>8</issue><fpage>4664</fpage><lpage>4674</lpage><pub-id pub-id-type="doi">10.1007/s00330-020-06717-9</pub-id><pub-id pub-id-type="medline">32193643</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lv</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Sun</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>M</given-names> </name></person-group><article-title>Insight into deep learning for glioma IDH medical image analysis: a systematic review</article-title><source>Medicine (Baltimore)</source><year>2024</year><month>02</month><day>16</day><volume>103</volume><issue>7</issue><fpage>e37150</fpage><pub-id pub-id-type="doi">10.1097/MD.0000000000037150</pub-id><pub-id pub-id-type="medline">38363910</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guha</surname><given-names>A</given-names> </name><name name-style="western"><surname>Halder</surname><given-names>S</given-names> </name><name name-style="western"><surname>Shinde</surname><given-names>SH</given-names> </name><etal/></person-group><article-title>How does deep learning/machine learning perform in comparison to radiologists in distinguishing glioblastomas (or grade IV astrocytomas) from primary CNS lymphomas? 
A meta-analysis and systematic review</article-title><source>Clin Radiol</source><year>2024</year><month>06</month><volume>79</volume><issue>6</issue><fpage>460</fpage><lpage>472</lpage><pub-id pub-id-type="doi">10.1016/j.crad.2024.03.007</pub-id><pub-id pub-id-type="medline">38614870</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>MDF</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thombs</surname><given-names>BD</given-names> </name><etal/></person-group><article-title>Preferred reporting items for a systematic review and meta-analysis of diagnostic test accuracy studies: the PRISMA-DTA statement</article-title><source>JAMA</source><year>2018</year><month>01</month><day>23</day><volume>319</volume><issue>4</issue><fpage>388</fpage><lpage>396</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id><pub-id pub-id-type="medline">29362800</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiting</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Westwood</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title><source>Ann Intern Med</source><year>2011</year><month>10</month><day>18</day><volume>155</volume><issue>8</issue><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id><pub-id pub-id-type="medline">22007046</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wolff</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name><name name-style="western"><surname>Riley</surname><given-names>RD</given-names> </name><etal/></person-group><article-title>PROBAST: a tool to assess the risk of bias and applicability of prediction model studies</article-title><source>Ann Intern Med</source><year>2019</year><month>01</month><day>1</day><volume>170</volume><issue>1</issue><fpage>51</fpage><lpage>58</lpage><pub-id pub-id-type="doi">10.7326/M18-1376</pub-id><pub-id pub-id-type="medline">30596875</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Albuquerque</surname><given-names>T</given-names> </name><name name-style="western"><surname>Fang</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Wiestler</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Multimodal context-aware detection of glioma biomarkers using MRI and WSI</article-title><year>2023</year><conf-name>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2023 Workshops</conf-name><conf-date>Oct 8-12, 2023</conf-date><conf-loc>Vancouver, BC, Canada</conf-loc><fpage>157</fpage><lpage>167</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-47425-5_15</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chitnis</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dash</surname><given-names>T</given-names> 
</name><etal/></person-group><article-title>Domain-specific pre-training improves confidence in whole slide image classification</article-title><year>2023</year><conf-name>2023 45th Annual International Conference of the IEEE Engineering in Medicine &#x0026; Biology Society (EMBC)</conf-name><conf-date>Jul 24-27, 2023</conf-date><pub-id pub-id-type="doi">10.1109/EMBC40787.2023.10340659</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cui</surname><given-names>D</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>G</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>L</given-names> </name></person-group><article-title>A multiple-instance learning-based convolutional neural network model to detect the IDH1 mutation in the histopathology images of glioma tissues</article-title><source>J Comput Biol</source><year>2020</year><month>08</month><volume>27</volume><issue>8</issue><fpage>1264</fpage><lpage>1272</lpage><pub-id pub-id-type="doi">10.1089/cmb.2019.0410</pub-id><pub-id pub-id-type="medline">31905004</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Despotovic</surname><given-names>V</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>SY</given-names> </name><name name-style="western"><surname>Hau</surname><given-names>AC</given-names> </name><etal/></person-group><article-title>Glioma subtype classification from histopathological images using in-domain and out-of-domain transfer learning: an experimental 
study</article-title><source>Heliyon</source><year>2024</year><month>03</month><day>15</day><volume>10</volume><issue>5</issue><fpage>e27515</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e27515</pub-id><pub-id pub-id-type="medline">38562501</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Fang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Deep learning predicts biomarker status and discovers related histomorphology characteristics for low-grade glioma</article-title><source>arXiv</source><comment>Preprint posted online on  Oct 11, 2023</comment><pub-id pub-id-type="doi">10.48550/arXiv.2310.07464</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Faust</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Dent</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Integrating morphologic and molecular histopathological features through whole slide image registration and deep learning</article-title><source>Neurooncol Adv</source><year>2022</year><volume>4</volume><issue>1</issue><fpage>vdac001</fpage><pub-id pub-id-type="doi">10.1093/noajnl/vdac001</pub-id><pub-id pub-id-type="medline">35156037</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hewitt</surname><given-names>KJ</given-names> </name><name 
name-style="western"><surname>L&#x00F6;ffler</surname><given-names>CML</given-names> </name><name name-style="western"><surname>Muti</surname><given-names>HS</given-names> </name><etal/></person-group><article-title>Direct image to subtype prediction for brain tumors using deep learning</article-title><source>Neurooncol Adv</source><year>2023</year><volume>5</volume><issue>1</issue><fpage>vdad139</fpage><pub-id pub-id-type="doi">10.1093/noajnl/vdad139</pub-id><pub-id pub-id-type="medline">38106649</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Innani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Baheti</surname><given-names>B</given-names> </name><name name-style="western"><surname>Nasrallah</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Bakas</surname><given-names>S</given-names> </name></person-group><article-title>Weakly supervised IDH-status glioma classification from H&#x0026;E-stained whole slide images</article-title><year>2024</year><conf-name>2024 IEEE International Symposium on Biomedical Imaging (ISBI)</conf-name><conf-date>May 27-30, 2024</conf-date><pub-id pub-id-type="doi">10.1109/ISBI56570.2024.10635869</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zanazzi</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Hassanpour</surname><given-names>S</given-names> </name></person-group><article-title>Predicting prognosis and IDH mutation status for patients with lower-grade gliomas using whole slide images</article-title><source>Sci 
Rep</source><year>2021</year><month>08</month><day>19</day><volume>11</volume><issue>1</issue><fpage>16849</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-95948-x</pub-id><pub-id pub-id-type="medline">34413349</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jungo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Hewer</surname><given-names>E</given-names> </name></person-group><article-title>Code-free machine learning for classification of central nervous system histopathology images</article-title><source>J Neuropathol Exp Neurol</source><year>2023</year><month>02</month><day>21</day><volume>82</volume><issue>3</issue><fpage>221</fpage><lpage>230</lpage><pub-id pub-id-type="doi">10.1093/jnen/nlac131</pub-id><pub-id pub-id-type="medline">36734664</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ahn</surname><given-names>S</given-names> </name><name name-style="western"><surname>Uh</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>SH</given-names> </name></person-group><article-title>Efficient diagnosis of IDH-mutant gliomas: 1p/19qNET assesses 1p/19q codeletion status using weakly-supervised learning</article-title><source>NPJ Precis Onc</source><year>2023</year><volume>7</volume><issue>1</issue><fpage>94</fpage><pub-id pub-id-type="doi">10.1038/s41698-023-00450-4</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name 
name-style="western"><surname>Krebs</surname><given-names>O</given-names> </name><name name-style="western"><surname>Agarwal</surname><given-names>S</given-names> </name><name name-style="western"><surname>Tiwari</surname><given-names>P</given-names> </name></person-group><article-title>Self-supervised deep learning to predict molecular markers from routine histopathology slides for high-grade glioma tumors</article-title><conf-name>Proceedings Volume 12471, Medical Imaging 2023: Digital and Computational Pathology</conf-name><conf-date>Feb 19-23, 2023</conf-date><pub-id pub-id-type="doi">10.1117/12.2653929</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Cong</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Vision transformer-based weakly supervised histopathological image analysis of primary brain tumors</article-title><source>iScience</source><year>2023</year><month>01</month><volume>26</volume><issue>1</issue><fpage>105872</fpage><pub-id pub-id-type="doi">10.1016/j.isci.2022.105872</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liechty</surname><given-names>B</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Machine learning can aid in prediction of IDH mutation from H&#x0026;E-stained histology slides in infiltrating gliomas</article-title><source>Sci 
Rep</source><year>2022</year><month>12</month><day>31</day><volume>12</volume><issue>1</issue><fpage>22623</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-26170-6</pub-id><pub-id pub-id-type="medline">36587030</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Diao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Abera</surname><given-names>DE</given-names> </name><name name-style="western"><surname>Racoceanu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Qin</surname><given-names>W</given-names> </name></person-group><article-title>Multi-scale feature fusion for prediction of IDH1 mutations in glioma histopathological images</article-title><source>Comput Methods Programs Biomed</source><year>2024</year><month>05</month><volume>248</volume><fpage>108116</fpage><pub-id pub-id-type="doi">10.1016/j.cmpb.2024.108116</pub-id><pub-id pub-id-type="medline">38518408</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Sav</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Isocitrate dehydrogenase (IDH) status prediction in histopathology images of gliomas using deep learning</article-title><source>Sci Rep</source><year>2020</year><volume>10</volume><issue>1</issue><fpage>7733</fpage><pub-id pub-id-type="doi">10.1038/s41598-020-64588-y</pub-id></nlm-citation></ref><ref 
id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nakagaki</surname><given-names>R</given-names> </name><name name-style="western"><surname>Debsarkar</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Kawanaka</surname><given-names>H</given-names> </name><name name-style="western"><surname>Aronow</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Prasath</surname><given-names>VBS</given-names> </name></person-group><article-title>Deep learning-based IDH1 gene mutation prediction using histopathological imaging and clinical data</article-title><source>Comput Biol Med</source><year>2024</year><month>09</month><volume>179</volume><fpage>108902</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.108902</pub-id><pub-id pub-id-type="medline">39038392</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pei</surname><given-names>L</given-names> </name><name name-style="western"><surname>Jones</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Shboul</surname><given-names>ZA</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Iftekharuddin</surname><given-names>KM</given-names> </name></person-group><article-title>Deep neural network analysis of pathology images with integrated molecular data for enhanced glioma classification and grading</article-title><source>Front Oncol</source><year>2021</year><volume>11</volume><fpage>668694</fpage><pub-id pub-id-type="doi">10.3389/fonc.2021.668694</pub-id><pub-id pub-id-type="medline">34277415</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Rathore</surname><given-names>S</given-names> </name><name name-style="western"><surname>Iftikhar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nasrallah</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gurcan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rajpoot</surname><given-names>N</given-names> </name><name name-style="western"><surname>Mourelatos</surname><given-names>Z</given-names> </name></person-group><article-title>TMOD-35. Prediction of overall survival, and molecular markers in gliomas via analysis of digital pathology images using deep learning</article-title><source>Neuro Oncol</source><year>2019</year><month>11</month><day>11</day><volume>21</volume><issue>Supplement_6</issue><fpage>vi270</fpage><pub-id pub-id-type="doi">10.1093/neuonc/noz175.1134</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Teng</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Neuropathologist-level integrated classification of adult-type diffuse gliomas using deep learning from whole-slide pathological images</article-title><source>Nat Commun</source><year>2023</year><volume>14</volume><issue>1</issue><fpage>6359</fpage><pub-id pub-id-type="doi">10.1038/s41467-023-41195-9</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>C</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Automated machine-learning framework integrating histopathological and radiological information for predicting IDH1 mutation status in glioma</article-title><source>Front Bioinform</source><year>2021</year><volume>1</volume><fpage>718697</fpage><pub-id pub-id-type="doi">10.3389/fbinf.2021.718697</pub-id><pub-id pub-id-type="medline">36303770</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Ji</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Computational pathology for prediction of isocitrate dehydrogenase gene mutation from whole slide images in adult patients with diffuse glioma</article-title><source>Am J Pathol</source><year>2024</year><month>05</month><volume>194</volume><issue>5</issue><fpage>747</fpage><lpage>758</lpage><pub-id pub-id-type="doi">10.1016/j.ajpath.2024.01.009</pub-id><pub-id pub-id-type="medline">38325551</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farahani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hejazi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tabassum</surname><given-names>M</given-names> </name><name name-style="western"><surname>Di Ieva</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mahdavifar</surname><given-names>N</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>S</given-names> 
</name></person-group><article-title>Diagnostic performance of deep learning for predicting glioma isocitrate dehydrogenase and 1p/19q co-deletion in MRI: a systematic review and meta-analysis</article-title><source>Eur Radiol</source><year>2025</year><month>08</month><day>16</day><pub-id pub-id-type="doi">10.1007/s00330-025-11898-2</pub-id><pub-id pub-id-type="medline">40817944</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><name name-style="western"><surname>Lei</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Gou</surname><given-names>L</given-names> </name></person-group><article-title>Diagnostic accuracy of a machine learning-based radiomics approach of MR in predicting IDH mutations in glioma patients: a systematic review and meta-analysis</article-title><source>Front Oncol</source><year>2024</year><volume>14</volume><fpage>1409760</fpage><pub-id pub-id-type="doi">10.3389/fonc.2024.1409760</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Redlich</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Feuerhake</surname><given-names>F</given-names> </name><name name-style="western"><surname>Weis</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Applications of artificial intelligence in the analysis of histopathology images of gliomas: a review</article-title><source>Npj Imaging</source><year>2024</year><month>07</month><day>1</day><volume>2</volume><issue>1</issue><fpage>16</fpage><pub-id 
pub-id-type="doi">10.1038/s44303-024-00020-8</pub-id><pub-id pub-id-type="medline">40603567</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nobel</surname><given-names>SMN</given-names> </name><name name-style="western"><surname>Swapno</surname><given-names>S</given-names> </name><name name-style="western"><surname>Islam</surname><given-names>MB</given-names> </name><etal/></person-group><article-title>A novel mixed convolution transformer model for the fast and accurate diagnosis of glioma subtypes</article-title><source>Adv Intell Syst</source><year>2025</year><month>05</month><volume>7</volume><issue>5</issue><fpage>2400566</fpage><pub-id pub-id-type="doi">10.1002/aisy.202400566</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Henry</surname><given-names>EU</given-names> </name><name name-style="western"><surname>Emebo</surname><given-names>O</given-names> </name><name name-style="western"><surname>Omonhinmin</surname><given-names>CA</given-names> </name></person-group><article-title>Vision transformers in medical imaging: a review</article-title><source>arXiv</source><comment>Preprint posted online on  Nov 18, 2022</comment><pub-id pub-id-type="doi">10.48550/arXiv.2211.10043</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alleman</surname><given-names>K</given-names> </name><name name-style="western"><surname>Knecht</surname><given-names>E</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>L</given-names> </name><name 
name-style="western"><surname>Lam</surname><given-names>S</given-names> </name><name name-style="western"><surname>DeCuypere</surname><given-names>M</given-names> </name></person-group><article-title>Multimodal deep learning-based prognostication in glioma patients: a systematic review</article-title><source>Cancers (Basel)</source><year>2023</year><month>01</month><day>16</day><volume>15</volume><issue>2</issue><fpage>545</fpage><pub-id pub-id-type="doi">10.3390/cancers15020545</pub-id><pub-id pub-id-type="medline">36672494</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>d&#x2019;Este</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Nielsen</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Hansen</surname><given-names>AE</given-names> </name></person-group><article-title>Visualizing glioma infiltration by the combination of multimodality imaging and artificial intelligence, a systematic review of the literature</article-title><source>Diagnostics (Basel)</source><year>2021</year><month>03</month><day>25</day><volume>11</volume><issue>4</issue><fpage>592</fpage><pub-id pub-id-type="doi">10.3390/diagnostics11040592</pub-id><pub-id pub-id-type="medline">33806195</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Byeon</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Park</surname><given-names>YW</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Interpretable multimodal transformer for prediction of molecular subtypes and grades in adult-type diffuse gliomas</article-title><source>NPJ Digit 
Med</source><year>2025</year><month>03</month><day>5</day><volume>8</volume><issue>1</issue><fpage>140</fpage><pub-id pub-id-type="doi">10.1038/s41746-025-01530-4</pub-id><pub-id pub-id-type="medline">40044878</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search strategy.</p><media xlink:href="jmir_v28i1e78377_app1.docx" xlink:title="DOCX File, 30 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Data extraction form.</p><media xlink:href="jmir_v28i1e78377_app2.docx" xlink:title="DOCX File, 23 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Modified version of QUADAS-2.</p><media xlink:href="jmir_v28i1e78377_app3.docx" xlink:title="DOCX File, 28 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Characteristics of each included study.</p><media xlink:href="jmir_v28i1e78377_app4.docx" xlink:title="DOCX File, 43 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Features of histopathological images in each study.</p><media xlink:href="jmir_v28i1e78377_app5.docx" xlink:title="DOCX File, 39 KB"/></supplementary-material><supplementary-material id="app6"><label>Multimedia Appendix 6</label><p>Features of artificial intelligence in each study.</p><media xlink:href="jmir_v28i1e78377_app6.docx" xlink:title="DOCX File, 44 KB"/></supplementary-material><supplementary-material id="app7"><label>Multimedia Appendix 7</label><p>Reviewers&#x2019; judgments about each domain in &#x201C;risk of bias&#x201D; and &#x201C;applicability concerns&#x201D; for each included study.</p><media xlink:href="jmir_v28i1e78377_app7.docx" xlink:title="DOCX File, 218 KB"/></supplementary-material><supplementary-material id="app8"><label>Checklist 1</label><p>PRISMA-DTA checklist.</p><media xlink:href="jmir_v28i1e78377_app8.doc" 
xlink:title="DOC File, 67 KB"/></supplementary-material></app-group></back></article>