<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e71091</article-id><article-id pub-id-type="doi">10.2196/71091</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Imaging-Based AI for Predicting Lymphovascular Space Invasion in Cervical Cancer: Systematic Review and Meta-Analysis</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>She</surname><given-names>Lizhen</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Yunfeng</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Hongyong</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Jun</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Zhao</surname><given-names>Yuechen</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Cui</surname><given-names>Jie</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Qiu</surname><given-names>Ling</given-names></name><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Department of Radiation Oncology, The Second Hospital of Jilin University</institution><addr-line>4026 Yatai Street</addr-line><addr-line>Changchun, Jilin Province</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Cahill</surname><given-names>Naomi</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Hassankhani</surname><given-names>Amir</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Akhlaghpasand</surname><given-names>Mohammadhosein</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ling Qiu, Department of Radiation Oncology, The Second Hospital of Jilin University, 4026 Yatai Street, Changchun, Jilin Province, 130022, China, 86 17843128126; <email>ql1433@jlu.edu.cn</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>16</day><month>6</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e71091</elocation-id><history><date date-type="received"><day>09</day><month>01</month><year>2025</year></date><date date-type="rev-recd"><day>23</day><month>04</month><year>2025</year></date><date date-type="accepted"><day>23</day><month>04</month><year>2025</year></date></history><copyright-statement>&#x00A9; Lizhen She, Yunfeng Li, Hongyong Wang, Jun Zhang, 
Yuechen Zhao, Jie Cui, Ling Qiu. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 16.6.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e71091"/><abstract><sec><title>Background</title><p>The role of artificial intelligence (AI) in enhancing the accuracy of lymphovascular space invasion (LVSI) detection in cervical cancer remains debated.</p></sec><sec><title>Objective</title><p>This meta-analysis aimed to evaluate the diagnostic accuracy of imaging-based AI for predicting LVSI in cervical cancer.</p></sec><sec sec-type="methods"><title>Methods</title><p>We conducted a comprehensive literature search across multiple databases, including PubMed, Embase, and Web of Science, identifying studies published up to November 9, 2024. Studies were included if they evaluated the diagnostic performance of imaging-based AI models in detecting LVSI in cervical cancer. 
We used a bivariate random-effects model to calculate pooled sensitivity and specificity with corresponding 95% confidence intervals. Study heterogeneity was assessed using the <italic>I</italic><sup>2</sup> statistic.</p></sec><sec sec-type="results"><title>Results</title><p>Of 403 studies identified, 16 studies (2514 patients) were included. For the internal validation set, the pooled sensitivity, specificity, and area under the curve (AUC) for detecting LVSI were 0.84 (95% CI 0.79-0.87), 0.78 (95% CI 0.75-0.81), and 0.87 (95% CI 0.84-0.90). For the external validation set, the pooled sensitivity, specificity, and AUC for detecting LVSI were 0.79 (95% CI 0.70-0.86), 0.76 (95% CI 0.67-0.83), and 0.84 (95% CI 0.81-0.87). Using the likelihood ratio test for subgroup analysis, deep learning demonstrated significantly higher sensitivity compared to machine learning (<italic>P</italic>=.01). Moreover, AI models based on positron emission tomography/computed tomography exhibited superior sensitivity relative to those based on magnetic resonance imaging (<italic>P</italic>=.01).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Imaging-based AI, particularly deep learning algorithms, demonstrates promising diagnostic performance in predicting LVSI in cervical cancer. However, the limited external validation datasets and the retrospective nature of the research may introduce potential biases. 
These findings underscore AI&#x2019;s potential as an auxiliary diagnostic tool, necessitating further large-scale prospective validation.</p></sec><sec><title>Trial Registration</title><p>PROSPERO CRD42024612008; https://www.crd.york.ac.uk/PROSPERO/view/CRD42024612008</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>uterine cervical neoplasms</kwd><kwd>lymphovascular space invasion</kwd><kwd>diagnostic performance</kwd><kwd>meta-analysis</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Cervical cancer remains a predominant cause of cancer-related morbidity and mortality among women globally, with an estimated 660,000 new cases and 350,000 deaths reported in 2022 [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. The prognosis of cervical cancer is determined by a variety of influencing factors, including tumor stage, histopathological type, size, lymphovascular space invasion (LVSI), and the presence of distant metastasis [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Among these, LVSI is a critical prognostic factor, as its presence is associated with an increased risk of lymph node metastasis, disease recurrence, and poorer overall survival [<xref ref-type="bibr" rid="ref6">6</xref>]. It refers to the infiltration of cancer cells into the lymphatic system or blood vessels [<xref ref-type="bibr" rid="ref6">6</xref>]. Researchers have demonstrated that LVSI is linked to worse survival outcomes. According to Marchiol&#x00E9; et al [<xref ref-type="bibr" rid="ref7">7</xref>], the incidence of LVSI was 2-fold higher in patients with cervical cancer recurrence than in those without recurrence. 
Pol et al [<xref ref-type="bibr" rid="ref8">8</xref>] demonstrated that patients without LVSI exhibited significantly superior outcomes in both disease-free survival and overall survival.</p><p>The presence of LVSI significantly impacts treatment decisions in cervical cancer [<xref ref-type="bibr" rid="ref9">9</xref>]. Patients with LVSI-positive status often necessitate multimodal therapy, including radical surgery with lymphadenectomy combined with chemotherapy or radiotherapy, with the sequence determined by tumor stage and risk factors [<xref ref-type="bibr" rid="ref10">10</xref>]. Current diagnostic methods for cervical cancer include imaging techniques such as computed tomography (CT), positron emission tomography/computed tomography (PET/CT), and magnetic resonance imaging (MRI), which are used to evaluate LVSI. However, these imaging modalities have limitations in accurately detecting LVSI. Based on findings from Holtz and Dunton [<xref ref-type="bibr" rid="ref11">11</xref>] and Zhu et al [<xref ref-type="bibr" rid="ref12">12</xref>], CT demonstrated inferior soft tissue visualization and diagnostic performance in assessing cervical cancer tumor invasion compared to MRI. Specifically, PET/CT exhibits inherent constraints in soft tissue resolution, limiting its diagnostic performance for detecting minute neoplastic lesions [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. According to Park et al [<xref ref-type="bibr" rid="ref15">15</xref>], among stage IB1 cervical cancer patients with no visible tumors on MRI, 4.7% still exhibited LVSI, underscoring the limitations of MRI in detecting microscopic features. 
Similarly, Woo et al [<xref ref-type="bibr" rid="ref16">16</xref>] reported that MRI&#x2019;s diagnostic rate for postconization residual tumors was 32.7%, significantly lower than the actual occurrence rate of 54.5%, highlighting its restricted sensitivity in detecting residual disease. These limitations demonstrate the need for advanced diagnostic tools, such as artificial intelligence (AI), which can enhance diagnostic accuracy by detecting subtle imaging patterns and providing quantitative, reproducible analyses that surpass human interpretation.</p><p>Consequently, there is growing interest in applying image-based AI to improve the accuracy of LVSI detection. AI-based diagnostic tools have demonstrated variable performance in predicting LVSI. Some studies show high performance with area under the curve (AUC) values of 0.94 and 0.923, respectively [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. In contrast, other studies, such as Li et al [<xref ref-type="bibr" rid="ref19">19</xref>] and Wang et al [<xref ref-type="bibr" rid="ref20">20</xref>], observed considerably lower performance, with AUC values of 0.72 and 0.73, respectively. These discrepancies can be attributed to factors such as data quality, sample size, and model architecture. Low-quality datasets, such as retrospective studies or single-center studies, may introduce selection bias and limit the generalizability of models, thereby affecting the reliability of radiomics approaches in clinical practice [<xref ref-type="bibr" rid="ref21">21</xref>]. Additionally, smaller or less diverse sample sizes also restrict model generalizability [<xref ref-type="bibr" rid="ref22">22</xref>]. 
These discrepancies highlight the need for further research to assess the reliability and external validation of AI models in clinical practice.</p><p>Given the limitations of conventional imaging modalities and the inconsistent performance of AI tools, our systematic review and meta-analysis aim to comprehensively evaluate the diagnostic accuracy of imaging-based AI in predicting LVSI in cervical cancer, addressing critical gaps in current literature.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><p>This meta-analysis was conducted in strict accordance with the Preferred Reporting Items for Systematic Reviews and Meta-Analyses of Diagnostic Test Accuracy (PRISMA-DTA) guidelines [<xref ref-type="bibr" rid="ref23">23</xref>].</p><sec id="s2-1"><title>Search Strategy</title><p>A systematic literature search was conducted in PubMed, Embase, and Web of Science without date restrictions to identify relevant studies published up to November 9, 2024. The search strategy included 4 groups of terms: those related to AI (eg, &#x201C;artificial intelligence,&#x201D; &#x201C;machine learning,&#x201D; &#x201C;deep learning&#x201D;), cervical cancer (eg, &#x201C;uterine cervical neoplasms,&#x201D; &#x201C;cervix,&#x201D; &#x201C;cervical&#x201D;), lymph-vascular space invasion (eg, &#x201C;LVSI,&#x201D; &#x201C;lymphatic vessel invasion,&#x201D; &#x201C;lymphatic permeation&#x201D;), and imaging modalities (eg, &#x201C;MRI,&#x201D; &#x201C;magnetic resonance imaging,&#x201D; &#x201C;PET/CT,&#x201D; &#x201C;positron emission tomography-computed tomography&#x201D;). Both keywords and Medical Subject Headings terms were used, and the complete search strategy can be seen in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. 
We manually reviewed the reference lists of selected studies for additional articles and repeated the search on December 11, 2024, to ensure the inclusion of recent publications.</p></sec><sec id="s2-2"><title>Inclusion and Exclusion Criteria</title><p>The selection of studies adhered to the PICOS (Population, Intervention, Comparison, Outcome, Study Design) framework to ensure rigorous assessment and relevance.</p><list list-type="bullet"><list-item><p>Population (P): Studies involved adult patients diagnosed with cervical cancer who underwent LVSI assessment.</p></list-item><list-item><p>Intervention (I): Imaging-based AI models, including CT, MRI, ultrasound, and PET/CT, were used for LVSI evaluation.</p></list-item><list-item><p>Comparison (C): No diagnostic tool was used as a comparator.</p></list-item><list-item><p>Outcome (O): Studies providing quantitative diagnostic performance metrics, including sensitivity, specificity, and AUC, with extractable true positive (TP), true negative (TN), false positive (FP), and false negative (FN) data.</p></list-item><list-item><p>Study design (S): Only studies published in English and focused on pretreatment prediction models were included.</p></list-item></list><p>Studies were excluded if they met any of the following conditions: (1) irrelevant titles or abstracts; (2) focused solely on lymph node metastasis rather than LVSI; (3) inappropriate article types, including reviews, conference abstracts, case reports, or meta-analyses; case reports were excluded due to their limited sample size, typically involving only a single patient, which makes them unsuitable for statistical analysis or model evaluation; (4) studies with incomplete or uninterpretable data, where TP, FP, FN, and TN values for internal and external validation sets could not be extracted; and (5) animal or in vitro studies. 
The screening process was conducted in duplicate by 2 independent reviewers (LS and YL), who first evaluated titles and abstracts for relevance. Full-text articles were then assessed against the inclusion and exclusion criteria. Duplicate articles were identified and removed using EndNote&#x2019;s (Clarivate) duplicate detection tool, followed by manual verification. Discrepancies between reviewers during the screening process were resolved through discussion, and if consensus could not be reached, a third reviewer (HW) was consulted to make the final decision.</p></sec><sec id="s2-3"><title>Quality Assessment</title><p>To comprehensively assess the quality of included studies, we adapted a recognized tool, the revised Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) [<xref ref-type="bibr" rid="ref24">24</xref>], by replacing certain criteria with more relevant ones from the Prediction Model Risk of Bias Assessment Tool (PROBAST) [<xref ref-type="bibr" rid="ref25">25</xref>]. The rationale for this adaptation lies in the complementary strengths of the 2 tools. QUADAS-2 is widely recognized for evaluating the risk of bias in diagnostic accuracy studies, but it does not fully address methodological considerations specific to prediction models, such as variable selection, outcome definitions, or overfitting. PROBAST, on the other hand, is specifically designed to assess risk of bias in prediction model studies. By combining elements from both tools, we tailored the modified QUADAS-2 tool to account for the unique characteristics of studies exploring image-based AI models for LVSI detection.</p><p>Our modified QUADAS-2 tool evaluates 4 domains: participants, index test (AI algorithm), reference standard, and analysis. In addition to assessing the risk of bias across each domain, we also evaluated applicability concerns in the first 3 domains. 
Two independent reviewers applied the modified QUADAS-2 tool to assess the risk of bias in each study, resolving any disagreements through discussion.</p></sec><sec id="s2-4"><title>Data Extraction</title><p>Two independent reviewers assessed study eligibility and performed data extraction, resolving any discrepancies through consensus, with a third reviewer serving as an adjudicator if needed. Data extraction encompassed the primary author&#x2019;s name, publication year, study design, and country of origin. Additionally, key elements such as imaging modality, reference standard, and total and LVSI+ counts of patients, lesions, or images in training, internal validation, and external validation sets were recorded. Further details included age, AI method, AI model algorithms, and performance outcomes, specifically the TP, FP, TN, and FN counts for both internal and external validation sets. Internal validation refers to the evaluation of model performance using a subset of data from the same source as the training data. External validation, in contrast, involves using an independent dataset that originates from a different source than the training set.</p></sec><sec id="s2-5"><title>Outcome Measures</title><p>The principal outcome measures used in this analysis were sensitivity, specificity, and AUC for both internal and external validation sets. Sensitivity was defined as the proportion of TP scans among the total number of positive cases (TP+FN) for patients, images, or lesions. Specificity was defined as the proportion of true negative (TN) scans among the total number of negative cases (TN+FP). The AUC, representing the area under the receiver operating characteristic curve, served as a summary measure of the model&#x2019;s diagnostic ability. We extracted AI performance data from internal validation sets and external validation sets. 
For studies reporting multiple machine learning or deep learning algorithms, only the best diagnostic performance model or algorithm (with the highest AUC value) was included as a representative of the study in the meta-analysis.</p></sec><sec id="s2-6"><title>Statistical Analysis</title><p>A bivariate random-effects analytical approach was used to generate pooled estimations of sensitivity and specificity outcomes for AI-based assessments of LVSI in both internal and external validation sets, accompanied by 95% CIs. We used a summary receiver operating characteristic model to generate the summary receiver operating characteristic curve and calculate the AUC. Additionally, Fagan plots were created to provide a visual representation of the clinical utility of the models.</p><p>To assess heterogeneity among studies, we calculated the <italic>I</italic>&#x00B2; statistic, interpreting values of 25%, 50%, and 75% as indicating low, moderate, and high heterogeneity, respectively. For internal validation sets, we performed subgroup analyses by the number of patients (&#x003E;150 vs &#x2264;150), region (single vs multiple centers), AI method (deep learning vs machine learning), AI algorithm (logistic regression vs support vector machine), and imaging modality (MRI vs PET/CT). Publication bias was evaluated using Deeks&#x2019; funnel plot. All statistical analyses were conducted in Stata 15.1, with significance set at <italic>P</italic>&#x003C;.05. Risk of bias for study quality was assessed using RevMan 5.4 (The Cochrane Collaboration) for comprehensive risk of bias assessment.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Study Selection</title><p>A systematic literature retrieval and analysis was methodically executed across 3 authoritative databases: PubMed, Embase, and Web of Science, yielding 568 studies. After removing 165 duplicates, 383 studies were excluded based on initial screening criteria. 
Subsequently, 20 full-text articles were further assessed. In total, 4 studies were excluded due to unavailable data (TP, TN, FP, and FN, n=1), non-English language (n=1), or not focusing on LVSI (n=2). Ultimately, 16 studies met all inclusion criteria and were included in the final analysis [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref37">37</xref>]. The PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flowchart detailing this selection process is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Flow chart of the screening process for included studies. TP: true positive; FP: false positive; FN: false negative; TN: true negative; LVSI: lymphovascular space invasion.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig01.png"/></fig></sec><sec id="s3-2"><title>Study Description and Quality Assessment</title><p>This meta-analysis included 16 studies, all of which included an internal validation set with a total of 1092 participants (ranging from 25 to 198 participants per study). In total, 4 studies included an external validation set [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref37">37</xref>], involving 203 participants (ranging from 26 to 102 participants). In total, 15 studies used MRI as the imaging modality, while 1 study used PET/CT [<xref ref-type="bibr" rid="ref37">37</xref>]. Machine learning techniques were applied in 15 studies, with 1 study using deep learning [<xref ref-type="bibr" rid="ref27">27</xref>]. 
Among the AI model algorithms, logistic regression (LR) was the most common, used in 9 studies [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>], followed by support vector machine (SVM) in 5 studies [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref36">36</xref>], and decision tree and convolutional neural network algorithms, each used in 1 study [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. The gold standard for diagnosis in all studies was pathological examination. All studies were retrospective and conducted in China. Study and patient characteristics, as well as technical features, are summarized in <xref ref-type="table" rid="table1">Tables 1</xref><xref ref-type="table" rid="table2"/>-<xref ref-type="table" rid="table3">3</xref>.</p><p>Quality assessment was performed using the QUADAS-2 revised tool, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref> [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref37">37</xref>]. For patient selection, one study was classified as &#x201C;high risk&#x201D; due to inappropriate exclusion criteria that may omit specific populations, such as those with a history of radiotherapy or chemotherapy [<xref ref-type="bibr" rid="ref29">29</xref>]. 
One study [<xref ref-type="bibr" rid="ref19">19</xref>] was rated as &#x201C;unclear&#x201D; because it was uncertain whether the patients were included consecutively. Regarding the index test in risk of bias, one study was rated as &#x201C;high risk&#x201D; for inadequate reporting of model training and evaluation processes [<xref ref-type="bibr" rid="ref32">32</xref>]. Overall, the quality of the included studies was deemed acceptable.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Study and patient characteristics of the included studies.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Author</td><td align="left" valign="bottom">Year</td><td align="left" valign="bottom">Country</td><td align="left" valign="bottom">Study design</td><td align="left" valign="bottom">Imaging modality</td><td align="left" valign="bottom">Reference standard</td></tr></thead><tbody><tr><td align="left" valign="top">Yu et al [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">MRI<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref9">9</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Jiang et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td 
align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Hua et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">2020</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Shi et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Huang et al [<xref ref-type="bibr" rid="ref17">17</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Xiao et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">China</td><td align="left" 
valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Cui et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Ma et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">PET/CT<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Du et al [<xref ref-type="bibr" rid="ref18">18</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" valign="top">Pathology</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">China</td><td align="left" valign="top">Retro</td><td align="left" valign="top">MRI</td><td align="left" 
valign="top">Pathology</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Retro: retrospective.</p></fn><fn id="table1fn2"><p><sup>b</sup>MRI: magnetic resonance imaging.</p></fn><fn id="table1fn3"><p><sup>c</sup>PET/CT: positron emission tomography-computed tomography.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Patient characteristics of included studies.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2">Author</td><td align="left" valign="bottom" rowspan="2">Year</td><td align="left" valign="bottom" colspan="3">Patients, lesions, or images per set</td><td align="left" valign="bottom" rowspan="2">Age</td><td align="left" valign="bottom" rowspan="2">Number of LVSI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>+ patients, lesions, or images</td></tr><tr><td align="left" valign="bottom">Training</td><td align="left" valign="bottom">Internal validation</td><td align="left" valign="bottom">External validation</td></tr></thead><tbody><tr><td align="left" valign="top">Yu et al [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">120</td><td align="left" valign="top">60</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: median 53 (range 47-57)</p></list-item><list-item><p>Internal validation: median 50 (range 46-54)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 46</p></list-item><list-item><p>Internal validation: 27</p></list-item></list></td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">70</td><td align="left" 
valign="top">35</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 49.34 (SD 8.55); non-LVSI: mean 46.54 (SD 9.66)</p></list-item><list-item><p>Internal validation: LVSI+: mean 54.54 (SD 10.42); non-LVSI: mean 48.59 (SD 10.69)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 29</p></list-item><list-item><p>Internal validation: 13</p></list-item></list></td></tr><tr><td align="left" valign="top">Jiang et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">2056</td><td align="left" valign="top">2056</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Median 50.42 (range 27-70)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 862</p></list-item><list-item><p>Internal validation: 862</p></list-item></list></td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">56</td><td align="left" valign="top">56</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Median 50 (range 29&#x2010;67)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 31</p></list-item><list-item><p>Internal validation: 31</p></list-item></list></td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">129</td><td align="left" valign="top">129</td><td align="left" valign="top">39</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 48.07 (SD 10.16); non-LVSI: mean 53.18 (SD 
9.47)</p></list-item><list-item><p>Internal validation: LVSI+: mean 48.07 (SD 10.16); non-LVSI: mean 53.18 (SD 9.47)</p></list-item><list-item><p>External validation: LVSI+: mean 52.08 (SD 12.24); non-LVSI: mean 52.81 (SD 9.46)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 46</p></list-item><list-item><p>Internal validation: 46</p></list-item><list-item><p>External validation: 12</p></list-item></list></td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">198</td><td align="left" valign="top">198</td><td align="left" valign="top">102</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 51.35 (SD 10.49); non-LVSI: mean 51.63 (SD 10.84)</p></list-item><list-item><p>Internal validation: LVSI+: mean 51.35 (SD 10.49); non-LVSI: mean 51.63 (SD 10.84)</p></list-item><list-item><p>External validation: LVSI+: mean 49.06 (SD 10.52); non-LVSI: mean 51.81 (SD 8.73)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 104</p></list-item><list-item><p>Internal validation: 104</p></list-item><list-item><p>External validation: 54</p></list-item></list></td></tr><tr><td align="left" valign="top">Hua et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">2020</td><td align="left" valign="top">111</td><td align="left" valign="top">56</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 49.90 (SD 10.06); non-LVSI: mean 51.34 (SD 8.73)</p></list-item><list-item><p>Internal validation: LVSI+: mean 50.57 (SD 7.57); non-LVSI: mean 52.67 (SD 10.04)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Internal validation: 23</p></list-item><list-item><p>Training: 
44</p></list-item></list></td></tr><tr><td align="left" valign="top">Shi et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">160</td><td align="left" valign="top">44</td><td align="left" valign="top">36</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 49.97 (SD 9.78); non-LVSI: mean 52.68 (SD 8.72)</p></list-item><list-item><p>Internal validation: LVSI+: mean 54.62 (SD 9.07); non-LVSI: mean 49.53 (SD 9.85)</p></list-item><list-item><p>External validation: LVSI+: mean 50.21 (SD 12.67); non-LVSI: mean 55.36 (SD 10.36)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 65</p></list-item><list-item><p>Internal validation: 16</p></list-item><list-item><p>External validation: 14</p></list-item></list></td></tr><tr><td align="left" valign="top">Huang et al [<xref ref-type="bibr" rid="ref17">17</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">100</td><td align="left" valign="top">25</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Total: mean 47.94 (SD 9.01);</p></list-item><list-item><p>LVSI+: mean 47.23 (SD 8.23); non-LVSI: mean 48.20 (SD 9.31)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 29</p></list-item><list-item><p>Internal validation: 5</p></list-item></list></td></tr><tr><td align="left" valign="top">Xiao et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">154</td><td align="left" valign="top">79</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: mean 50.0 (SD 9.3)</p></list-item><list-item><p>Internal validation: mean 49.9 (SD 10.8);</p></list-item><list-item><p>LVSI+: mean 49.5 (SD 10.1); non-LVSI: 
mean 50.9 (SD 9.2)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 106</p></list-item><list-item><p>Internal validation: 45</p></list-item></list></td></tr><tr><td align="left" valign="top">Cui et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">108</td><td align="left" valign="top">55</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: mean 49.42 (SD 9.16); non-LVSI: mean 51.11 (SD 9.43)</p></list-item><list-item><p>Internal validation: LVSI+: mean 52.23 (SD 9.75); non-LVSI: mean 52.94 (SD 9.03)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 43</p></list-item><list-item><p>Internal validation: 22</p></list-item></list></td></tr><tr><td align="left" valign="top">Ma et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">86</td><td align="left" valign="top">38</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: median 52 (range 47.2&#x2010;57.5);</p></list-item><list-item><p>Non-LVSI: median 57.5 (range 51.2&#x2010;64.8)</p></list-item><list-item><p>Internal validation: LVSI+: mean 53.2 (SD 6.8); non-LVSI: mean 53.4 (SD 12.7)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 24</p></list-item><list-item><p>Internal validation: 8</p></list-item></list></td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">61</td><td align="left" valign="top">40</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>LVSI+: mean 53.25 (SD 
9.72);</p></list-item><list-item><p>Non-LVSI: mean 53.57 (SD 10.36)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 34</p></list-item><list-item><p>Internal validation: 14</p></list-item></list></td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">61</td><td align="left" valign="top">25</td><td align="left" valign="top">26</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: median 50 (range 33-74)</p></list-item><list-item><p>Internal validation: median 51 (range 40-58)</p></list-item><list-item><p>External validation: median 52 (range 40-74)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 30</p></list-item><list-item><p>Internal validation: 12</p></list-item><list-item><p>External validation: 15</p></list-item></list></td></tr><tr><td align="left" valign="top">Du et al [<xref ref-type="bibr" rid="ref18">18</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">104</td><td align="left" valign="top">45</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: LVSI+: median 45 (range 37-53); non-LVSI: median 48 (range 40-56)</p></list-item><list-item><p>Internal validation: LVSI+: median 44 (range 35-53); non-LVSI: median 46 (range 42-50)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Training: 45</p></list-item><list-item><p>Internal validation: 22</p></list-item></list></td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">80</td><td align="left" valign="top">40</td><td align="left" valign="top">NR</td><td align="left" valign="top"><list 
list-type="bullet"><list-item><p>Training: median 49.20 (range 29-67)</p></list-item><list-item><p>Internal validation: mean 50.45 (range 32-75)</p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Internal validation: 7</p></list-item><list-item><p>Training: 25</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>LVSI: lymphovascular space invasion.</p></fn><fn id="table2fn2"><p><sup>b</sup>NR: not reported.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Technical aspects of included studies.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2">Author</td><td align="left" valign="bottom" rowspan="2">Year</td><td align="left" valign="bottom" rowspan="2">AI<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> method</td><td align="left" valign="bottom" rowspan="2">AI model algorithms</td><td align="left" valign="bottom" colspan="4">Internal validation sets</td><td align="left" valign="bottom" colspan="4">External validation sets</td></tr><tr><td align="left" valign="bottom">TP<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="bottom">FP<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup></td><td align="left" valign="bottom">FN<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup></td><td align="left" valign="bottom">TN<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup></td><td align="left" valign="bottom">TP</td><td align="left" valign="bottom">FP</td><td align="left" valign="bottom">FN</td><td align="left" valign="bottom">TN</td></tr></thead><tbody><tr><td align="left" valign="top">Yu et al [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">DT<sup><xref 
ref-type="table-fn" rid="table3fn6">f</xref></sup></td><td align="left" valign="top">22</td><td align="left" valign="top">9</td><td align="left" valign="top">5</td><td align="left" valign="top">24</td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref9">9</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup></td><td align="left" valign="top">9</td><td align="left" valign="top">5</td><td align="left" valign="top">4</td><td align="left" valign="top">17</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Jiang et al [<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">Deep learning</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table3fn9">i</xref></sup></td><td align="left" valign="top">759</td><td align="left" valign="top">296</td><td align="left" valign="top">103</td><td align="left" valign="top">898</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">2019</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">27</td><td align="left" valign="top">7</td><td align="left" valign="top">4</td><td align="left" valign="top">18</td><td align="left" valign="top">NR</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref29">29</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">34</td><td align="left" valign="top">7</td><td align="left" valign="top">12</td><td align="left" valign="top">76</td><td align="left" valign="top">10</td><td align="left" valign="top">5</td><td align="left" valign="top">2</td><td align="left" valign="top">22</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref30">30</xref>]</td><td align="left" valign="top">2023</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">SVM<sup><xref ref-type="table-fn" rid="table3fn10">j</xref></sup></td><td align="left" valign="top">81</td><td align="left" valign="top">16</td><td align="left" valign="top">23</td><td align="left" valign="top">78</td><td align="left" valign="top">40</td><td align="left" valign="top">12</td><td align="left" valign="top">14</td><td align="left" valign="top">36</td></tr><tr><td align="left" valign="top">Hua et al [<xref ref-type="bibr" rid="ref31">31</xref>]</td><td align="left" valign="top">2020</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">SVM</td><td align="left" valign="top">17</td><td align="left" valign="top">11</td><td align="left" valign="top">6</td><td align="left" valign="top">22</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Shi et al [<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">14</td><td align="left" 
valign="top">8</td><td align="left" valign="top">2</td><td align="left" valign="top">20</td><td align="left" valign="top">13</td><td align="left" valign="top">7</td><td align="left" valign="top">1</td><td align="left" valign="top">15</td></tr><tr><td align="left" valign="top">Huang et al [<xref ref-type="bibr" rid="ref17">17</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">5</td><td align="left" valign="top">6</td><td align="left" valign="top">0</td><td align="left" valign="top">14</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Xiao et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">36</td><td align="left" valign="top">8</td><td align="left" valign="top">9</td><td align="left" valign="top">26</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Cui et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">2022</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">17</td><td align="left" valign="top">9</td><td align="left" valign="top">5</td><td align="left" valign="top">24</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Ma et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">Machine learning</td><td 
align="left" valign="top">LR</td><td align="left" valign="top">6</td><td align="left" valign="top">7</td><td align="left" valign="top">2</td><td align="left" valign="top">23</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">2024</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">SVM</td><td align="left" valign="top">13</td><td align="left" valign="top">9</td><td align="left" valign="top">1</td><td align="left" valign="top">17</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">LR</td><td align="left" valign="top">8</td><td align="left" valign="top">0</td><td align="left" valign="top">4</td><td align="left" valign="top">13</td><td align="left" valign="top">12</td><td align="left" valign="top">2</td><td align="left" valign="top">3</td><td align="left" valign="top">9</td></tr><tr><td align="left" valign="top">Du et al [<xref ref-type="bibr" rid="ref18">18</xref>]</td><td align="left" valign="top">2021</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">SVM</td><td align="left" valign="top">18</td><td align="left" valign="top">3</td><td align="left" valign="top">4</td><td align="left" valign="top">20</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" 
valign="top">2019</td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">SVM</td><td align="left" valign="top">6</td><td align="left" valign="top">12</td><td align="left" valign="top">1</td><td align="left" valign="top">21</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table3fn2"><p><sup>b</sup>TP: true positive.</p></fn><fn id="table3fn3"><p><sup>c</sup>FP: false positive.</p></fn><fn id="table3fn4"><p><sup>d</sup>FN: false negative.</p></fn><fn id="table3fn5"><p><sup>e</sup>TN: true negative.</p></fn><fn id="table3fn6"><p><sup>f</sup>DT: decision tree.</p></fn><fn id="table3fn7"><p><sup>g</sup>NR: not reported.</p></fn><fn id="table3fn8"><p><sup>h</sup>LR: logistic regression.</p></fn><fn id="table3fn9"><p><sup>i</sup>CNN: convolutional neural network.</p></fn><fn id="table3fn10"><p><sup>j</sup>SVM: support vector machine.</p></fn></table-wrap-foot></table-wrap><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Risk of bias and applicability assessment of included studies using the revised Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref37">37</xref>].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig02.png"/></fig></sec><sec id="s3-3"><title>Diagnostic Performance of Internal Validation Sets and External Validation Sets for Artificial Intelligence in Predicting Lymph Vascular Space Invasion of Cervical Cancer</title><p>For internal validation sets, the 
sensitivity in detecting LVSI of cervical cancer was 0.84 (95% CI 0.79&#x2010;0.87), and the specificity was 0.79 (95% CI 0.75&#x2010;0.82; <xref ref-type="fig" rid="figure3">Figure 3</xref> [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref37">37</xref>]), with an AUC of 0.88 (95% CI 0.84&#x2010;0.90; <xref ref-type="fig" rid="figure4">Figure 4A</xref>). Using a pretest probability of 20%, the Fagan nomogram demonstrates a post-test probability of 49% following a positive test result and a post-test probability of 5% following a negative test result (<xref ref-type="fig" rid="figure5">Figure 5A</xref>).</p><p>For external validation sets, the sensitivity in detecting LVSI was 0.79 (95% CI 0.70&#x2010;0.86), and the specificity was 0.76 (95% CI 0.67&#x2010;0.83; <xref ref-type="fig" rid="figure6">Figure 6</xref> [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]), with an AUC of 0.84 (95% CI 0.81&#x2010;0.87; <xref ref-type="fig" rid="figure4">Figure 4B</xref>). 
Using a pretest probability of 20%, the Fagan nomogram demonstrates a post-test probability of 45% following a positive test result and a post-test probability of 6% following a negative test result (<xref ref-type="fig" rid="figure5">Figure 5B</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Forest plot of sensitivity and specificity for artificial intelligence&#x2013;based lymphovascular space invasion diagnosis in cervical cancer: internal validation set [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref37">37</xref>].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig03.png"/></fig><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Receiver operating characteristic (ROC) curves for artificial intelligence&#x2013;based lymphovascular space invasion prediction in cervical cancer: (<bold>A</bold>) internal validation set and (<bold>B</bold>) external validation set. AUC: area under the curve; SENS: sensitivity; SPEC: specificity; SROC: summary receiver operating characteristic.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Fagan&#x2019;s nomograms for artificial intelligence&#x2013;based lymphovascular space invasion diagnostic performance in cervical cancer: (<bold>A</bold>) internal validation set and (<bold>B</bold>) external validation set. 
LR: likelihood ratio.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig05.png"/></fig><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Forest plot of external validation set for sensitivity and specificity of artificial intelligence in diagnosing lymphovascular space invasion in cervical cancer [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref37">37</xref>].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig06.png"/></fig></sec><sec id="s3-4"><title>Subgroup Analysis of Internal Validation Sets of Imaging-Based AI for Lymphovascular Space Invasion in Cervical Cancer</title><p>In the subgroup analysis, we evaluated diagnostic performance across the number of internal validation patients, region, AI methods, AI algorithms, imaging modalities, and data source (<xref ref-type="table" rid="table4">Table 4</xref>). Number of internal validation patients (&#x003E;150 vs &#x2264;150) showed no statistically significant differences in sensitivity (<italic>P</italic>=.35) and specificity (<italic>P</italic>=.08). Single-center studies demonstrated significantly different specificities compared to multicenter studies (<italic>P</italic>&#x003C;.001). Deep learning (1 study) exhibited higher sensitivity compared to machine learning (15 studies, <italic>P</italic>=.01), with no significant differences in specificity (<italic>P</italic>=.29). Among AI algorithms, LR (9 studies) and SVM (5 studies) showed comparable diagnostic performance, with no significant differences in sensitivity (<italic>P</italic>=.77) or specificity (<italic>P</italic>=.36). 
Imaging modality analysis revealed a significant difference in sensitivity between MRI (15 studies) and PET/CT (1 study, <italic>P</italic>=.01), while specificity remained consistent (<italic>P</italic>=.29). The radiomic model showed a higher sensitivity, and the radiomic and clinical model showed higher specificity; the differences were statistically significant (<italic>P</italic>=.05 for both sensitivity and specificity comparisons).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Subgroup analysis of imaging-based artificial intelligence performance in internal validation cohorts for lymphovascular space invasion detection in cervical cancer.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Subgroup</td><td align="left" valign="bottom">Studies, n</td><td align="left" valign="bottom">Sensitivity (95% CI)</td><td align="left" valign="bottom">Subgroup difference <italic>P</italic> value</td><td align="left" valign="bottom">Specificity (95% CI)</td><td align="left" valign="bottom">Subgroup difference <italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Number of internal validation patients</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.35</td><td align="left" valign="top"/><td align="left" valign="top">.08</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x003E;150</td><td align="left" valign="top">8</td><td align="left" valign="top">0.81 (0.75&#x2010;0.86)</td><td align="left" valign="top"/><td align="left" valign="top">0.80 (0.75&#x2010;0.84)</td><td align="left" valign="top"/></tr><tr><td align="char" char="." 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>&#x2264;150</td><td align="left" valign="top">8</td><td align="left" valign="top">0.84 (0.78&#x2010;0.88)</td><td align="left" valign="top"/><td align="left" valign="top">0.73 (0.65&#x2010;0.79)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Region</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.09</td><td align="left" valign="top"/><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Single center</td><td align="left" valign="top">12</td><td align="left" valign="top">0.84 (0.79&#x2010;0.89)</td><td align="left" valign="top"/><td align="left" valign="top">0.74 (0.70&#x2010;0.78)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multiple centers</td><td align="left" valign="top">4</td><td align="left" valign="top">0.77 (0.69&#x2010;0.84)</td><td align="left" valign="top"/><td align="left" valign="top">0.85 (0.80&#x2010;0.89)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">AI<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup> method</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.01</td><td align="left" valign="top"/><td align="left" valign="top">.29</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Deep learning</td><td align="left" valign="top">1</td><td align="left" valign="top">0.88 (0.86&#x2010;0.90)</td><td align="left" valign="top"/><td align="left" valign="top">0.75 (0.73&#x2010;0.78)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Machine learning</td><td align="left" valign="top">15</td><td align="left" valign="top">0.79 (0.75&#x2010;0.83)</td><td align="left" valign="top"/><td align="left" valign="top">0.78 (0.74&#x2010;0.81)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">AI algorithms</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.77</td><td align="left" valign="top"/><td align="left" valign="top">.36</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>LR<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">9</td><td align="left" valign="top">0.79 (0.72&#x2010;0.84)</td><td align="left" valign="top"/><td align="left" valign="top">0.79 (0.73&#x2010;0.84)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>SVM<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup></td><td align="left" valign="top">5</td><td align="left" valign="top">0.80 (0.73&#x2010;0.86)</td><td align="left" valign="top"/><td align="left" valign="top">0.75 (0.66&#x2010;0.81)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Imaging-based</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.01</td><td align="left" valign="top"/><td align="left" valign="top">.29</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>PET/CT<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup></td><td align="left" valign="top">1</td><td align="left" valign="top">0.88 (0.86&#x2010;0.90)</td><td align="left" valign="top"/><td align="left" valign="top">0.75 (0.73&#x2010;0.78)</td><td align="left" valign="top"/></tr><tr><td 
align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MRI<sup><xref ref-type="table-fn" rid="table4fn5">e</xref></sup></td><td align="left" valign="top">15</td><td align="left" valign="top">0.79 (0.75&#x2010;0.83)</td><td align="left" valign="top"/><td align="left" valign="top">0.78 (0.74&#x2010;0.81)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Data source</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">.05</td><td align="left" valign="top"/><td align="left" valign="top">.05</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Radiomic</td><td align="left" valign="top">7</td><td align="left" valign="top">0.86 (0.81&#x2010;0.90)</td><td align="left" valign="top"/><td align="left" valign="top">0.75 (0.72&#x2010;0.78)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Radiomic and clinical</td><td align="left" valign="top">9</td><td align="left" valign="top">0.78 (0.71&#x2010;0.83)</td><td align="left" valign="top"/><td align="left" valign="top">0.81 (0.76&#x2010;0.85)</td><td align="left" valign="top"/></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table4fn2"><p><sup>b</sup>LR: logistic regression.</p></fn><fn id="table4fn3"><p><sup>c</sup>SVM: support vector machine.</p></fn><fn id="table4fn4"><p><sup>d</sup>PET/CT: positron emission tomography-computed tomography.</p></fn><fn id="table4fn5"><p><sup>e</sup>MRI: magnetic resonance imaging.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-5"><title>Publication Bias</title><p>Deeks&#x2019; funnel plot asymmetry test showed that there is no significant publication bias both for the internal validation 
sets (<italic>P</italic>=.07; <xref ref-type="fig" rid="figure7">Figure 7A</xref>) and external validation sets (<italic>P</italic>=.09; <xref ref-type="fig" rid="figure7">Figure 7B</xref>) for AI.</p><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Deeks&#x2019; funnel plots for publication bias assessment in artificial intelligence&#x2013;based lymphovascular space invasion prediction: (<bold>A</bold>) internal validation set and (<bold>B</bold>) external validation set.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e71091_fig07.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>Based on our comprehensive literature review, this is the first meta-analysis comprehensively evaluating imaging-based AI for detecting LVSI in cervical cancer. The pooled sensitivity, specificity, and AUC in our analysis for internal validation were 0.84, 0.79, and 0.88, respectively, while for external validation, these metrics were slightly lower at 0.79, 0.76, and 0.84. The slightly lower performance in external validation compared to internal validation is expected when applying AI models to independent datasets. This decline may be due to differences in patient populations, imaging protocols, data quality, and clinical practices across institutions. Despite this, the proposed AI models still show reasonable generalizability. They demonstrate potential for robust performance in diverse datasets and clinical settings.</p><p>Deep learning showed superior sensitivity (0.88) compared to machine learning (0.79), likely due to its advanced capabilities in processing complex imaging data. 
Deep learning models, particularly convolutional neural networks, excel at autonomously extracting hierarchical features from imaging datasets, revealing subtle patterns that manual feature extraction often misses [<xref ref-type="bibr" rid="ref27">27</xref>]. And by integrating diverse imaging variables like tissue texture, morphological characteristics, and intensity variations, deep learning enables more precise LVSI-positive case differentiation [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. However, given the limited number of deep learning studies, the observed higher sensitivity may be insufficiently generalizable. Larger-scale, multicenter studies are essential to validate the potential superiority of deep learning approaches in LVSI detection. Additionally, deep learning is challenged by high overfitting risk and significant computational demands, which can hinder its practical application in clinical settings [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref40">40</xref>].</p></sec><sec id="s4-2"><title>Comparison to Prior Work</title><p>Comparative imaging analysis of LVSI detection revealed differential sensitivity across modalities. MRI-based AI demonstrated a pooled sensitivity of 0.79, whereas PET/CT-based AI reported a marginally superior sensitivity of 0.88. The potential enhanced diagnostic performance of PET/CT can be attributed to its sophisticated capacity for integrating functional and anatomical data, thereby facilitating more nuanced metabolic and structural characterization [<xref ref-type="bibr" rid="ref41">41</xref>]. However, it is important to note that only 1 study used PET/CT in this analysis, and the limited sample size may have influenced the reported sensitivity. 
This warrants cautious interpretation of the results and highlights the need for further studies with larger PET/CT-based datasets to confirm these findings.</p><p>In 2024, Zhang et al [<xref ref-type="bibr" rid="ref22">22</xref>] conducted a meta-analysis specifically examining MRI-based radiomics models for predicting LVSI in cervical cancer. Their findings reported a sensitivity of 0.79, specificity of 0.73, and an AUC of 0.83. In contrast, our study achieved superior results in the internal validation set, with a sensitivity of 0.84, specificity of 0.79, and an AUC of 0.88. The improved performance in our research could be attributed to the incorporation of more recent studies and a larger sample size, which likely enhanced the accuracy and robustness of the model. Similarly, a recent meta-analysis by Zhao et al [<xref ref-type="bibr" rid="ref21">21</xref>] reported comparable diagnostic performance metrics, with a sensitivity of 0.83, specificity of 0.74, and an AUC of 0.86. Unlike their studies focusing only on MRI, our meta-analysis integrated multiple imaging modalities, including MRI and PET/CT, thus offering a more comprehensive evaluation of AI-based diagnostic approaches. Moreover, we pioneered the approach of stratifying the analysis into internal and external validation cohorts, enabling a more rigorous evaluation of the models&#x2019; diagnostic performance and generalizability. The slight decline in external validation cohorts indicates acceptable generalizability and provides clinicians with realistic expectations regarding model performance in real-world clinical scenarios.</p></sec><sec id="s4-3"><title>Heterogeneity</title><p>Our meta-analysis demonstrated no significant heterogeneity in both internal and external validation datasets. However, the Deeks&#x2019; funnel plot revealed a borderline <italic>P</italic> value of approximately 0.07, suggesting the potential presence of publication bias. 
Therefore, we used a bivariate random-effects model for sensitivity and specificity pooling, acknowledging inherent clinical heterogeneity and potential bias. Subgroup analyses were strategically conducted based on the number of patients, region, AI method, AI algorithms, imaging modalities, and data source. The results indicated that both the region and the AI methods significantly influenced the pooled outcomes. However, several additional factors may also contribute to clinical heterogeneity, including variations in AI algorithm architectures, hyperparameter optimization strategies, image acquisition protocols, preprocessing techniques, feature selection methods, and discrepancies in LVSI assessment criteria among pathologists. As reported in Huang et al [<xref ref-type="bibr" rid="ref17">17</xref>], variations in image acquisition protocols, such as differences in imaging resolution or contrast enhancement methods, may impact the diagnostic performance. Similarly, heterogeneity arising from AI algorithm design, such as the use of different convolutional neural network algorithms, has been shown to influence overall results [<xref ref-type="bibr" rid="ref27">27</xref>].</p></sec><sec id="s4-4"><title>Future Directions</title><p>Despite these challenges, our findings revealed promising diagnostic performance of AI-based approaches for LVSI detection in cervical cancer, suggesting their potential to streamline clinical workflows and enhance diagnostic accuracy. The implementation of AI-driven approaches in cervical cancer surveillance has increased over the years, yielding encouraging therapeutic prospects [<xref ref-type="bibr" rid="ref42">42</xref>]. The implementation of imaging-based AI tools could particularly benefit primary health care systems, especially in resource-limited settings where specialist expertise may be scarce. 
However, challenges include limited computing infrastructure, data storage constraints, and the need for tailored training programs [<xref ref-type="bibr" rid="ref43">43</xref>]. Additionally, AI systems could optimize screening efficiency and support treatment planning decisions [<xref ref-type="bibr" rid="ref44">44</xref>]. There is a lack of direct comparisons between the performance of AI models and that of radiologists. Such comparisons are critical to understanding the relative strengths and weaknesses of AI in clinical practice. Future studies should aim to investigate head-to-head comparisons between AI algorithms and radiologists.</p></sec><sec id="s4-5"><title>Limitations</title><p>There are several limitations in our meta-analysis. First, external validation was performed in only 4 of 16 studies, raising concerns about model generalizability. The limited external validation increases the risk of overfitting, where models may perform exceptionally well on training datasets but demonstrate reduced performance on independent datasets. Second, the inherently retrospective design of the included studies may introduce selection biases and limitations in data collection. Third, all studies were conducted in China, which raises concerns about the generalizability of our findings to other populations and health care settings. These factors underscore the critical need for future prospective investigations that not only address the biases associated with retrospective designs but also validate and strengthen the robustness of our findings across diverse demographics and clinical environments. Fourth, the absence of direct comparative analyses between AI and radiologist interpretations represents a significant research gap; further comparisons are needed.</p><p>In conclusion, imaging-based AI, particularly deep learning algorithms, demonstrates promising diagnostic performance in predicting LVSI in cervical cancer. 
However, the limited external validation datasets and the retrospective nature of the research may introduce potential biases. These findings underscore AI&#x2019;s potential as an auxiliary diagnostic tool, necessitating further large-scale prospective validation.</p></sec></sec></body><back><ack><p>During the preparation of this work, we used the generative artificial intelligence tool Sider in order to improve readability and language quality. After using this tool, the authors reviewed and edited the content as needed and take full responsibility for the content of the publication.</p></ack><notes><sec><title>Data Availability</title><p>All primary research findings are presented within this manuscript. For additional information or clarification, please contact the corresponding investigators.</p></sec></notes><fn-group><fn fn-type="con"><p>LS and LQ established the investigative framework and research methodology. LS, YL, HW, JZ, YZ, and JC extracted and analyzed the data, while LS composed the initial manuscript draft. 
All contributing authors participated in the manuscript preparation and approved the final version before submission.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb3">CT</term><def><p>computed tomography</p></def></def-item><def-item><term id="abb4">FN</term><def><p>false negative</p></def></def-item><def-item><term id="abb5">FP</term><def><p>false positive</p></def></def-item><def-item><term id="abb6">LVSI</term><def><p>lymphovascular space invasion</p></def></def-item><def-item><term id="abb7">MRI</term><def><p>magnetic resonance imaging</p></def></def-item><def-item><term id="abb8">PET/CT</term><def><p>positron emission tomography/computed tomography</p></def></def-item><def-item><term id="abb9">PICOS</term><def><p>Population, Intervention, Comparison, Outcome, Study Design</p></def></def-item><def-item><term id="abb10">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb11">PRISMA-DTA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses of Diagnostic Test Accuracy</p></def></def-item><def-item><term id="abb12">PROBAST</term><def><p>Prediction Model Risk of Bias Assessment Tool</p></def></def-item><def-item><term id="abb13">QUADAS-2</term><def><p>Quality Assessment of Diagnostic Accuracy Studies-2</p></def></def-item><def-item><term id="abb14">SVM</term><def><p>support vector machine</p></def></def-item><def-item><term id="abb15">TN</term><def><p>true negative</p></def></def-item><def-item><term id="abb16">TP</term><def><p>true positive</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>D</given-names> </name><name name-style="western"><surname>Vignat</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lorenzoni</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Global estimates of incidence and mortality of cervical cancer in 2020: a baseline analysis of the WHO Global Cervical Cancer Elimination Initiative</article-title><source>Lancet Glob Health</source><year>2023</year><month>02</month><volume>11</volume><issue>2</issue><fpage>e197</fpage><lpage>e206</lpage><pub-id pub-id-type="doi">10.1016/S2214-109X(22)00501-0</pub-id><pub-id pub-id-type="medline">36528031</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bray</surname><given-names>F</given-names> </name><name name-style="western"><surname>Laversanne</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sung</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Global cancer statistics 2022: GLOBOCAN estimates of incidence and mortality worldwide for 36 cancers in 185 countries</article-title><source>CA Cancer J Clin</source><year>2024</year><month>05</month><volume>74</volume><issue>3</issue><fpage>229</fpage><lpage>263</lpage><pub-id pub-id-type="doi">10.3322/caac.21834</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meng</surname><given-names>X</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name></person-group><article-title>Conditional survival analysis and real-time prognosis prediction for cervical cancer patients below the age of 65 years</article-title><source>Front Oncol</source><year>2022</year><volume>12</volume><fpage>1049531</fpage><pub-id pub-id-type="doi">10.3389/fonc.2022.1049531</pub-id><pub-id pub-id-type="medline">36698403</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xie</surname><given-names>L</given-names> </name><name name-style="western"><surname>Chu</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Prognostic assessment of cervical cancer patients by clinical staging and surgical-pathological factor: a support vector machine-based approach</article-title><source>Front Oncol</source><year>2020</year><volume>10</volume><fpage>1353</fpage><pub-id pub-id-type="doi">10.3389/fonc.2020.01353</pub-id><pub-id pub-id-type="medline">32850433</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>L</given-names> </name></person-group><article-title>Prognostic value of lymphovascular space invasion in stage IA to IIB cervical cancer: a meta-analysis</article-title><source>Medicine 
(Baltimore)</source><year>2023</year><month>04</month><day>14</day><volume>102</volume><issue>15</issue><fpage>e33547</fpage><pub-id pub-id-type="doi">10.1097/MD.0000000000033547</pub-id><pub-id pub-id-type="medline">37058045</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weyl</surname><given-names>A</given-names> </name><name name-style="western"><surname>Illac</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lusque</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Prognostic value of lymphovascular space invasion in early-stage cervical cancer</article-title><source>Int J Gynecol Cancer</source><year>2020</year><month>10</month><volume>30</volume><issue>10</issue><fpage>1493</fpage><lpage>1499</lpage><pub-id pub-id-type="doi">10.1136/ijgc-2020-001274</pub-id><pub-id pub-id-type="medline">32565486</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Marchiol&#x00E9;</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bu&#x00E9;nerd</surname><given-names>A</given-names> </name><name name-style="western"><surname>Benchaib</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nezhat</surname><given-names>K</given-names> </name><name name-style="western"><surname>Dargent</surname><given-names>D</given-names> </name><name name-style="western"><surname>Mathevet</surname><given-names>P</given-names> </name></person-group><article-title>Clinical significance of lympho vascular space involvement and lymph node micrometastases in early-stage cervical cancer: a retrospective case-control surgico-pathological study</article-title><source>Gynecol 
Oncol</source><year>2005</year><month>06</month><volume>97</volume><issue>3</issue><fpage>727</fpage><lpage>732</lpage><pub-id pub-id-type="doi">10.1016/j.ygyno.2005.01.004</pub-id><pub-id pub-id-type="medline">15943983</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pol</surname><given-names>FJM</given-names> </name><name name-style="western"><surname>Zusterzeel</surname><given-names>PLM</given-names> </name><name name-style="western"><surname>van Ham</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kuijpers</surname><given-names>DAT</given-names> </name><name name-style="western"><surname>Bulten</surname><given-names>J</given-names> </name><name name-style="western"><surname>Massuger</surname><given-names>L</given-names> </name></person-group><article-title>Satellite lymphovascular space invasion: an independent risk factor in early stage cervical cancer</article-title><source>Gynecol Oncol</source><year>2015</year><month>09</month><volume>138</volume><issue>3</issue><fpage>579</fpage><lpage>584</lpage><pub-id pub-id-type="doi">10.1016/j.ygyno.2015.06.035</pub-id><pub-id pub-id-type="medline">26126782</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>P</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>P</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Hazard ratio analysis of laparoscopic radical hysterectomy for IA1 With LVSI-IIA2 cervical cancer: identifying the possible contraindications of laparoscopic surgery for cervical cancer</article-title><source>Front Oncol</source><year>2020</year><volume>10</volume><pub-id 
pub-id-type="doi">10.3389/fonc.2020.01002</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abu-Rustum</surname><given-names>NR</given-names> </name><name name-style="western"><surname>Yashar</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Arend</surname><given-names>R</given-names> </name><etal/></person-group><article-title>NCCN Guidelines&#x00AE; Insights: cervical cancer, version 1.2024</article-title><source>J Natl Compr Canc Netw</source><year>2023</year><month>12</month><volume>21</volume><issue>12</issue><fpage>1224</fpage><lpage>1233</lpage><pub-id pub-id-type="doi">10.6004/jnccn.2023.0062</pub-id><pub-id pub-id-type="medline">38081139</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holtz</surname><given-names>DO</given-names> </name><name name-style="western"><surname>Dunton</surname><given-names>C</given-names> </name></person-group><article-title>Traditional management of invasive cervical cancer</article-title><source>Obstet Gynecol Clin North Am</source><year>2002</year><month>12</month><volume>29</volume><issue>4</issue><fpage>645</fpage><lpage>657</lpage><pub-id pub-id-type="doi">10.1016/s0889-8545(02)00023-2</pub-id><pub-id pub-id-type="medline">12509089</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>H</given-names> </name><name 
name-style="western"><surname>Li</surname><given-names>G</given-names> </name></person-group><article-title>CT, MRI, and PET imaging features in cervical cancer staging and lymph node metastasis</article-title><source>Am J Transl Res</source><year>2021</year><volume>13</volume><issue>9</issue><fpage>10536</fpage><lpage>10544</lpage><pub-id pub-id-type="doi">10.1259/bjr.20160363</pub-id><pub-id pub-id-type="medline">34650724</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cabello</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ziegler</surname><given-names>SI</given-names> </name></person-group><article-title>Advances in PET/MR instrumentation and image reconstruction</article-title><source>Br J Radiol</source><year>2018</year><month>01</month><volume>91</volume><issue>1081</issue><fpage>20160363</fpage><pub-id pub-id-type="doi">10.1259/bjr.20160363</pub-id><pub-id pub-id-type="medline">27376170</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lucia</surname><given-names>F</given-names> </name><name name-style="western"><surname>Visvikis</surname><given-names>D</given-names> </name><name name-style="western"><surname>Desseroit</surname><given-names>MC</given-names> </name><etal/></person-group><article-title>Prediction of outcome using pretreatment 18F-FDG PET/CT and MRI radiomics in locally advanced cervical cancer treated with chemoradiotherapy</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2018</year><month>05</month><volume>45</volume><issue>5</issue><fpage>768</fpage><lpage>786</lpage><pub-id pub-id-type="doi">10.1007/s00259-017-3898-7</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Park</surname><given-names>BK</given-names> </name><etal/></person-group><article-title>Postoperative outcomes of MR-invisible stage IB1 cervical cancer</article-title><source>Am J Obstet Gynecol</source><year>2014</year><month>08</month><volume>211</volume><issue>2</issue><fpage>168</fpage><pub-id pub-id-type="doi">10.1016/j.ajog.2014.02.032</pub-id><pub-id pub-id-type="medline">24607752</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Woo</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>HS</given-names> </name><name name-style="western"><surname>Chung</surname><given-names>HH</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>SY</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>JY</given-names> </name></person-group><article-title>Early stage cervical cancer: role of magnetic resonance imaging after conization in determining residual tumor</article-title><source>Acta Radiol</source><year>2016</year><month>10</month><volume>57</volume><issue>10</issue><fpage>1268</fpage><lpage>1276</lpage><pub-id pub-id-type="doi">10.1177/0284185115620948</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>G</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Multi-parametric magnetic resonance imaging-based radiomics analysis of cervical cancer for preoperative prediction of lymphovascular space invasion</article-title><source>Front Oncol</source><year>2021</year><volume>11</volume><fpage>663370</fpage><pub-id pub-id-type="doi">10.3389/fonc.2021.663370</pub-id><pub-id pub-id-type="medline">35096556</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Du</surname><given-names>W</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Preoperative prediction of lymphovascular space invasion in cervical cancer with radiomics -based nomogram</article-title><source>Front Oncol</source><year>2021</year><volume>11</volume><fpage>637794</fpage><pub-id pub-id-type="doi">10.3389/fonc.2021.637794</pub-id><pub-id pub-id-type="medline">34322375</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><etal/></person-group><article-title>MR-based radiomics nomogram of cervical cancer in prediction of the lymph-vascular space invasion preoperatively</article-title><source>J Magn Reson Imaging</source><year>2019</year><month>05</month><volume>49</volume><issue>5</issue><fpage>1420</fpage><lpage>1426</lpage><pub-id pub-id-type="doi">10.1002/jmri.26531</pub-id><pub-id 
pub-id-type="medline">30362652</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Z</given-names> </name></person-group><article-title>Radiomics analysis on T2-MR image to predict lymphovascular space invasion in cervical cancer</article-title><conf-name>Medical Imaging 2019: Computer-Aided Diagnosis</conf-name><conf-date>2019</conf-date><conf-loc>San Diego, California, USA</conf-loc></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>M</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Gu</surname><given-names>X</given-names> </name><etal/></person-group><article-title>The role of radiomics for predicting of lymph-vascular space invasion in cervical cancer patients based on artificial intelligence: a systematic review and meta-analysis</article-title><source>J Gynecol Oncol</source><year>2024</year><volume>36</volume><issue>2</issue><pub-id pub-id-type="doi">10.3802/jgo.2025.36.e26</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Teng</surname><given-names>C</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>MRI-based radiomics models for noninvasive evaluation of lymphovascular space invasion in cervical cancer: a 
systematic review and meta-analysis</article-title><source>Clin Radiol</source><year>2024</year><month>11</month><volume>79</volume><issue>11</issue><fpage>e1372</fpage><lpage>e1382</lpage><pub-id pub-id-type="doi">10.1016/j.crad.2024.07.018</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>MDF</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thombs</surname><given-names>BD</given-names> </name><etal/></person-group><article-title>Preferred reporting items for a systematic review and meta-analysis of diagnostic test accuracy studies: The PRISMA-DTA statement</article-title><source>JAMA</source><year>2018</year><month>01</month><day>23</day><volume>319</volume><issue>4</issue><fpage>388</fpage><lpage>396</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id><pub-id pub-id-type="medline">29362800</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiting</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Westwood</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title><source>Ann Intern Med</source><year>2011</year><month>10</month><day>18</day><volume>155</volume><issue>8</issue><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id><pub-id pub-id-type="medline">22007046</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wolff</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Moons</surname><given-names>KGM</given-names> </name><name name-style="western"><surname>Riley</surname><given-names>RD</given-names> </name><etal/></person-group><article-title>PROBAST: a tool to assess the risk of bias and applicability of prediction model studies</article-title><source>Ann Intern Med</source><year>2019</year><month>01</month><day>1</day><volume>170</volume><issue>1</issue><fpage>51</fpage><lpage>58</lpage><pub-id pub-id-type="doi">10.7326/M18-1376</pub-id><pub-id pub-id-type="medline">30596875</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhihui</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Linrui</surname><given-names>L</given-names> </name><name name-style="western"><surname>Long</surname><given-names>L</given-names> </name><name name-style="western"><surname>Qibing</surname><given-names>W</given-names> </name></person-group><article-title>Machine learning-based models for assessing postoperative risk factors in patients with cervical cancer</article-title><source>Acad Radiol</source><year>2024</year><month>04</month><volume>31</volume><issue>4</issue><fpage>1410</fpage><lpage>1418</lpage><pub-id pub-id-type="doi">10.1016/j.acra.2023.09.031</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Kan</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>MRI based radiomics approach with deep learning for prediction of vessel invasion in early-stage cervical cancer</article-title><source>IEEE/ACM Trans Comput Biol Bioinform</source><year>2021</year><volume>18</volume><issue>3</issue><fpage>995</fpage><lpage>1002</lpage><pub-id pub-id-type="doi">10.1109/TCBB.2019.2963867</pub-id><pub-id pub-id-type="medline">31905143</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>D</given-names> </name><name name-style="western"><surname>Dou</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Radiomics analysis of multiparametric MRI evaluates the pathological features of cervical squamous cell carcinoma</article-title><source>J Magn Reson Imaging</source><year>2019</year><month>04</month><volume>49</volume><issue>4</issue><fpage>1141</fpage><lpage>1148</lpage><pub-id pub-id-type="doi">10.1002/jmri.26301</pub-id><pub-id pub-id-type="medline">30230114</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>A multicenter study on preoperative assessment of lymphovascular space invasion in early-stage cervical cancer based on multimodal MR radiomics</article-title><source>J Magn Reson 
Imaging</source><year>2023</year><month>11</month><volume>58</volume><issue>5</issue><fpage>1638</fpage><lpage>1648</lpage><pub-id pub-id-type="doi">10.1002/jmri.28676</pub-id><pub-id pub-id-type="medline">36929220</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Habitat-based radiomics enhances the ability to predict lymphovascular space invasion in cervical cancer: a multi-center study</article-title><source>Front Oncol</source><year>2023</year><volume>13</volume><fpage>1252074</fpage><pub-id pub-id-type="doi">10.3389/fonc.2023.1252074</pub-id><pub-id pub-id-type="medline">37954078</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hua</surname><given-names>W</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Lymph-vascular space invasion prediction in cervical cancer: exploring radiomics and deep learning multilevel features of tumor and peritumor tissue on multiparametric MRI</article-title><source>Biomed Signal Process Control</source><year>2020</year><month>04</month><volume>58</volume><fpage>101869</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2020.101869</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shi</surname><given-names>J</given-names> 
</name><name name-style="western"><surname>Cui</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><etal/></person-group><article-title>MRI-based intratumoral and peritumoral radiomics on prediction of lymph-vascular space invasion in cervical cancer: a multi-center study</article-title><source>Biomed Signal Process Control</source><year>2022</year><month>02</month><volume>72</volume><fpage>103373</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2021.103373</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xiao</surname><given-names>M</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>F</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>G</given-names> </name><name name-style="western"><surname>Qiang</surname><given-names>J</given-names> </name></person-group><article-title>Multiparametric MRI radiomics nomogram for predicting lymph-vascular space invasion in early-stage cervical cancer</article-title><source>Br J Radiol</source><year>2022</year><month>06</month><day>1</day><volume>95</volume><issue>1134</issue><fpage>20211076</fpage><pub-id pub-id-type="doi">10.1259/bjr.20211076</pub-id><pub-id pub-id-type="medline">35312379</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cui</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Dong</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Luo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>X</given-names> </name></person-group><article-title>Multi-parametric MRI-based peritumoral radiomics on prediction of lymph-vascular space invasion in early-stage cervical cancer</article-title><source>Diagn Interv Radiol</source><year>2022</year><month>07</month><volume>28</volume><issue>4</issue><fpage>312</fpage><lpage>321</lpage><pub-id pub-id-type="doi">10.5152/dir.2022.20657</pub-id><pub-id pub-id-type="medline">35731710</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>NN</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lv</surname><given-names>YN</given-names> </name><name name-style="western"><surname>Li</surname><given-names>SD</given-names> </name></person-group><article-title>An MRI radiomics-based model for the prediction of invasion of the lymphovascular space in patients with cervical cancer</article-title><source>Front Oncol</source><year>2024</year><volume>14</volume><fpage>1394427</fpage><pub-id pub-id-type="doi">10.3389/fonc.2024.1394427</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Dong</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Multi-parametric MRI combined with radiomics for the evaluation of lymphovascular space invasion in cervical cancer</article-title><source>Clin Exp Obstet 
Gynecol</source><year>2024</year><volume>51</volume><issue>4</issue><pub-id pub-id-type="doi">10.31083/j.ceog5104081</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>C</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>H</given-names> </name></person-group><article-title>Prediction of lymphovascular space invasion using a combination of tenascin-C, cox-2, and PET/CT radiomics in patients with early-stage cervical squamous cell carcinoma</article-title><source>BMC Cancer</source><year>2021</year><pub-id pub-id-type="doi">10.1186/s12885-021-08596-9</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mahmood</surname><given-names>T</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Akhtar</surname><given-names>F</given-names> </name></person-group><article-title>An automated in-depth feature learning algorithm for breast abnormality prognosis and robust characterization from mammography images using deep transfer learning</article-title><source>Biology (Basel)</source><year>2021</year><month>09</month><day>2</day><volume>10</volume><issue>9</issue><fpage>859</fpage><pub-id pub-id-type="doi">10.3390/biology10090859</pub-id><pub-id pub-id-type="medline">34571736</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tran</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Kondrashova</surname><given-names>O</given-names> </name><name name-style="western"><surname>Bradley</surname><given-names>A</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>ED</given-names> </name><name name-style="western"><surname>Pearson</surname><given-names>JV</given-names> </name><name name-style="western"><surname>Waddell</surname><given-names>N</given-names> </name></person-group><article-title>Deep learning in cancer diagnosis, prognosis and treatment selection</article-title><source>Genome Med</source><year>2021</year><month>09</month><day>27</day><volume>13</volume><issue>1</issue><fpage>152</fpage><pub-id pub-id-type="doi">10.1186/s13073-021-00968-x</pub-id><pub-id pub-id-type="medline">34579788</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bejani</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Ghatee</surname><given-names>M</given-names> </name></person-group><article-title>A systematic review on overfitting control in shallow and deep neural networks</article-title><source>Artif Intell Rev</source><year>2021</year><month>12</month><volume>54</volume><issue>8</issue><fpage>6391</fpage><lpage>6438</lpage><pub-id pub-id-type="doi">10.1007/s10462-021-09975-1</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adam</surname><given-names>JA</given-names> </name><name name-style="western"><surname>van Diepen</surname><given-names>PR</given-names> </name><name name-style="western"><surname>Mom</surname><given-names>CH</given-names> </name><name 
name-style="western"><surname>Stoker</surname><given-names>J</given-names> </name><name name-style="western"><surname>van Eck-Smit</surname><given-names>BLF</given-names> </name><name name-style="western"><surname>Bipat</surname><given-names>S</given-names> </name></person-group><article-title>[<sup>18</sup>F]FDG-PET or PET/CT in the evaluation of pelvic and para-aortic lymph nodes in patients with locally advanced cervical cancer: a systematic review of the literature</article-title><source>Gynecol Oncol</source><year>2020</year><month>11</month><volume>159</volume><issue>2</issue><fpage>588</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1016/j.ygyno.2020.08.021</pub-id><pub-id pub-id-type="medline">32921477</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vargas-Cardona</surname><given-names>HD</given-names> </name><name name-style="western"><surname>Rodriguez-Lopez</surname><given-names>M</given-names> </name><name name-style="western"><surname>Arrivillaga</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Artificial intelligence for cervical cancer screening: scoping review, 2009-2022</article-title><source>Int J Gynaecol Obstet</source><year>2024</year><month>05</month><volume>165</volume><issue>2</issue><fpage>566</fpage><lpage>578</lpage><pub-id pub-id-type="doi">10.1002/ijgo.15179</pub-id><pub-id pub-id-type="medline">37811597</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Allahqoli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Lagan&#x00E0;</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Mazidimoradi</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Diagnosis of 
cervical cancer and pre-cancerous lesions by artificial intelligence: a systematic review</article-title><source>Diagnostics (Basel)</source><year>2022</year><volume>12</volume><issue>11</issue><fpage>2771</fpage><pub-id pub-id-type="doi">10.3390/diagnostics12112771</pub-id><pub-id pub-id-type="medline">36428831</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esteva</surname><given-names>A</given-names> </name><name name-style="western"><surname>Robicquet</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ramsundar</surname><given-names>B</given-names> </name><etal/></person-group><article-title>A guide to deep learning in healthcare</article-title><source>Nat Med</source><year>2019</year><month>01</month><volume>25</volume><issue>1</issue><fpage>24</fpage><lpage>29</lpage><pub-id pub-id-type="doi">10.1038/s41591-018-0316-z</pub-id><pub-id pub-id-type="medline">30617335</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Additional materials.</p><media xlink:href="jmir_v27i1e71091_app1.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>PRISMA (2020) checklist.</p><media xlink:href="jmir_v27i1e71091_app2.pdf" xlink:title="PDF File, 74 KB"/></supplementary-material></app-group></back></article>