<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v28i1e85414</article-id><article-id pub-id-type="doi">10.2196/85414</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Multimodal AI for Alzheimer Disease Diagnosis: Systematic Review of Datasets, Models, and Modalities</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Yu</surname><given-names>Ziwen</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Mulholland</surname><given-names>Anthony</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Huang</surname><given-names>Tianyan</given-names></name><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" corresp="yes"><name 
name-style="western"><surname>Liu</surname><given-names>Qiang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>School of Engineering Mathematics and Technology, University of Bristol</institution><addr-line>Tankard's Close, Ada Lovelace Building</addr-line><addr-line>Bristol</addr-line><country>United Kingdom</country></aff><aff id="aff2"><institution>Medical Physics and Biomedical Engineering, University College London</institution><addr-line>London</addr-line><country>United Kingdom</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Brini</surname><given-names>Stefano</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Elkourdi</surname><given-names>Farah</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sikder</surname><given-names>Mohammad Mamun</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Oyetunji</surname><given-names>Oladayo</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Jiang</surname><given-names>Shan</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Qiang Liu, PhD, School of Engineering Mathematics and Technology, University of Bristol, Tankard's Close, Ada Lovelace Building, Bristol, BS8 1TW, United Kingdom, 44 01173746653; <email>qiang.liu@bristol.ac.uk</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>25</day><month>3</month><year>2026</year></pub-date><volume>28</volume><elocation-id>e85414</elocation-id><history><date 
date-type="received"><day>07</day><month>10</month><year>2025</year></date><date date-type="rev-recd"><day>09</day><month>01</month><year>2026</year></date><date date-type="accepted"><day>09</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Ziwen Yu, Anthony Mulholland, Tianyan Huang, Qiang Liu. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 25.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2026/1/e85414"/><abstract><sec><title>Background</title><p>Early detection of Alzheimer disease (AD) is essential for timely intervention; yet, diagnostic performance varies widely across modalities and datasets. 
Recent multimodal artificial intelligence (AI) models have made significant progress, but the evidence base remains fragmented due to heterogeneous datasets, modeling frameworks, and reporting quality.</p></sec><sec><title>Objective</title><p>This systematic review aimed to analyze studies on multimodal AI models for AD diagnosis, prognosis, and risk prediction over 5 years. We evaluated dataset characteristics, modality combinations, modeling strategies, performance metrics, and methodological limitations. We further discuss real-world implications and translational pathways.</p></sec><sec sec-type="methods"><title>Methods</title><p>Following PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 guidelines, we systematically searched PubMed, IEEE Xplore, Scopus, ACM Digital Library, Cochrane, and arXiv, with the final datasets last searched on November 15, 2025. Studies applying multimodal machine learning or deep learning to AD, mild cognitive impairment, and dementia outcomes were included, whereas studies using a single modality or lacking sufficient methodological detail were excluded. QUADAS-2 (Revised Quality Assessment of Diagnostic Accuracy Studies tool) assessed risk of bias. Extracted performance results were synthesized across 4 major multimodal dataset families.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 66 studies met the inclusion criteria. Across datasets, multimodal models consistently outperformed single-modal baselines. Alzheimer&#x2019;s Disease Neuroimaging Initiative&#x2013;based diagnosis achieved an average accuracy of 92.5% (SD 3.8%), while mild cognitive impairment&#x2013;conversion models achieved an average area under the curve (AUC) of 0.922 (SD 0.045), and several fusion architectures reported AUCs above 0.95. In contrast, UK Biobank risk-prediction studies reported an average AUC of 0.84 (SD 0.056), and this reflects performance in large, population-based datasets. 
DementiaBank speech-language studies achieved an average AUC of 0.813 (SD 0.042), and cross-lingual AD detection achieved an accuracy of 77% (SD 6.5%). Self-collected multimodal datasets demonstrated average accuracies around 96% (SD 2.4%), but their generalizability is limited due to small sample sizes and single-center designs.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This systematic review demonstrates that multimodal AI models consistently outperform single-modal models for AD diagnosis, prognosis, and risk prediction by integrating complementary biological, clinical, and behavioral information. Unlike prior reviews, this review provides a unified synthesis across heterogeneous clinical, imaging, genetic, and linguistic datasets, enabling cross-domain comparison of modeling strategies and performance. However, the generalizability of reported performance was limited due to substantial heterogeneity in dataset composition, outcome definitions, and validation, and prevalent risks of bias. By evaluating these factors, this review clarifies where current evidence is robust and where caution is warranted. The findings highlight the need for standardized multimodal benchmarks, transparent evaluation protocols, and clinically grounded model design to enable reliable real-world deployment. 
Overall, this work advances the field by framing multimodal AI not only as a performance-driven tool but also as a translational framework for equitable, interpretable, and scalable AD diagnosis.</p></sec><sec><title>Trial Registration</title><p>PROSPERO CRD420251241895; <ext-link ext-link-type="uri" xlink:href="https://www.crd.york.ac.uk/PROSPERO/view/CRD420251241895">https://www.crd.york.ac.uk/PROSPERO/view/CRD420251241895</ext-link></p></sec></abstract><kwd-group><kwd>Alzheimer&#x2019;s disease</kwd><kwd>neurodegenerative disease</kwd><kwd>multimodal dataset</kwd><kwd>machine learning</kwd><kwd>deep learning</kwd><kwd>multimodal fusion</kwd><kwd>computer-aided diagnosis</kwd><kwd>early diagnosis</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Alzheimer disease (AD) is the most prevalent neurodegenerative disorder and the leading cause of dementia worldwide [<xref ref-type="bibr" rid="ref1">1</xref>]. With an aging global population, AD has become one of the most costly and deadly diseases of the 21st century, imposing profound emotional, financial, and caregiving burdens on patients, families, and health systems. By 2050, the number of people with AD is projected to rise from 55 million in 2020 to approximately 139 million [<xref ref-type="bibr" rid="ref1">1</xref>].</p><p>The progression of AD includes the preclinical stage, mild cognitive impairment (MCI), and symptomatic stages, with varying degrees of symptom severity. The preclinical stage is a key window for intervention, during which neuropathological changes have commenced, but clinical symptoms remain largely undetectable [<xref ref-type="bibr" rid="ref2">2</xref>]. Despite advances in awareness and screening, up to 75% of dementia cases remain undiagnosed worldwide, particularly in low- and middle-income countries [<xref ref-type="bibr" rid="ref3">3</xref>]. 
This persistent diagnostic gap highlights the need for low-cost, scalable, and accurate early detection tools to enable timely intervention and slow disease progression [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>Artificial intelligence (AI) has emerged as a promising approach for improving the early detection and management of AD. By systematically integrating and analyzing multimodal data, AI-based diagnostic frameworks offer powerful tools to enhance early detection accuracy and facilitate timely intervention.</p><p>Recent work has used transformer-based models to integrate imaging, genetic, and linguistic data. Multimodal transformers combining magnetic resonance imaging (MRI) or positron emission tomography (PET) with clinical features and cognitive assessments have reported improved diagnostic accuracy and interpretability [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. In parallel, GPT-style architectures, BERT (Bidirectional Encoder Representations From Transformers) variants, and domain-adapted language models improve extraction of linguistic and semantic markers linked to early cognitive decline [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Self-supervised speech models also perform strongly for detecting MCI and early AD from spontaneous speech [<xref ref-type="bibr" rid="ref10">10</xref>]. 
Together, these advances reflect a shift toward unified, more interpretable, and clinically translatable multimodal systems that capture both biological and behavioral aspects of AD.</p><p>Traditional machine learning (ML) [<xref ref-type="bibr" rid="ref11">11</xref>], ensemble methods [<xref ref-type="bibr" rid="ref12">12</xref>], deep learning [<xref ref-type="bibr" rid="ref13">13</xref>], and reinforcement learning (RL) [<xref ref-type="bibr" rid="ref14">14</xref>] can perform well on unimodal data, but clinical diagnosis integrates structural and behavioral information [<xref ref-type="bibr" rid="ref15">15</xref>]. Unimodal AI can therefore diverge from clinical workflows and miss complementary signals (eg, MRI for structural change plus speech features for cognitive decline [<xref ref-type="bibr" rid="ref16">16</xref>]), increasing the risk of modality-specific overfitting and poorer real-world performance. Accordingly, recent work has shifted toward multimodal integration for AD diagnosis, yet many studies emphasize incremental accuracy gains while underaddressing generalizability, interpretability, and cost-effectiveness needed for adoption. The literature also remains fragmented: recent reviews often cover multimodal clinical phenotyping datasets [<xref ref-type="bibr" rid="ref17">17</xref>] and multimodal linguistic cognitive-impairment datasets [<xref ref-type="bibr" rid="ref18">18</xref>] separately, obscuring cross-modal insights such as how imaging and speech biomarkers might jointly improve early detection.</p><p>Recent multimodal methods have substantially improved AD detection. However, a comprehensive systematic review that integrates evidence across both clinical and linguistic modalities, fusion strategies, and critically evaluates methodological quality, dataset diversity, and reporting transparency is still lacking. 
To address these gaps, this review investigates how multimodal models are applied to AD diagnosis, prognosis, and risk prediction and compares performance across different modality combinations and dataset families published between 2019 and 2025. We also examine modeling and fusion strategies alongside validation practices and assess methodological quality and risk of bias using QUADAS-2 (Revised Quality Assessment of Diagnostic Accuracy Studies tool). Furthermore, key multimodal combinations within public datasets are analyzed in relation to their diagnostic performance, and datasets are categorized to evaluate their suitability for AD research and clinical translation. Overall, this review provides a comprehensive synthesis of multimodal AI in AD diagnosis, bridges previously disconnected research streams, and offers practical guidance for future model development and clinical adoption.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>This review was conducted in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 guidelines [<xref ref-type="bibr" rid="ref19">19</xref>], with the search procedures reported following PRISMA-S (Preferred Reporting Items for Systematic Reviews and Meta-Analyses literature search extension) [<xref ref-type="bibr" rid="ref20">20</xref>] and developed using the principles outlined in the Cochrane Handbook [<xref ref-type="bibr" rid="ref21">21</xref>]. These methods were applied to systematically identify and evaluate studies on computer-aided AD diagnosis, with a particular focus on those using multimodal clinical phenotyping datasets and multimodal linguistic-based cognitive impairment datasets.</p></sec><sec id="s2-2"><title>Source of the Study and Search Criteria</title><p>We developed and internally reviewed independent search strategies (no external peer review). 
We manually searched multiple databases to identify AI-driven multimodal approaches for AD diagnosis, rather than using an integrated multidatabase platform. As this review targets methodological advances mainly reported in peer-reviewed computational literature, we did not search trial registries (ClinicalTrials.gov, World Health Organization&#x2019;s International Clinical Trials Registry Platform). We also avoided validated or published filters, instead iteratively refining customized controlled-vocabulary and free-text terms for AD or dementia, multimodal data, and AI through pilot screening to maximize sensitivity.</p><p>Searches were performed in PubMed (447 records; January 1, 2019, to November 13, 2025), Scopus (1086 records; all years through November 13, 2025, filtered to PUBYEAR &#x003E; 2018), IEEE Xplore (2229 records; January 1, 2020, to November 13, 2025), ACM Digital Library (2067 records; January 1, 2020, to November 15, 2025), Cochrane Library (1061 records; all available years through November 15, 2025), and arXiv (1081 records; all available years through November 15, 2025). 
We included the verbatim search strings for all databases, and because arXiv does not support bulk export, an arXiv search Python (Python Software Foundation) script is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref44">44</xref>].</p></sec><sec id="s2-3"><title>Eligibility Criteria</title><p>Studies were considered eligible if they met all of the following conditions: (1) focused on AD, MCI, or related dementias as the primary clinical outcome; (2) applied AI or ML methods for computer-aided diagnosis, classification, or prediction; (3) used multimodal data, defined as any combination of at least two distinct modalities (eg, neuroimaging, clinical phenotyping, genetics, or linguistic features); (4) reported quantitative evaluation metrics; and (5) were written in English.</p><p>Studies were excluded if they met any of the following conditions: (1) single-modal approaches using only a single imaging modality, cognitive test, or biomarker, without any multimodal integration; (2) works without reported performance metrics or with insufficient methodological detail; (3) works not addressing diagnosis, classification, or prediction (eg, treatment response, drug trials, and lifestyle interventions); (4) duplicate publications or overlapping datasets without providing additional methodological contribution; and (5) non-English publications.</p></sec><sec id="s2-4"><title>Selection Process</title><p>The study selection process followed the PRISMA 2020 guidelines, and the protocol was registered. The final search update was conducted in November 2025. All records retrieved from the databases were first imported into Zotero, where duplicates were automatically detected and removed.</p><p>The initial search identified 7435 records. 
After removing 3047 duplicates, 4388 records remained for title and abstract screening. A total of 4021 records were obviously irrelevant at the title and abstract level, and 252 studies were excluded for the following main reasons:</p><list list-type="bullet"><list-item><p>Focused on outcomes unrelated to AD diagnosis, classification, or prediction (eg, drug trials, treatment response, lifestyle interventions; n=140).</p></list-item><list-item><p>Used unimodal data without multimodal integration (n=46).</p></list-item><list-item><p>Insufficient methodological details (n=47).</p></list-item></list><p>Finally, 66 studies were included in the systematic synthesis, and all were successfully retrieved (reports not retrieved=0).</p></sec><sec id="s2-5"><title>Overview of AI-Assisted AD Diagnosis</title><p>The workflow of AI-assisted AD diagnosis involves 3 stages, as illustrated in <xref ref-type="fig" rid="figure1">Figure 1</xref>. The initial stage involves comprehensive data acquisition, where information is collected from multiple modalities, including neuroimaging, biomarkers, genetics, and speech or behavioral signals. The second stage involves feature extraction and model development, followed by an interpretable and explainable analysis to ensure that AI models can effectively support clinical decision-making.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Overview of the AI pipeline for AD diagnosis. Multimodal inputs undergo preprocessing and feature extraction before model training for classification or regression tasks. Model interpretability supports explanation and performance evaluation. 
AD: Alzheimer disease; AI: artificial intelligence; LIME: Local Interpretable Model-Agnostic Explanations; SHAP: Shapley Additive Explanations; XAI: explainable artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e85414_fig01.png"/></fig></sec><sec id="s2-6"><title>Performance Evaluation Metrics</title><p>To ensure the clinical applicability and scientific rigor of computer-aided diagnosis models for AD, it is essential to systematically evaluate their performance using a variety of quantitative metrics. We have summarized all performance evaluation metrics in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec><sec id="s2-7"><title>Risk of Bias and Quality Assessment</title><p>We assessed methodological quality with QUADAS-2 [<xref ref-type="bibr" rid="ref45">45</xref>], evaluating risk of bias in 4 domains. Patient selection raised the main concern: 61% (40/66) of outcomes were high risk due to poor reporting or nonrepresentative sampling. For the index test, 76% (50/66) were unclear risk because procedures and decision thresholds were insufficiently described, and 20% (13/66) were high risk. The reference standard showed 76% (50/66) unclear risk from limited methodological detail, with no high-risk ratings. Flow and timing had the greatest uncertainty: 85% (56/66) were unclear owing to missing information on testing intervals and participant flow. <xref ref-type="fig" rid="figure2">Figure 2</xref> summarizes domain-level risk distributions; <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> reports study-level assessments.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Summary of the QUADAS-2 plot across the 66 included studies in the domain. 
QUADAS-2: Revised Quality Assessment of Diagnostic Accuracy Studies tool.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e85414_fig02.png"/></fig><p>Given frequent unclear and high risk in key domains, we interpreted diagnostic performance cautiously, especially without external validation or a clearly defined reference standard. Future benchmarking should emphasize transparent reporting, prespecified thresholds, and multicenter evaluation to reduce bias and improve reproducibility.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>Following study selection (the complete selection process is summarized in the PRISMA 2020 flow diagram, <xref ref-type="fig" rid="figure3">Figure 3</xref>), we first summarize the overall profile of the included literature to contextualize the subsequent synthesis. <xref ref-type="fig" rid="figure4">Figure 4</xref> provides a temporal overview (2019&#x2010;2025) of modeling approaches across included studies, illustrating how methodological focus has shifted over time and informing interpretation of the evidence base.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Flow diagram of PRISMA. AD: Alzheimer disease; PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e85414_fig03.png"/></fig><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Temporal trends of machine-learning methods used for AD diagnosis (2019&#x2010;2025). 
AD: Alzheimer disease.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v28i1e85414_fig04.png"/></fig></sec><sec id="s3-2"><title>Single Modality</title><sec id="s3-2-1"><title>Overview</title><p>If readers are already familiar with traditional ML and deep learning approaches, they may wish to proceed directly to the next section, which focuses on multimodal data integration for AD diagnosis.</p><p>A concise overview of these baseline methods is provided in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>, which also includes a summary of RL. As most RL studies address sequential decision-making tasks rather than direct diagnostic modeling, their methodological details are presented in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>, to maintain focus on multimodal diagnostic frameworks in the main text.</p></sec><sec id="s3-2-2"><title>Deep Learning</title><p>Compared with traditional ML, deep learning enables hierarchical feature extraction, capturing complex patterns in high-dimensional data. It is therefore widely used to process and integrate AD-related multimodal inputs, including neuroimaging, clinical scores, genetics, and speech. Key approaches and findings are summarized below.</p><p>Recurrent neural networks are effective for modeling sequential data such as longitudinal clinical records and speech signals, but they are susceptible to vanishing gradients in long sequences [<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref49">49</xref>]. Long short-term memory networks address this limitation through gated memory mechanisms, enabling more stable training and improved capture of long-term dependencies. 
Consequently, long short-term memory models have been widely applied in AD research for analyzing temporal and sequential modalities [<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref53">53</xref>].</p><p>The transformer model, which leverages attention mechanisms, dynamically assigns different weights to input features based on their relative importance. Each layer of the transformer consists of multiple attention heads, allowing the model to capture diverse feature representations by attending to various aspects. Transformer models use attention mechanisms to weight input features and capture diverse representations through multihead attention, enabling efficient and scalable training [<xref ref-type="bibr" rid="ref54">54</xref>]. Owing to these advantages, they have been widely adopted in AD diagnosis and multimodal learning, where their encoder-decoder architecture facilitates effective integration of heterogeneous data sources [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref57">57</xref>].</p></sec></sec><sec id="s3-3"><title>Ensemble Learning</title><p>Ensemble learning improves generalization and robustness by combining multiple base models, including bagging and boosting methods such as AdaBoost (Adaptive Boosting), XGBoost (Extreme Gradient Boosting), and LightGBM (Light Gradient-Boosting Machine), and has been widely applied in AD detection and progression prediction [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref58">58</xref>-<xref ref-type="bibr" rid="ref60">60</xref>]. 
However, ensemble models may introduce redundant features, offer limited gains on small datasets, and incur higher computational costs, which can restrict real-time or resource-constrained deployment.</p></sec><sec id="s3-4"><title>Summarization for Single Modality</title><p>Traditional single-modality ML approaches can achieve high performance in AD-related tasks; however, they are constrained by several inherent limitations:</p><p>First, regarding information completeness, structural MRI alone has limited sensitivity to functional and molecular changes and cannot fully capture AD-related cognitive and behavioral alterations. Combining MRI with PET, neuropsychological tests, speech, electroencephalography (EEG), and genetic or biomarker data provides a more complete, multidimensional view of disease progression and patient heterogeneity [<xref ref-type="bibr" rid="ref61">61</xref>].</p><p>Second, regarding model robustness, in multimodal data, residual noise in one modality may persist despite denoising, but other modalities can provide complementary signals that improve robustness. Leveraging multisensory-style integration, multimodal models better reflect biological cognition and can yield more reliable decisions [<xref ref-type="bibr" rid="ref62">62</xref>].</p><p>Third, in cross-modal learning, transformer architectures use cross-modal attention to learn associations between modalities. Some studies apply them in weakly supervised or cross-modal guided settings, using one modality to constrain or guide representation learning in another [<xref ref-type="bibr" rid="ref63">63</xref>].</p><p>Fourth, in real-world decision-making, multimodal learning better matches real-world diagnosis, which integrates multiple information sources. 
Using diverse modalities aligns models with clinical workflows and improves translational potential in practice [<xref ref-type="bibr" rid="ref64">64</xref>].</p><p>Therefore, this review analyzes the methodological strengths and limitations of multimodal models for computer-assisted AD diagnosis, focusing on how dataset grouping and classification choices affect evaluation. By classifying datasets, we enable model comparisons under a unified setup, allowing for more direct assessment of generalizability and cross-dataset stability.</p></sec><sec id="s3-5"><title>Multimodal Data</title><sec id="s3-5-1"><title>Multimodal Dataset Overview</title><p>High-quality data plays a pivotal role in training AI models for computer-aided diagnosis and detection. Robust datasets not only enhance the generalization ability of models but also help mitigate the risk of overfitting. Commonly used datasets for AI-assisted diagnosis of AD can be broadly categorized into 2 types. The first type of multimodal clinical phenotyping dataset, such as the Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI), UK Biobank, and the Open Access Series of Imaging Studies (OASIS), focuses on neuroimaging modalities, including MRI, functional MRI, genetic data, and electronic health records. The second type&#x2014;multimodal cognitive-linguistic behavioral dataset centers on sequential data modalities, including audio, video, and transcribed language data, such as the Pitt Corpus and ADReSS (Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech). Each dataset type provides unique features that contribute to the comprehensive modeling of AD progression and diagnosis. 
The commonly used datasets, along with their population demographics and associated modalities, are summarized in <xref ref-type="table" rid="table1">Table 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Commonly used dataset.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Dataset</td><td align="left" valign="bottom" colspan="2">Population demographics</td><td align="left" valign="bottom">Modalities</td><td align="left" valign="bottom">Link</td></tr><tr><td align="left" valign="bottom" colspan="2"/><td align="left" valign="bottom">Male</td><td align="left" valign="bottom">Age (years)</td><td align="left" valign="bottom"/><td align="left" valign="bottom"/></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">UK Biobank</td><td align="left" valign="top">23,000 (46)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">56.5 (8.1)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>, fMRI<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup>, genetic, lifestyle scores, activity monitor, and EHR<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup></td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref65">65</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">ADNI<sup><xref ref-type="table-fn" rid="table1fn6">f</xref></sup></td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADNI-1</td><td align="left" valign="top">469 (57.3)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">75 (6.9)<sup><xref ref-type="table-fn" 
rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET<sup><xref ref-type="table-fn" rid="table1fn7">g</xref></sup>, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref66">66</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADNI-GO<sup><xref ref-type="table-fn" rid="table1fn8">h</xref></sup> and 2</td><td align="left" valign="top">473 (53)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">72.5 (7.2)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref66">66</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADNI-3</td><td align="left" valign="top">471 (49)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">74.9 (8.1)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref66">66</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">OASIS<sup><xref ref-type="table-fn" rid="table1fn9">i</xref></sup></td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>OASIS-1</td><td align="left" valign="top">177 (42.5)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">57 (39)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, CT, genetic, and EHR</td><td align="left" 
valign="top">[<xref ref-type="bibr" rid="ref67">67</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>OASIS-2</td><td align="left" valign="top">60 (40)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">78 (18)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, CT, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref67">67</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>OASIS-3</td><td align="left" valign="top">622 (45)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">69 (26.56)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, CT, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref67">67</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>OASIS-4</td><td align="left" valign="top">663 subjects</td><td align="left" valign="top">57.5 (36.5)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, CT, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref67">67</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">NACC<sup><xref ref-type="table-fn" rid="table1fn10">j</xref></sup></td><td align="left" valign="top">23,625 (44.62)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">73.3 (10.5)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, genetics, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" 
rid="ref68">68</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">FHS<sup><xref ref-type="table-fn" rid="table1fn11">k</xref></sup></td><td align="left" valign="top">718 (42)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">80.76 (8.2)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref69">69</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">AIBL<sup><xref ref-type="table-fn" rid="table1fn12">l</xref></sup></td><td align="left" valign="top">289 (43.72)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">73.5 (7.03)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">MRI, PET, genetic, and EHR</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref70">70</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">Dementia bank</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Pitt corpus</td><td align="left" valign="top">HC<sup><xref ref-type="table-fn" rid="table1fn13">m</xref></sup>: 104, AD<sup><xref ref-type="table-fn" rid="table1fn14">n</xref></sup>: 208/552</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table1fn15">o</xref></sup></td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref71">71</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADReSS<sup><xref ref-type="table-fn" rid="table1fn16">p</xref></sup></td><td align="left" valign="top">HC: 78, AD: 78/156</td><td 
align="left" valign="top">&#x2014;</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref71">71</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADReSSo<sup><xref ref-type="table-fn" rid="table1fn17">q</xref></sup></td><td align="left" valign="top">HC: 115, AD: 122/237</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref71">71</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADReSS-M<sup><xref ref-type="table-fn" rid="table1fn18">r</xref></sup></td><td align="left" valign="top">HC: 143, AD: 148/291</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref71">71</xref>]</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>TAUKADIAL<sup><xref ref-type="table-fn" rid="table1fn19">s</xref></sup></td><td align="left" valign="top">106/507</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref71">71</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">Multimodal dementia corpus</td><td align="left" valign="top">HC: 10, AD: 12/816</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">Audio, typed, and hand-written</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref72">72</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">ADReFV<sup><xref ref-type="table-fn" rid="table1fn20">t</xref></sup></td><td align="left" valign="top">AD: 25</td><td align="left" valign="top">66.68 
(2.08)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">Video</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref73">73</xref>]</td></tr><tr><td align="left" valign="top" colspan="2">GENCODE<sup><xref ref-type="table-fn" rid="table1fn21">u</xref></sup></td><td align="left" valign="top">Human: 78,686</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">Genetics</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref74">74</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>n (%).</p></fn><fn id="table1fn2"><p><sup>b</sup>Mean (SD).</p></fn><fn id="table1fn3"><p><sup>c</sup>MRI: magnetic resonance imaging.</p></fn><fn id="table1fn4"><p><sup>d</sup>fMRI: functional magnetic resonance imaging.</p></fn><fn id="table1fn5"><p><sup>e</sup>EHR: electronic health record.</p></fn><fn id="table1fn6"><p><sup>f</sup>ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative.</p></fn><fn id="table1fn7"><p><sup>g</sup>PET: positron emission tomography.</p></fn><fn id="table1fn8"><p><sup>h</sup>ADNI-GO: Alzheimer&#x2019;s Disease Neuroimaging Initiative &#x2013; Grand Opportunity.</p></fn><fn id="table1fn9"><p><sup>i</sup>OASIS: Open Access Series of Imaging Studies.</p></fn><fn id="table1fn10"><p><sup>j</sup>NACC: National Alzheimer&#x2019;s Coordinating Centre.</p></fn><fn id="table1fn11"><p><sup>k</sup>FHS: Framingham Heart Study. 
</p></fn><fn id="table1fn12"><p><sup>l</sup>AIBL: Australian Imaging, Biomarkers and Lifestyle Study.</p></fn><fn id="table1fn13"><p><sup>m</sup>HC: healthy control.</p></fn><fn id="table1fn14"><p><sup>n</sup>AD: Alzheimer disease.</p></fn><fn id="table1fn15"><p><sup>o</sup>Not available.</p></fn><fn id="table1fn16"><p><sup>p</sup>ADReSS: Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech.</p></fn><fn id="table1fn17"><p><sup>q</sup>ADReSSo: Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech 2021 Challenge.</p></fn><fn id="table1fn18"><p><sup>r</sup>ADReSS-M: Multilingual Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech Challenge.</p></fn><fn id="table1fn19"><p><sup>s</sup>TAUKADIAL: Speech-Based Cognitive Assessment in Chinese and English. </p></fn><fn id="table1fn20"><p><sup>t</sup>ADReFV: Alzheimer&#x2019;s Disease Recognition from Face &#x0026; Voice.</p></fn><fn id="table1fn21"><p><sup>u</sup>GENCODE: gene code.</p></fn></table-wrap-foot></table-wrap><p>To better understand current research trends, this paper reviewed studies from the past 5 years on multimodal AI models for AD diagnosis. The literature was categorized by dataset type. (1) Multimodal clinical phenotyping datasets: ADNI dominates this category, used in about 80% of studies, while others, such as UK Biobank, OASIS, National Alzheimer&#x2019;s Coordinating Centre (NACC), Framingham Heart Study, and Australian Imaging, Biomarkers and Lifestyle Study, account for the remaining 20%, mainly for supplementary analysis or external validation. (2) Multimodal cognitive-linguistic behavioral datasets: The ADReSS series is most widely used, representing around 70% of studies. 
The remaining 30% use other datasets, often for complementary analysis or benchmarking.</p></sec><sec id="s3-5-2"><title>Multimodal Clinical Phenotyping Datasets</title><p>Multimodal clinical phenotyping datasets integrate MRI, PET, or diffusion tensor imaging, biomarkers from blood, cerebrospinal fluid, or genomics, and standardized cognitive assessments. This review summarizes representative resources, highlighting their modalities, distinguishing features, and contributions to diagnostic modeling (<xref ref-type="table" rid="table2">Table 2</xref>).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Papers related to multimodal clinical phenotyping datasets. The exceptionally high performances reported in some of these studies can be attributed to specific methodological factors: (1) for two studies [<xref ref-type="bibr" rid="ref75">75</xref>] and [<xref ref-type="bibr" rid="ref76">76</xref>], the absence of external validation likely inflated the results; (2) for another study [<xref ref-type="bibr" rid="ref77">77</xref>], the use of a small but highly controlled dataset, extensive sample expansion, multimodal feature fusion, and pronounced disease-related electrophysiological signatures contributed to the elevated accuracy; and (3) for yet another study [<xref ref-type="bibr" rid="ref78">78</xref>], the integration of rich gait features with optimized machine learning techniques in a controlled experimental setting facilitated unusually high performance.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Datasets</td><td align="left" valign="bottom">Model type</td><td align="left" valign="bottom">Type of task</td><td align="left" valign="bottom">Modalities</td><td align="left" valign="bottom">Outcomes</td><td align="left" valign="bottom">Validation</td><td align="left" valign="bottom">Results</td><td align="left" 
valign="bottom">Limitation</td></tr></thead><tbody><tr><td align="left" valign="top">Xue et al [<xref ref-type="bibr" rid="ref79">79</xref>], 2024</td><td align="left" valign="top">NACC<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>, ADNI<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>, AIBL<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup>, FHS<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup>, PPMI<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup>, and OASIS<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup></td><td align="left" valign="top">Transformer-based multimodal model</td><td align="left" valign="top">Differential diagnosis of 10 etiologies</td><td align="left" valign="top">MRI<sup><xref ref-type="table-fn" rid="table2fn7">g</xref></sup> (T1, T2, FLAIR<sup><xref ref-type="table-fn" rid="table2fn8">h</xref></sup>), clinical, neuropsychological tests, and PET<sup><xref ref-type="table-fn" rid="table2fn9">i</xref></sup></td><td align="left" valign="top">Differential diagnosis probabilities and AD<sup><xref ref-type="table-fn" rid="table2fn10">j</xref></sup>/MCI<sup><xref ref-type="table-fn" rid="table2fn11">k</xref></sup>/NC<sup><xref ref-type="table-fn" rid="table2fn12">l</xref></sup> classification</td><td align="left" valign="top">Internal: NACC held out; external: ADNI and FHS</td><td align="left" valign="top">Etiology classification AUROC<sup><xref ref-type="table-fn" rid="table2fn13">m</xref></sup> 0.96; strong alignment with PET biomarkers and neuropathology</td><td align="left" valign="top">Imbalanced etiologies, training label subjectivity, and limited racial diversity</td></tr><tr><td align="left" valign="top">Shi et al [<xref ref-type="bibr" rid="ref80">80</xref>], 2018</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">MM-SDPN<sup><xref ref-type="table-fn" rid="table2fn14">n</xref></sup></td><td align="left" valign="top">Classification (AD vs NC, MCI vs 
NC)</td><td align="left" valign="top">T1 MRI and FDG-PET<sup><xref ref-type="table-fn" rid="table2fn15">o</xref></sup></td><td align="left" valign="top">Classification accuracy</td><td align="left" valign="top">Comparative vs single-modality and state-of-the-art models</td><td align="left" valign="top">Outperformed single-modality DPN<sup><xref ref-type="table-fn" rid="table2fn16">p</xref></sup>/SDPN<sup><xref ref-type="table-fn" rid="table2fn17">q</xref></sup> and concatenated models</td><td align="left" valign="top">ADNI-only dataset (limits generalizability); ROI<sup><xref ref-type="table-fn" rid="table2fn18">r</xref></sup>-based features rather than voxel-wise</td></tr><tr><td align="left" valign="top">Allwright et al [<xref ref-type="bibr" rid="ref81">81</xref>], 2023</td><td align="left" valign="top">UK Biobank</td><td align="left" valign="top">XGBoost<sup><xref ref-type="table-fn" rid="table2fn19">s</xref></sup></td><td align="left" valign="top">Risk prediction (Incident AD)</td><td align="left" valign="top">Demographics, lifestyle, genetics, and medical history</td><td align="left" valign="top">Prediction of incident AD (2-10 years) and risk factor ranking</td><td align="left" valign="top">Internal: nested 3-fold cross-validation; external: held-out validation set</td><td align="left" valign="top">AUROC 0.77, APOE-&#x03B5;4<sup><xref ref-type="table-fn" rid="table2fn20">t</xref></sup> identified as the strongest risk factor, and liver enzymes or frailty as predictors</td><td align="left" valign="top"><italic>ICD-10</italic><sup><xref ref-type="table-fn" rid="table2fn21">u</xref></sup> underascertainment, healthy volunteer bias, and observational design</td></tr><tr><td align="left" valign="top">Gu et al [<xref ref-type="bibr" rid="ref82">82</xref>], 2025</td><td align="left" valign="top">UK Biobank</td><td align="left" valign="top">LightGBM<sup><xref ref-type="table-fn" rid="table2fn22">v</xref></sup></td><td align="left" valign="top">Risk prediction 
(incident dementia in ASCVD<sup><xref ref-type="table-fn" rid="table2fn23">w</xref></sup> patients)</td><td align="left" valign="top">Clinical, biological assays, cognitive tests, and physical measures</td><td align="left" valign="top">All-cause incident dementia, AD, and VD<sup><xref ref-type="table-fn" rid="table2fn24">x</xref></sup> incidence</td><td align="left" valign="top">Temporal: train (2006-2009) and test (2010 cohort)</td><td align="left" valign="top">5-year dementia AUC<sup><xref ref-type="table-fn" rid="table2fn25">y</xref></sup> 0.903, AD AUC 0.775, and accuracy 0.851</td><td align="left" valign="top">Sample mostly European descent, static baseline features, and potential overfitting</td></tr><tr><td align="left" valign="top">You et al [<xref ref-type="bibr" rid="ref83">83</xref>], 2022</td><td align="left" valign="top">UK Biobank</td><td align="left" valign="top">LightGBM</td><td align="left" valign="top">Risk prediction (5/10-year horizon)</td><td align="left" valign="top">Demographics, lifestyle, blood biomarkers, and genetics</td><td align="left" valign="top">Incident all-cause dementia and AD prediction</td><td align="left" valign="top">Internal: 5-fold cross-validation</td><td align="left" valign="top">AUC 0.848 (all-cause), 0.862 (AD), and outperformed CAIDE<sup><xref ref-type="table-fn" rid="table2fn26">z</xref></sup> and DRS<sup><xref ref-type="table-fn" rid="table2fn27">aa</xref></sup></td><td align="left" valign="top">Limited external validation, population predominantly White, and feature selection fully data-driven</td></tr><tr><td align="left" valign="top">Calvo et al [<xref ref-type="bibr" rid="ref84">84</xref>], 2024</td><td align="left" valign="top">UK Biobank</td><td align="left" valign="top">Multivariable logistic regression</td><td align="left" valign="top">Risk association analysis</td><td align="left" valign="top">Questionnaire, ICD<sup><xref ref-type="table-fn" rid="table2fn28">ab</xref></sup> records, and genotypes</td><td 
align="left" valign="top">Odds of AD related to menopause type</td><td align="left" valign="top">Single cohort: multivariable adjustment</td><td align="left" valign="top">Early bilateral oophorectomy associated with 4-fold AD odds (OR<sup><xref ref-type="table-fn" rid="table2fn29">ac</xref></sup> 4.12) and HT<sup><xref ref-type="table-fn" rid="table2fn30">ad</xref></sup> use protective</td><td align="left" valign="top">Low case numbers in subgroups; self-reported HT use and healthy volunteer bias</td></tr><tr><td align="left" valign="top">Yi et al [<xref ref-type="bibr" rid="ref85">85</xref>], 2025</td><td align="left" valign="top">UK Biobank, ADNI, PPMI, and IXI<sup><xref ref-type="table-fn" rid="table2fn31">ae</xref></sup></td><td align="left" valign="top">3D-ViT<sup><xref ref-type="table-fn" rid="table2fn32">af</xref></sup></td><td align="left" valign="top">BAG<sup><xref ref-type="table-fn" rid="table2fn33">ag</xref></sup> estimation and GWAS<sup><xref ref-type="table-fn" rid="table2fn34">ah</xref></sup></td><td align="left" valign="top">T1-weighted MRI, genetics (SNP<sup><xref ref-type="table-fn" rid="table2fn35">ai</xref></sup> and xQTL<sup><xref ref-type="table-fn" rid="table2fn36">aj</xref></sup>)</td><td align="left" valign="top">BAG and drug target prioritization</td><td align="left" valign="top">External: ADNI, PPMI, and IXI</td><td align="left" valign="top">MAE<sup><xref ref-type="table-fn" rid="table2fn37">ak</xref></sup> &#x2248; 2.6 and identified 7 high-confidence drug targets (eg, MAPT<sup><xref ref-type="table-fn" rid="table2fn38">al</xref></sup> and TNFSF12<sup><xref ref-type="table-fn" rid="table2fn39">am</xref></sup>)</td><td align="left" valign="top">European-ancestry bias; lack of biological &#x201C;ground truth&#x201D; for brain age</td></tr><tr><td align="left" valign="top">Yousefzadeh et al [<xref ref-type="bibr" rid="ref86">86</xref>], 2024</td><td align="left" valign="top">UK Biobank (retina cohort)</td><td align="left" 
valign="top">VGG-16<sup><xref ref-type="table-fn" rid="table2fn40">an</xref></sup> classifier + LAVA<sup><xref ref-type="table-fn" rid="table2fn41">ao</xref></sup> (XAI<sup><xref ref-type="table-fn" rid="table2fn42">ap</xref></sup>)</td><td align="left" valign="top">Binary classification and explainability</td><td align="left" valign="top">Retinal fundus images</td><td align="left" valign="top">AD vs NC classification and neuron-level explanations</td><td align="left" valign="top">Internal: nested 5-fold cross-validation</td><td align="left" valign="top">Accuracy 71.4% and identified 7 latent clusters linking vascular and cognitive decline</td><td align="left" valign="top">Small AD sample size (n=100), cross-sectional design, and UK Biobank volunteer bias</td></tr><tr><td align="left" valign="top">Gong et al [<xref ref-type="bibr" rid="ref87">87</xref>], 2023</td><td align="left" valign="top">UK Biobank</td><td align="left" valign="top">SuperBigFLICA (semisupervised Bayesian fusion)</td><td align="left" valign="top">Phenotype discovery</td><td align="left" valign="top">Multimodal MRI (47 modalities)</td><td align="left" valign="top">Latent components predictive of nonimaging phenotypes</td><td align="left" valign="top">Internal: train, validation, or test split</td><td align="left" valign="top">Up to 46% improvement over expert IDPs<sup><xref ref-type="table-fn" rid="table2fn43">aq</xref></sup> and interpretable multimodal modes</td><td align="left" valign="top">Linear modeling constraints and UK Biobank population bias</td></tr><tr><td align="left" valign="top">Lian et al [<xref ref-type="bibr" rid="ref88">88</xref>], 2022</td><td align="left" valign="top">ADNI-1, ADNI-2, and AIBL</td><td align="left" valign="top">Attention-guided HybNet (3D FCN<sup><xref ref-type="table-fn" rid="table2fn44">ar</xref></sup> + hybrid network)</td><td align="left" valign="top">Diagnosis and prognosis</td><td align="left" valign="top">Structural T1 MRI</td><td align="left" 
valign="top">AD vs NC classification; pMCI<sup><xref ref-type="table-fn" rid="table2fn45">as</xref></sup> vs sMCI<sup><xref ref-type="table-fn" rid="table2fn46">at</xref></sup> prediction</td><td align="left" valign="top">External: trained ADNI-1, and validated ADNI-2 and AIBL</td><td align="left" valign="top">ADNI-2 AD vs NC accuracy 0.919 (AUC 0.965) and outperformed ROI/VBM<sup><xref ref-type="table-fn" rid="table2fn47">au</xref></sup> methods</td><td align="left" valign="top">Heavy preprocessing reliance, limited demographic diversity, and potential overfitting</td></tr><tr><td align="left" valign="top">Lian et al [<xref ref-type="bibr" rid="ref89">89</xref>], 2022</td><td align="left" valign="top">ADNI-1 and ADNI-2</td><td align="left" valign="top">MWAN<sup><xref ref-type="table-fn" rid="table2fn48">av</xref></sup></td><td align="left" valign="top">Joint regression of clinical scores</td><td align="left" valign="top">Structural T1 MRI and clinical scores</td><td align="left" valign="top">MMSE<sup><xref ref-type="table-fn" rid="table2fn49">aw</xref></sup>, CDRSB<sup><xref ref-type="table-fn" rid="table2fn50">ax</xref></sup>, and ADAS-Cog<sup><xref ref-type="table-fn" rid="table2fn51">ay</xref></sup> prediction</td><td align="left" valign="top">Cross-validation: across ADNI-1 and ADNI-2</td><td align="left" valign="top">Lower RMSE<sup><xref ref-type="table-fn" rid="table2fn52">az</xref></sup> and higher correlation coefficients than single-task baselines</td><td align="left" valign="top">Restricted to the ADNI cohorts and potential overfitting to the modest sample size</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref90">90</xref>], 2019</td><td align="left" valign="top">ADNI-1, ADNI-GO/2<sup><xref ref-type="table-fn" rid="table2fn53">ba</xref></sup>, and AIBL</td><td align="left" valign="top">3D CNN<sup><xref ref-type="table-fn" rid="table2fn54">bb</xref></sup> + Cox proportional hazards</td><td align="left" 
valign="top">Time-to-event prognosis</td><td align="left" valign="top">Hippocampal MRI patches and clinical variables</td><td align="left" valign="top">Progression from MCI to AD and risk stratification</td><td align="left" valign="top">External: trained ADNI-1, and validated ADNI-GO/2 and AIBL</td><td align="left" valign="top">C-index 0.864 (combined model) and significant risk-based stratification of MCI</td><td align="left" valign="top">Focus on the hippocampus only and potential cohort and scanner bias (1.5T vs 3T)</td></tr><tr><td align="left" valign="top">Qiu et al [<xref ref-type="bibr" rid="ref15">15</xref>], 2022</td><td align="left" valign="top">NACC, ADNI, and ADCP<sup><xref ref-type="table-fn" rid="table2fn55">bc</xref></sup></td><td align="left" valign="top">Multimodal deep learning (3D CNN + FCN)</td><td align="left" valign="top">Multiclass classification</td><td align="left" valign="top">Structural MRI, demographics, and neuropsychology</td><td align="left" valign="top">Diagnosis (NC, MCI, AD, and nADD<sup><xref ref-type="table-fn" rid="table2fn56">bd</xref></sup>) and saliency maps</td><td align="left" valign="top">External: trained NACC, and validated ADNI and independent cohorts</td><td align="left" valign="top">Performance comparable to neurologists and saliency aligned with pathology</td><td align="left" valign="top">Retrospective design and heterogeneity in protocols across cohorts</td></tr><tr><td align="left" valign="top">Oh et al [<xref ref-type="bibr" rid="ref91">91</xref>], 2023</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">LEAR<sup><xref ref-type="table-fn" rid="table2fn57">be</xref></sup> framework (CNN + RL<sup><xref ref-type="table-fn" rid="table2fn58">bf</xref></sup> + XAI)</td><td align="left" valign="top">Diagnosis and interpretation</td><td align="left" valign="top">Structural T1 MRI</td><td align="left" valign="top">AD vs non-AD classification and counterfactual maps</td><td align="left" 
valign="top">Internal: cross-validation on ADNI</td><td align="left" valign="top">Improved accuracy and generalization, and localized plausible atrophy patterns</td><td align="left" valign="top">Single-cohort (ADNI) and XAI evaluation, partly qualitative</td></tr><tr><td align="left" valign="top">Lian et al [<xref ref-type="bibr" rid="ref92">92</xref>], 2020</td><td align="left" valign="top">ADNI-1 and ADNI-2</td><td align="left" valign="top">Hierarchical FCN</td><td align="left" valign="top">Diagnosis and atrophy localization</td><td align="left" valign="top">Structural T1 MRI</td><td align="left" valign="top">AD vs NC, MCI vs NC, and atrophy pattern mapping</td><td align="left" valign="top">External: trained ADNI-1 and tested ADNI-2</td><td align="left" valign="top">Improved accuracy vs conventional features and interpretable atrophy maps</td><td align="left" valign="top">ADNI-only; strong reliance on preprocessing and registration</td></tr><tr><td align="left" valign="top">Avsec et al [<xref ref-type="bibr" rid="ref93">93</xref>], 2021</td><td align="left" valign="top">Genomic reference datasets</td><td align="left" valign="top">Enformer (transformer)</td><td align="left" valign="top">Genomic prediction</td><td align="left" valign="top">DNA sequence</td><td align="left" valign="top">Gene expression and chromatin state prediction</td><td align="left" valign="top">Internal: held-out chromosomes</td><td align="left" valign="top">Improved capture of long-range regulatory effects vs previous models</td><td align="left" valign="top">Limited to available cell types and assays</td></tr><tr><td align="left" valign="top">Yang et al [<xref ref-type="bibr" rid="ref94">94</xref>], 2021</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Deep learning and super learner</td><td align="left" valign="top">Prognosis</td><td align="left" valign="top">MRI, cognitive, and biomarkers</td><td align="left" valign="top">Diagnostic classification and prognostic risk 
signature</td><td align="left" valign="top">Internal: cross-validation within ADNI</td><td align="left" valign="top">Derived signature distinguished diagnostic groups and progression risk</td><td align="left" valign="top">Limited external validation and restricted to the ADNI research cohort</td></tr><tr><td align="left" valign="top">Lee et al [<xref ref-type="bibr" rid="ref95">95</xref>], 2024</td><td align="left" valign="top">ADNI and UK or Singapore Clinics</td><td align="left" valign="top">PPM<sup><xref ref-type="table-fn" rid="table2fn59">bg</xref></sup></td><td align="left" valign="top">Prognosis (MCI to AD)</td><td align="left" valign="top">MRI (gray matter) and cognitive tests</td><td align="left" valign="top">Individualized prognostic index</td><td align="left" valign="top">External: independent real-world memory clinics</td><td align="left" valign="top">Accuracy &#x2248;81.7%, AUC &#x2248;0.84; and index predicted conversion better than atrophy alone</td><td align="left" valign="top">Heterogeneity in real-world clinical data and potential site effects</td></tr><tr><td align="left" valign="top">Zhu et al [<xref ref-type="bibr" rid="ref96">96</xref>], 2021</td><td align="left" valign="top">ADNI and AIBL</td><td align="left" valign="top">DA-MIDL<sup><xref ref-type="table-fn" rid="table2fn60">bh</xref></sup></td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Structural MRI patches</td><td align="left" valign="top">AD vs NC and MCI vs NC</td><td align="left" valign="top">External: trained ADNI and tested AIBL</td><td align="left" valign="top">Higher accuracy and generalizability than baselines, and attention maps aligned with pathology</td><td align="left" valign="top">Reliance on structural MRI and potential dataset-specific overfitting</td></tr><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref97">97</xref>], 2024</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">GCN<sup><xref 
ref-type="table-fn" rid="table2fn61">bi</xref></sup>, SHAP<sup><xref ref-type="table-fn" rid="table2fn62">bj</xref></sup>, and automatic fusion</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Cognitive, MRI, PET, and risk factors</td><td align="left" valign="top">AD vs non-AD diagnosis, and multimodal feature selection</td><td align="left" valign="top">Internal: two ADNI multimodal cohorts</td><td align="left" valign="top">Accuracies of 95.9% and 91.9%, and efficient selection of clinically important features</td><td align="left" valign="top">Complex model deployment and reliance on ADNI data</td></tr><tr><td align="left" valign="top">Velazquez and Lee [<xref ref-type="bibr" rid="ref75">75</xref>], 2022</td><td align="left" valign="top">ADNI EMCI<sup><xref ref-type="table-fn" rid="table2fn63">bk</xref></sup></td><td align="left" valign="top">Ensemble (random forest and CNN)</td><td align="left" valign="top">Prediction of conversion</td><td align="left" valign="top">DTI<sup><xref ref-type="table-fn" rid="table2fn64">bl</xref></sup> (ADC<sup><xref ref-type="table-fn" rid="table2fn65">bm</xref></sup> maps) and EHR<sup><xref ref-type="table-fn" rid="table2fn66">bn</xref></sup></td><td align="left" valign="top">EMCI to AD conversion prediction</td><td align="left" valign="top">Internal: held-out test set</td><td align="left" valign="top">98.81% accuracy and feature importance explainability provided</td><td align="left" valign="top">Small converter sample size and potential overfitting</td></tr><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref76">76</xref>], 2024</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Multimodal learning machine (ELM<sup><xref ref-type="table-fn" rid="table2fn67">bo</xref></sup> ensemble)</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">MRI features and neuropsychological tests</td><td align="left" valign="top">NC, MCI, and AD 
classification</td><td align="left" valign="top">Internal: cross-validation on ADNI</td><td align="left" valign="top">&#x003E;98% accuracy and <italic>F</italic><sub>1</sub>-score, and no observed bias between MCI and AD</td><td align="left" valign="top">A single research cohort and very high accuracy require external verification</td></tr><tr><td align="left" valign="top">Bi et al [<xref ref-type="bibr" rid="ref98">98</xref>], 2020</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Cluster evolutionary random forest</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Resting-state fMRI<sup><xref ref-type="table-fn" rid="table2fn68">bp</xref></sup> and SNP</td><td align="left" valign="top">AD vs control classification and biomarker identification</td><td align="left" valign="top">Comparative vs competing methods</td><td align="left" valign="top">Identified significant brain region-gene pairs and effective classification</td><td align="left" valign="top">Small multimodal sample size and complex hyperparameters</td></tr><tr><td align="left" valign="top">Bi et al [<xref ref-type="bibr" rid="ref99">99</xref>], 2022</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Weighted evolutionary random forest</td><td align="left" valign="top">Pathogen detection</td><td align="left" valign="top">Resting-state fMRI and SNP</td><td align="left" valign="top">MCI identification and pathogenic factor extraction</td><td align="left" valign="top">Comparative vs state-of-the-art methods</td><td align="left" valign="top">Superior MCI identification performance, and highlighted key ROIs and SNPs</td><td align="left" valign="top">High-dimensional fusion features, small N, and overfitting risk</td></tr><tr><td align="left" valign="top">Hashmi and Barukab [<xref ref-type="bibr" rid="ref100">100</xref>], 2023</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">Deep RL and neural network</td><td 
align="left" valign="top">Staging</td><td align="left" valign="top">Structural MRI</td><td align="left" valign="top">4-class dementia staging</td><td align="left" valign="top">Internal: augmented vs baseline</td><td align="left" valign="top">RL augmentation improved accuracy by &#x2248;6% and recall by &#x2248;13%</td><td align="left" valign="top">Single open dataset; focus on MRI only</td></tr><tr><td align="left" valign="top">Wang et al [<xref ref-type="bibr" rid="ref101">101</xref>], 2024</td><td align="left" valign="top">ADNI-1/2/3</td><td align="left" valign="top">Multimodal DL<sup><xref ref-type="table-fn" rid="table2fn69">bq</xref></sup> with an interaction layer</td><td align="left" valign="top">Prognosis (MCI to AD)</td><td align="left" valign="top">MRI, clinical, and genetics (SNP)</td><td align="left" valign="top">4-year conversion prediction</td><td align="left" valign="top">External: generalized to ADNI-3</td><td align="left" valign="top">AUC 0.962 (cross-validation), 0.939 (test); interaction effects improved accuracy</td><td align="left" valign="top">ADNI-only and potential overfitting despite cross-validation</td></tr><tr><td align="left" valign="top">Hatami et al [<xref ref-type="bibr" rid="ref102">102</xref>], 2024</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">DNN<sup><xref ref-type="table-fn" rid="table2fn70">br</xref></sup> and RL (data augmentation)</td><td align="left" valign="top">Classification</td><td align="left" valign="top">Structural MRI</td><td align="left" valign="top">AD vs NC classification</td><td align="left" valign="top">Comparative vs baseline augmentation approaches</td><td align="left" valign="top">Precision &#x2248;0.95; RL-guided augmentation outperformed baselines</td><td align="left" valign="top">Single research cohort and no external clinical validation</td></tr><tr><td align="left" valign="top">Tabarestani et al [<xref ref-type="bibr" rid="ref103">103</xref>], 2020</td><td align="left" 
valign="top">ADNI</td><td align="left" valign="top">Distributed multitask regression</td><td align="left" valign="top">Longitudinal progression</td><td align="left" valign="top">MRI, PET, CSF<sup><xref ref-type="table-fn" rid="table2fn71">bs</xref></sup>, EEG<sup><xref ref-type="table-fn" rid="table2fn72">bt</xref></sup>, and clinical</td><td align="left" valign="top">Prediction of longitudinal cognitive scores</td><td align="left" valign="top">Comparative vs unimodal or multimodal methods</td><td align="left" valign="top">Reduced errors, particularly in sparse or incomplete longitudinal data</td><td align="left" valign="top">Model complexity and potential sensitivity to hyperparameters</td></tr><tr><td align="left" valign="top">Burkhart et al [<xref ref-type="bibr" rid="ref104">104</xref>], 2024</td><td align="left" valign="top">ADNI and Singapore Memory Clinic</td><td align="left" valign="top">Unsupervised multimodal trajectory modeling</td><td align="left" valign="top">Prognosis</td><td align="left" valign="top">Cognitive, amyloid PET, and MRI</td><td align="left" valign="top">Cognitive health clustering and progression prediction</td><td align="left" valign="top">External: real-world memory clinic data</td><td align="left" valign="top">Better stratification than standard clinical assessments and robust to missing data</td><td align="left" valign="top">Unsupervised complexity and reliance on ADNI for training</td></tr><tr><td align="left" valign="top">El-Sappagh et al [<xref ref-type="bibr" rid="ref105">105</xref>], 2021</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Random forest and SHAP (multilayer)</td><td align="left" valign="top">Diagnosis and progression</td><td align="left" valign="top">11 modalities (MRI, PET, CSF, and clinical)</td><td align="left" valign="top">Multiclass diagnosis and MCI progression detection</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Diagnosis accuracy 
93.95%, progression accuracy 87.08%, and interpretable</td><td align="left" valign="top">High complexity and challenges for routine care deployment</td></tr><tr><td align="left" valign="top">Lee et al [<xref ref-type="bibr" rid="ref106">106</xref>], 2024</td><td align="left" valign="top">ADNI and 4 Korean hospitals</td><td align="left" valign="top">GBM<sup><xref ref-type="table-fn" rid="table2fn73">bu</xref></sup></td><td align="left" valign="top">Conversion prediction</td><td align="left" valign="top">MRI (T1, T2-FLAIR<sup><xref ref-type="table-fn" rid="table2fn74">bv</xref></sup>), amyloid PET, and clinical</td><td align="left" valign="top">MCI to AD conversion (4-year)</td><td align="left" valign="top">Internal: nested cross-validation with modality combinations</td><td align="left" valign="top">T1 and amyloid PET was the best combination, and T2-FLAIR did not improve prediction</td><td align="left" valign="top">Small multicenter sample and site and scanner heterogeneity</td></tr><tr><td align="left" valign="top">Yuan et al [<xref ref-type="bibr" rid="ref107">107</xref>], 2021</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">Multimodal cotraining (random forest)</td><td align="left" valign="top">MCI subtype classification</td><td align="left" valign="top">Structural MRI and SNP</td><td align="left" valign="top">sMCI vs pMCI classification</td><td align="left" valign="top">External: ADNI-2 independent test set</td><td align="left" valign="top">Accuracy 85.5% and cotraining outperformed single modality</td><td align="left" valign="top">Dependence on feature selection and ADNI-only</td></tr><tr><td align="left" valign="top">Cirincione et al [<xref ref-type="bibr" rid="ref108">108</xref>], 2024</td><td align="left" valign="top">TADPOLE<sup><xref ref-type="table-fn" rid="table2fn75">bw</xref></sup> (ADNI)</td><td align="left" valign="top">Ensemble integration</td><td align="left" valign="top">Prediction</td><td align="left" valign="top">MRI, 
PET, clinical, and cognitive</td><td align="left" valign="top">Future dementia prediction in MCI</td><td align="left" valign="top">Internal: held-out test set</td><td align="left" valign="top">AUC 0.81, and outperformed XGBoost and deep learning baselines</td><td align="left" valign="top">Single research dataset and the complexity of multimodal integration</td></tr><tr><td align="left" valign="top">Cassani and Falk [<xref ref-type="bibr" rid="ref109">109</xref>], 2020</td><td align="left" valign="top">Clinical EEG</td><td align="left" valign="top">Feature engineering and ML</td><td align="left" valign="top">Diagnosis and severity</td><td align="left" valign="top">Resting-state EEG</td><td align="left" valign="top">AD vs normal, and mild vs moderate AD classification</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Modulation spectral features outperformed traditional EEG features</td><td align="left" valign="top">Small sample size, resting state only, and single center</td></tr><tr><td align="left" valign="top">Cilia et al [<xref ref-type="bibr" rid="ref110">110</xref>], 2021</td><td align="left" valign="top">Custom (Naples)</td><td align="left" valign="top">Deep transfer learning (CNN)</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Online handwriting (dynamic)</td><td align="left" valign="top">Early AD detection</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Dynamic features (color-encoded) are superior to shape-only images</td><td align="left" valign="top">Single-center dataset and task-specific protocol</td></tr><tr><td align="left" valign="top">Kmetzsch et al [<xref ref-type="bibr" rid="ref111">111</xref>], 2022</td><td align="left" valign="top">PREV-DEMALS<sup><xref ref-type="table-fn" rid="table2fn76">bx</xref></sup></td><td align="left" valign="top">Supervised variational autoencoder</td><td align="left" valign="top">Disease 
progression modeling</td><td align="left" valign="top">MRI and microRNA</td><td align="left" valign="top">Disease progression score (FTD<sup><xref ref-type="table-fn" rid="table2fn77">by</xref></sup>/ALS<sup><xref ref-type="table-fn" rid="table2fn78">bz</xref></sup>)</td><td align="left" valign="top">Validation: synthetic data and cohort evaluation</td><td align="left" valign="top">Outperformed competing models in capturing progression trajectory</td><td align="left" valign="top">Small sample (rare disease) and cross-sectional data used for progression</td></tr><tr><td align="left" valign="top">Mengoudi et al [<xref ref-type="bibr" rid="ref112">112</xref>], 2020</td><td align="left" valign="top">UCL<sup><xref ref-type="table-fn" rid="table2fn79">ca</xref></sup> and Insight 46</td><td align="left" valign="top">Self-supervised deep neural network</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Eye-tracking (gaze or pupil)</td><td align="left" valign="top">Dementia vs control classification</td><td align="left" valign="top">Comparative vs handcrafted features</td><td align="left" valign="top">Self-supervised features are more sensitive than handcrafted metrics</td><td align="left" valign="top">Modest sample size, mixed dementia subtypes, and specialized hardware</td></tr><tr><td align="left" valign="top">Tsai et al [<xref ref-type="bibr" rid="ref113">113</xref>], 2024</td><td align="left" valign="top">Taiwan NHI<sup><xref ref-type="table-fn" rid="table2fn80">cb</xref></sup></td><td align="left" valign="top">MAND<sup><xref ref-type="table-fn" rid="table2fn81">cc</xref></sup></td><td align="left" valign="top">Incidence prediction</td><td align="left" valign="top">EHR (ICD codes) and demographics</td><td align="left" valign="top">Dementia incidence risk</td><td align="left" valign="top">Internal: held-out test set</td><td align="left" valign="top">AUC 0.901 and outperformed traditional CTR<sup><xref ref-type="table-fn" 
rid="table2fn82">cd</xref></sup> models</td><td align="left" valign="top">Coding errors in administrative data, and findings specific to the Taiwan NHI</td></tr><tr><td align="left" valign="top">Park et al [<xref ref-type="bibr" rid="ref22">22</xref>], 2024</td><td align="left" valign="top">Korean memory clinics</td><td align="left" valign="top">SVM<sup><xref ref-type="table-fn" rid="table2fn83">ce</xref></sup></td><td align="left" valign="top">Diagnosis (MCI vs HC<sup><xref ref-type="table-fn" rid="table2fn84">cf</xref></sup>)</td><td align="left" valign="top">VR<sup><xref ref-type="table-fn" rid="table2fn85">cg</xref></sup> biomarkers, MRI, and neuropsychological tests</td><td align="left" valign="top">MCI vs healthy control classification</td><td align="left" valign="top">Internal: train or test split</td><td align="left" valign="top">VR and MRI combined AUC 0.89, and VR biomarkers comparable to MRI alone</td><td align="left" valign="top">Small sample (n=54) and VR hardware requirement</td></tr><tr><td align="left" valign="top">Wu et al [<xref ref-type="bibr" rid="ref114">114</xref>], 2022</td><td align="left" valign="top">Clinical EEG</td><td align="left" valign="top">WiGMM<sup><xref ref-type="table-fn" rid="table2fn86">ch</xref></sup></td><td align="left" valign="top">Severity detection</td><td align="left" valign="top">Resting-state EEG</td><td align="left" valign="top">Unsupervised dementia degree detection</td><td align="left" valign="top">Internal: latent structure analysis</td><td align="left" valign="top">Captured latent dementia degrees matching clinical status</td><td align="left" valign="top">Unsupervised labeling requires careful interpretation</td></tr><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref115">115</xref>], 2025</td><td align="left" valign="top">Chinese memory clinics</td><td align="left" valign="top">FCRN<sup><xref ref-type="table-fn" rid="table2fn87">ci</xref></sup> and MLP<sup><xref ref-type="table-fn" 
rid="table2fn88">cj</xref></sup> (patch-based)</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">MRI, PET, clinical, and genotype</td><td align="left" valign="top">AD vs normal and MCI vs normal classification</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Accuracy &#x2248;96% (AD), &#x2248;92% (MCI), and interpretable probability maps</td><td align="left" valign="top">Single-country clinical cohorts and limited ethnic diversity</td></tr><tr><td align="left" valign="top">Fabietti et al [<xref ref-type="bibr" rid="ref77">77</xref>], 2023</td><td align="left" valign="top">Mouse models</td><td align="left" valign="top">Ensemble machine learning</td><td align="left" valign="top">Early detection (animal)</td><td align="left" valign="top">LFP<sup><xref ref-type="table-fn" rid="table2fn89">ck</xref></sup></td><td align="left" valign="top">AD vs control mouse classification</td><td align="left" valign="top">Internal: channel masking robustness tests</td><td align="left" valign="top">Accuracy 99.4% and robust to artifacts</td><td align="left" valign="top">Preclinical animal model results and small sample size</td></tr><tr><td align="left" valign="top">Seifallahi et al [<xref ref-type="bibr" rid="ref78">78</xref>], 2022</td><td align="left" valign="top">Single center</td><td align="left" valign="top">SVM</td><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">Kinect V2 (gait or TUG<sup><xref ref-type="table-fn" rid="table2fn90">cl</xref></sup>)</td><td align="left" valign="top">AD vs healthy control classification</td><td align="left" valign="top">Internal: leave-one-out cross-validation</td><td align="left" valign="top">Accuracy 98.68% using 12 skeletal features</td><td align="left" valign="top">Small sample, case-control design may overestimate performance</td></tr><tr><td align="left" valign="top">Fan et al [<xref ref-type="bibr" rid="ref116">116</xref>], 2024</td><td 
align="left" valign="top">CVD<sup><xref ref-type="table-fn" rid="table2fn91">cm</xref></sup> patients (Wuhan)</td><td align="left" valign="top">ViT<sup><xref ref-type="table-fn" rid="table2fn92">cn</xref></sup> (MRI) and XGBoost (clinical)</td><td align="left" valign="top">VCI<sup><xref ref-type="table-fn" rid="table2fn93">co</xref></sup> diagnosis</td><td align="left" valign="top">MRI (T1, T2-FLAIR) and clinical</td><td align="left" valign="top">Vascular cognitive impairment diagnosis</td><td align="left" valign="top">External: independent CVD dataset</td><td align="left" valign="top">The hybrid model has an AUC of 0.965 and is comparable to expert neurologists</td><td align="left" valign="top">CVD-specific cohort and complex ViT and XGBoost pipeline</td></tr><tr><td align="left" valign="top">Beebe-Wang et al [<xref ref-type="bibr" rid="ref117">117</xref>], 2021</td><td align="left" valign="top">Aging cohort (US)</td><td align="left" valign="top">Nonlinear ML and SHAP</td><td align="left" valign="top">Imminent prediction (3 years)</td><td align="left" valign="top">Clinical, neuropsychological</td><td align="left" valign="top">Incident dementia within 3 years</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Sparse model (4 tests) comparable to full battery</td><td align="left" valign="top">Prediction limited to a 3-year horizon and a single health system</td></tr><tr><td align="left" valign="top">Battineni et al [<xref ref-type="bibr" rid="ref118">118</xref>], 2021</td><td align="left" valign="top">Public MRI dataset</td><td align="left" valign="top">Gradient boosting</td><td align="left" valign="top">Classification</td><td align="left" valign="top">MRI features and demographics</td><td align="left" valign="top">AD vs non-AD classification</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Accuracy 97.58% (gradient boosting performed best)</td><td align="left" 
valign="top">Small public dataset and lack of external validation</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>NACC: National Alzheimer&#x2019;s Coordinating Centre.</p></fn><fn id="table2fn2"><p><sup>b</sup>ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative.</p></fn><fn id="table2fn3"><p><sup>c</sup>AIBL: Australian Imaging, Biomarkers, and Lifestyle Study.</p></fn><fn id="table2fn4"><p><sup>d</sup>FHS: Framingham Heart Study.</p></fn><fn id="table2fn5"><p><sup>e</sup>PPMI: Parkinson Progression Markers Initiative.</p></fn><fn id="table2fn6"><p><sup>f</sup>OASIS: Open Access Series of Imaging Studies.</p></fn><fn id="table2fn7"><p><sup>g</sup>MRI: magnetic resonance imaging.</p></fn><fn id="table2fn8"><p><sup>h</sup>FLAIR: fluid-attenuated inversion recovery. </p></fn><fn id="table2fn9"><p><sup>i</sup>PET: positron emission tomography.</p></fn><fn id="table2fn10"><p><sup>j</sup>AD: Alzheimer disease.</p></fn><fn id="table2fn11"><p><sup>k</sup>MCI: mild cognitive impairment.</p></fn><fn id="table2fn12"><p><sup>l</sup>NC: normal control.</p></fn><fn id="table2fn13"><p><sup>m</sup>AUROC: area under the receiver operating characteristic curve.</p></fn><fn id="table2fn14"><p><sup>n</sup>MM-SPDN: multimodal stacked deep polynomial network.</p></fn><fn id="table2fn15"><p><sup>o</sup>FDG-PET: fluorodeoxyglucose-positron emission tomography.</p></fn><fn id="table2fn16"><p><sup>p</sup>DPN: deep polynomial network.</p></fn><fn id="table2fn17"><p><sup>q</sup>SPDN: stacked deep polynomial network.</p></fn><fn id="table2fn18"><p><sup>r</sup>ROI: region of interest.</p></fn><fn id="table2fn19"><p><sup>s</sup>XGBoost: Extreme Gradient Boosting. 
</p></fn><fn id="table2fn20"><p><sup>t</sup>APOE-&#x03B5;4: apolipoprotein E epsilon 4 allele.</p></fn><fn id="table2fn21"><p><sup>u</sup><italic>ICD-10</italic>: <italic>International Statistical Classification of Diseases, Tenth Revision.</italic></p></fn><fn id="table2fn22"><p><sup>v</sup>LightGBM: Light Gradient-Boosting Machine.</p></fn><fn id="table2fn23"><p><sup>w</sup>ASCVD: atherosclerotic cardiovascular disease.</p></fn><fn id="table2fn24"><p><sup>x</sup>VD: vascular dementia.</p></fn><fn id="table2fn25"><p><sup>y</sup>AUC: area under the curve.</p></fn><fn id="table2fn26"><p><sup>z</sup>CAIDE: Cardiovascular Risk Factors, Aging, and Incidence of Dementia.</p></fn><fn id="table2fn27"><p><sup>aa</sup>DRS: Dementia Risk Score.</p></fn><fn id="table2fn28"><p><sup>ab</sup><italic>ICD</italic>: <italic>International Classification of Diseases.</italic></p></fn><fn id="table2fn29"><p><sup>ac</sup>OR: odds ratio.</p></fn><fn id="table2fn30"><p><sup>ad</sup>HT: hormone therapy.</p></fn><fn id="table2fn31"><p><sup>ae</sup>IXI: Information Extraction From Images.</p></fn><fn id="table2fn32"><p><sup>af</sup>3D-ViT: 3D vision transformer.</p></fn><fn id="table2fn33"><p><sup>ag</sup>BAG: brain age gap.</p></fn><fn id="table2fn34"><p><sup>ah</sup>GWAS: genome-wide association study.</p></fn><fn id="table2fn35"><p><sup>ai</sup>SNP: single-nucleotide polymorphism.</p></fn><fn id="table2fn36"><p><sup>aj</sup>xQTL: molecular quantitative trait locus.</p></fn><fn id="table2fn37"><p><sup>ak</sup>MAE: mean absolute error.</p></fn><fn id="table2fn38"><p><sup>al</sup>MAPT: microtubule-associated protein tau.</p></fn><fn id="table2fn39"><p><sup>am</sup>TNFSF12: Tumor Necrosis Factor (Ligand) Superfamily, Member 12.</p></fn><fn id="table2fn40"><p><sup>an</sup>VGG-16: Visual Geometry Group 16-Layer Network.</p></fn><fn id="table2fn41"><p><sup>ao</sup>LAVA: Granular Neuron-Level Explainer.</p></fn><fn id="table2fn42"><p><sup>ap</sup>XAI: explainable artificial 
intelligence.</p></fn><fn id="table2fn43"><p><sup>aq</sup>IDP: imaging-derived phenotype.</p></fn><fn id="table2fn44"><p><sup>ar</sup>FCN: fully convolutional network.</p></fn><fn id="table2fn45"><p><sup>as</sup>pMCI: progressive mild cognitive impairment.</p></fn><fn id="table2fn46"><p><sup>at</sup>sMCI: stable mild cognitive impairment.</p></fn><fn id="table2fn47"><p><sup>au</sup>VBM: voxel-based morphometry.</p></fn><fn id="table2fn48"><p><sup>av</sup>MWAN: multi-task weakly-supervised attention.</p></fn><fn id="table2fn49"><p><sup>aw</sup>MMSE: Mini-Mental State Examination. </p></fn><fn id="table2fn50"><p><sup>ax</sup>CDRSB: Clinical Dementia Rating&#x2013;Sum of Boxes.</p></fn><fn id="table2fn51"><p><sup>ay</sup>ADAS-Cog: Alzheimer Disease Assessment Scale&#x2013;Cognitive Subscale.</p></fn><fn id="table2fn52"><p><sup>az</sup>RMSE: root mean square error.</p></fn><fn id="table2fn53"><p><sup>ba</sup>ADNI-GO/2: Alzheimer&#x2019;s Disease Neuroimaging Initiative &#x2013; Grand Opportunity / Phase 2.</p></fn><fn id="table2fn54"><p><sup>bb</sup>CNN: convolutional neural network.</p></fn><fn id="table2fn55"><p><sup>bc</sup>ADPC: Alzheimer Disease Prediction Challenge.</p></fn><fn id="table2fn56"><p><sup>bd</sup>nADD: non-Alzheimer disease dementia.</p></fn><fn id="table2fn57"><p><sup>be</sup>LEAR: learn-explain-reinforce.</p></fn><fn id="table2fn58"><p><sup>bf</sup>RL: reinforcement learning.</p></fn><fn id="table2fn59"><p><sup>bg</sup>PPM: Predictive Prognostic Model.</p></fn><fn id="table2fn60"><p><sup>bh</sup>DA-MIDL: dual attention multi-instance deep learning.</p></fn><fn id="table2fn61"><p><sup>bi</sup>GCN: graph convolutional network.</p></fn><fn id="table2fn62"><p><sup>bj</sup>SHAP: Shapley Additive Explanations.</p></fn><fn id="table2fn63"><p><sup>bk</sup>EMCI: early mild cognitive impairment.</p></fn><fn id="table2fn64"><p><sup>bl</sup>DTI: diffusion tensor imaging.</p></fn><fn id="table2fn65"><p><sup>bm</sup>ADC: apparent diffusion 
coefficient.</p></fn><fn id="table2fn66"><p><sup>bn</sup>EHR: electronic health record.</p></fn><fn id="table2fn67"><p><sup>bo</sup>ELM: extreme learning machine.</p></fn><fn id="table2fn68"><p><sup>bp</sup>fMRI: functional magnetic resonance imaging.</p></fn><fn id="table2fn69"><p><sup>bq</sup>DL: deep learning.</p></fn><fn id="table2fn70"><p><sup>br</sup>DNN: deep neural network.</p></fn><fn id="table2fn71"><p><sup>bs</sup>CSF: cerebrospinal fluid.</p></fn><fn id="table2fn72"><p><sup>bt</sup>EEG: electroencephalography.</p></fn><fn id="table2fn73"><p><sup>bu</sup>GBM: Gradient Boosting Machine.</p></fn><fn id="table2fn74"><p><sup>bv</sup>T2-FLAIR: T2-weighted fluid-attenuated inversion recovery.</p></fn><fn id="table2fn75"><p><sup>bw</sup>TADPOLE: The Alzheimer Disease Prediction of Longitudinal Evolution.</p></fn><fn id="table2fn76"><p><sup>bx</sup>PREV-DEMALS: Predict to Prevent Frontotemporal Lobar Degeneration and Amyotrophic Lateral Sclerosis.</p></fn><fn id="table2fn77"><p><sup>by</sup>FTD: frontotemporal dementia.</p></fn><fn id="table2fn78"><p><sup>bz</sup>ALS: amyotrophic lateral sclerosis.</p></fn><fn id="table2fn79"><p><sup>ca</sup>UCL: University College London.</p></fn><fn id="table2fn80"><p><sup>cb</sup>NHI: National Health Insurance.</p></fn><fn id="table2fn81"><p><sup>cc</sup>MAND: Multimodal Attention Network.</p></fn><fn id="table2fn82"><p><sup>cd</sup>CTR: clinical trial registration.</p></fn><fn id="table2fn83"><p><sup>ce</sup>SVM: support vector machine.</p></fn><fn id="table2fn84"><p><sup>cf</sup>HC: healthy control.</p></fn><fn id="table2fn85"><p><sup>cg</sup>VR: virtual reality.</p></fn><fn id="table2fn86"><p><sup>ch</sup>WiGMM: Warped Infinite Gaussian Mixture.</p></fn><fn id="table2fn87"><p><sup>ci</sup>FCRN: fully convolutional residual network. 
</p></fn><fn id="table2fn88"><p><sup>cj</sup>MLP: multilayer perceptron.</p></fn><fn id="table2fn89"><p><sup>ck</sup>LFP: local field potentials.</p></fn><fn id="table2fn90"><p><sup>cl</sup>TUG: Timed Up and Go.</p></fn><fn id="table2fn91"><p><sup>cm</sup>CVD: cardiovascular disease.</p></fn><fn id="table2fn92"><p><sup>cn</sup>ViT: vision transformer.</p></fn><fn id="table2fn93"><p><sup>co</sup>VCI: vascular cognitive impairment.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-5-3"><title>UK Biobank Dataset</title><p>UK Biobank enables population-level association studies and early-risk modeling. It has been widely used in AD diagnosis research, including the following notable studies:</p><p>Recent UK Biobank&#x2013;based studies have applied diverse multimodal ML and deep learning approaches for AD risk prediction and diagnosis, integrating neuroimaging, genetic, clinical, and lifestyle data. These models generally achieved moderate to high performance (area under the curve [AUC] &#x2248;0.77&#x2010;0.90) and demonstrated improved diagnostic utility compared with conventional assessment methods [<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref83">83</xref>]. Several studies further emphasized the importance of genetic and hormonal factors in risk stratification [<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]. In addition, explainable and semisupervised frameworks have enhanced model interpretability and scalability for population-level analysis, facilitating clinically relevant phenotyping and disease monitoring [<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>].</p><p>This section describes multimodal model implementation in the UK Biobank. 
As shown in the analysis and <xref ref-type="table" rid="table2">Table 2</xref>, UK Biobank data support AD diagnosis and risk prediction, but limitations remain: class imbalance, which may bias training, and the need for external validation to confirm generalizability beyond the UK Biobank cohort.</p></sec><sec id="s3-5-4"><title>ADNI Dataset</title><p>ADNI provides a rich and diverse collection of demographic information, multimodal data, and clinical assessments. Owing to its comprehensive scope and longitudinal design, it has become one of the most widely adopted benchmark datasets for computer-aided diagnosis of AD. The following studies exemplify its use:</p><p>Recent ADNI-based studies have developed a wide range of multimodal and deep learning frameworks integrating neuroimaging, genetic, cognitive, and clinical data for AD diagnosis and MCI-to-AD progression prediction. Attention-based, multitask, ensemble, and time-to-event models have enabled accurate localization of disease-related regions, improved prognostic modeling, and enhanced interpretability through explainable artificial intelligence techniques such as SHAP (Shapley Additive Explanations) and counterfactual analysis [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref88">88</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref97">97</xref>]. Several approaches further incorporated RL, semisupervised learning, and data augmentation to improve robustness and generalizability in heterogeneous and imbalanced datasets [<xref ref-type="bibr" rid="ref98">98</xref>-<xref ref-type="bibr" rid="ref103">103</xref>,<xref ref-type="bibr" rid="ref107">107</xref>]. 
These models typically achieved high diagnostic and prognostic performance (AUC up to &#x2248;0.96), with some demonstrating strong external validation and clinical relevance [<xref ref-type="bibr" rid="ref93">93</xref>-<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref101">101</xref>,<xref ref-type="bibr" rid="ref104">104</xref>-<xref ref-type="bibr" rid="ref106">106</xref>,<xref ref-type="bibr" rid="ref108">108</xref>]. Nevertheless, existing reviews and benchmarking studies have highlighted persistent limitations, including dataset bias, inconsistent evaluation protocols, and limited cross-center validation, underscoring the need for standardized and reproducible multimodal frameworks [<xref ref-type="bibr" rid="ref119">119</xref>].</p><p>While ADNI provides a comprehensive and standardized multimodal resource for AD research and supports robust model performance, several limitations remain. These include class imbalance, underrepresentation of racially diverse populations, and limited external validation, which may bias model training and restrict generalizability across clinical settings.</p></sec><sec id="s3-5-5"><title>Self-Collected Datasets</title><p>While public datasets such as ADNI provide standardized benchmarks, self-collected datasets enable more flexible acquisition of targeted modalities. Representative studies include the following.</p><p>Studies based on self-collected datasets have explored diverse multimodal fusion strategies. EEG- and local field potentials&#x2013;based models, as well as hybrid MRI&#x2013;PET&#x2013;biomarker frameworks, demonstrated high diagnostic and staging accuracy and supported interpretable risk mapping [<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref109">109</xref>,<xref ref-type="bibr" rid="ref111">111</xref>,<xref ref-type="bibr" rid="ref114">114</xref>,<xref ref-type="bibr" rid="ref115">115</xref>]. 
In parallel, behavioral and digital biomarkers derived from handwriting, eye tracking, virtual reality, and motion capture have enabled noninvasive and low-cost screening with strong classification performance [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref110">110</xref>,<xref ref-type="bibr" rid="ref112">112</xref>]. Large-scale real-world health records and hybrid deep learning models further facilitated population-level risk prediction and vascular cognitive impairment assessment, achieving robust AUC values above 0.90 [<xref ref-type="bibr" rid="ref113">113</xref>,<xref ref-type="bibr" rid="ref120">120</xref>]. Overall, self-collected datasets have expanded the scope of multimodal AD research by enabling flexible modality integration and novel biomarker discovery, while remaining constrained by limited sample sizes and heterogeneous acquisition protocols.</p><p>Self-collected datasets offer distinct advantages, including targeted modality acquisition, novel biomarker discovery (eg, microRNA, local field potentials, and handwriting), and enhanced real-world clinical utility. However, self-collected datasets typically suffer from limited sample sizes, which increases susceptibility to overfitting and compromises generalizability across diverse populations.</p></sec></sec><sec id="s3-6"><title>Multimodal Linguistic-Based Cognitive Impairment Datasets</title><p>Beyond multimodal clinical phenotyping datasets, multimodal linguistic-based cognitive impairment datasets represent an equally vital research resource. These datasets offer a noninvasive and cost-effective methodology for detecting cognitive decline, particularly valuable for identifying early-stage or subtle impairments where traditional neuroimaging or biomarker data may yield inconclusive results. Capturing spontaneous or semistructured speech and language patterns drives the development of AI methods for speech data. 
Recent work is shown in <xref ref-type="table" rid="table3">Table 3</xref>.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Multimodal linguistic-based cognitive impairment datasets related papers.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Datasets</td><td align="left" valign="bottom">Model type</td><td align="left" valign="bottom">Type of task</td><td align="left" valign="bottom">Modalities</td><td align="left" valign="bottom">Outcomes</td><td align="left" valign="bottom">Validation</td><td align="left" valign="bottom">Results</td><td align="left" valign="bottom">Limitation</td></tr></thead><tbody><tr><td align="left" valign="top">Ilias et al [<xref ref-type="bibr" rid="ref121">121</xref>], 2023</td><td align="left" valign="top">ADReSS<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> and ADReSSo<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">Multimodal transformer (BERT<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup> and DeiT<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup>) with optimal transport</td><td align="left" valign="top">Dementia detection (AD<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup> vs non-AD)</td><td align="left" valign="top">Audio (spectrograms) and text (transcripts)</td><td align="left" valign="top">Classification metrics and calibration</td><td align="left" valign="top">Internal: ADReSS or ADReSSo</td><td align="left" valign="top">Accuracy &#x2248;91.25%, <italic>F</italic><sub>1</sub>-score &#x2248;91.06%; improved calibration vs baselines</td><td align="left" valign="top">Small, curated datasets, English-only, and potential overfitting</td></tr><tr><td align="left" valign="top">Poor et al [<xref ref-type="bibr" rid="ref122">122</xref>], 2024</td><td align="left" valign="top">I-CONECT<sup><xref 
ref-type="table-fn" rid="table3fn6">f</xref></sup></td><td align="left" valign="top">Multimodal cross-transformer with coattention</td><td align="left" valign="top">MCI<sup><xref ref-type="table-fn" rid="table3fn7">g</xref></sup> prediction (MCI vs NC<sup><xref ref-type="table-fn" rid="table3fn8">h</xref></sup>)</td><td align="left" valign="top">Audio, text, and vision (facial video)</td><td align="left" valign="top">AUC<sup><xref ref-type="table-fn" rid="table3fn9">i</xref></sup> scores</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Trimodal AUC 85.3%, and outperformed unimodal (60.9%) and bimodal (76.3%) models</td><td align="left" valign="top">Single cohort (I-CONECT), cross-sectional, and complex architecture</td></tr><tr><td align="left" valign="top">Lin and Washington [<xref ref-type="bibr" rid="ref123">123</xref>], 2024</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">Wav2vec (audio) and Word2Vec (text)</td><td align="left" valign="top">Dementia classification</td><td align="left" valign="top">Audio, text, and timestamps</td><td align="left" valign="top">Accuracy and AUROC<sup><xref ref-type="table-fn" rid="table3fn10">j</xref></sup></td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Text augmentation improved accuracy to &#x2248;80% (AUROC 90%), and timestamps added minimal value</td><td align="left" valign="top">Single corpus: timestamps lacked resolution, and a modest sample size</td></tr><tr><td align="left" valign="top">Ortiz-Perez et al [<xref ref-type="bibr" rid="ref124">124</xref>], 2023</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">Multimodal ensemble (CNN<sup><xref ref-type="table-fn" rid="table3fn11">k</xref></sup> and transformer)</td><td align="left" valign="top">Prediction of dementia signs</td><td align="left" valign="top">Audio and text</td><td align="left" 
valign="top">Classification accuracy</td><td align="left" valign="top">Internal: held-out test sets</td><td align="left" valign="top">Text-only transformer best (accuracy 90.36%) and audio contributed less than text</td><td align="left" valign="top">Single English dataset, broad diagnosis category, and task constrained to picture description</td></tr><tr><td align="left" valign="top">Ilias and Askounis [<xref ref-type="bibr" rid="ref125">125</xref>], 2022</td><td align="left" valign="top">ADReSS (DementiaBank)</td><td align="left" valign="top">Transformer (BERT) and Siamese Network</td><td align="left" valign="top">AD identification and severity estimation</td><td align="left" valign="top">Text (transcripts)</td><td align="left" valign="top">Accuracy and interpretability (LIME<sup><xref ref-type="table-fn" rid="table3fn12">l</xref></sup>)</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Single-task accuracy 87.50%, multitask accuracy 86.25%, and distinct linguistic patterns identified</td><td align="left" valign="top">Small dataset, text only, MMSE<sup><xref ref-type="table-fn" rid="table3fn13">m</xref></sup> treated as categorical, and no acoustic information</td></tr><tr><td align="left" valign="top">Wen et al [<xref ref-type="bibr" rid="ref126">126</xref>], 2023</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">Transformer and causal counterfactual XAI<sup><xref ref-type="table-fn" rid="table3fn14">n</xref></sup></td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Text (part-of-speech tag features)</td><td align="left" valign="top">Accuracy; <italic>F</italic><sub>1</sub>-score; feature importance</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Accuracy 92.2%, <italic>F</italic><sub>1</sub>-score 0.955, identified 12 key part-of-speech features linked to AD</td><td align="left" valign="top">Text only 
(part-of-speech), reliance on tagging accuracy, and no acoustic or imaging data</td></tr><tr><td align="left" valign="top">Chen et al [<xref ref-type="bibr" rid="ref127">127</xref>], 2023</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">SpeechFormer++ (hierarchical transformer)</td><td align="left" valign="top">Paralinguistic AD detection</td><td align="left" valign="top">Audio (acoustic features)</td><td align="left" valign="top">Accuracy; <italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">Internal: held-out test sets</td><td align="left" valign="top">Outperformed standard transformers and CNN/RNN<sup><xref ref-type="table-fn" rid="table3fn15">o</xref></sup> baselines and SOTA<sup><xref ref-type="table-fn" rid="table3fn16">p</xref></sup> performance</td><td align="left" valign="top">Single corpus, complex computation, audio only, and no cross-lingual evaluation</td></tr><tr><td align="left" valign="top">Zheng et al [<xref ref-type="bibr" rid="ref128">128</xref>], 2022</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">N-gram, AWD-LSTM<sup><xref ref-type="table-fn" rid="table3fn17">q</xref></sup>, or neural models</td><td align="left" valign="top">Dementia detection</td><td align="left" valign="top">Text (context words, stop words, and part-of-speech)</td><td align="left" valign="top">Classification accuracy</td><td align="left" valign="top">Internal: held-out test data</td><td align="left" valign="top">Combined model (vocabulary and grammar) accuracy 81.54%, and grammar contributes comparably to context</td><td align="left" valign="top">Specific to task or language, and moderate performance vs multimodal approaches</td></tr><tr><td align="left" valign="top">Nambiar et al [<xref ref-type="bibr" rid="ref129">129</xref>], 2022</td><td align="left" valign="top">DementiaBank (Pitt)</td><td align="left" valign="top">Deep Classifiers (BERT/ALBERT<sup><xref 
ref-type="table-fn" rid="table3fn18">r</xref></sup> + BiLSTM<sup><xref ref-type="table-fn" rid="table3fn19">s</xref></sup>)</td><td align="left" valign="top">Early dementia detection</td><td align="left" valign="top">Text (transcripts)</td><td align="left" valign="top">Accuracy; <italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">Internal: train and test splits</td><td align="left" valign="top">BERT + BiLSTM accuracy 0.812; ALBERT + BiLSTM <italic>F</italic><sub>1</sub>-score 0.81; contextual embeddings superior</td><td align="left" valign="top">Text only; reliance on manual transcripts; single dataset</td></tr><tr><td align="left" valign="top">Priyadarshinee et al [<xref ref-type="bibr" rid="ref130">130</xref>], 2023</td><td align="left" valign="top">ADReSSo-2021</td><td align="left" valign="top">ML<sup><xref ref-type="table-fn" rid="table3fn20">t</xref></sup> classifiers (SVM<sup><xref ref-type="table-fn" rid="table3fn21">u</xref></sup>, RF<sup><xref ref-type="table-fn" rid="table3fn22">v</xref></sup>, and NN<sup><xref ref-type="table-fn" rid="table3fn23">w</xref></sup>)</td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Audio and text (transcripts)</td><td align="left" valign="top">Classification accuracy</td><td align="left" valign="top">Internal: held-out test set</td><td align="left" valign="top">Text features (accuracy 88.7%) outperformed audio, and file-level features were superior to frame-level</td><td align="left" valign="top">Benchmarking context, single task, and single language</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref131">131</xref>], 2023</td><td align="left" valign="top">ADReSS, ADReSSo, and the local Chinese dataset</td><td align="left" valign="top">Ensemble ML (VAD<sup><xref ref-type="table-fn" rid="table3fn24">x</xref></sup> pause and acoustic)</td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Audio (acoustic and VAD pause 
features)</td><td align="left" valign="top">Accuracy</td><td align="left" valign="top">Internal: cross-validation; cross-lingual (Chinese)</td><td align="left" valign="top">Ensemble improved accuracy by &#x2248;8% on ADReSS, and accuracy 80% on the local Chinese dataset</td><td align="left" valign="top">Small local dataset (n=10), handcrafted features, and ensemble complexity</td></tr><tr><td align="left" valign="top">Shah et al [<xref ref-type="bibr" rid="ref23">23</xref>], 2023</td><td align="left" valign="top">ADReSS-M</td><td align="left" valign="top">Logistic regression and SVR</td><td align="left" valign="top">Cross-lingual AD detection; MMSE regression</td><td align="left" valign="top">Audio (duration, pause, and intelligibility) and metadata</td><td align="left" valign="top">Accuracy and RMSE<sup><xref ref-type="table-fn" rid="table3fn25">y</xref></sup></td><td align="left" valign="top">External: Greek test set</td><td align="left" valign="top">English cross-validation accuracy 74.7%, Greek Test accuracy 69.57%, and MMSE RMSE 4.77 (Greek)</td><td align="left" valign="top">Small Greek sample, modest accuracy, and simple ML models vs deep learning</td></tr><tr><td align="left" valign="top">Mahajan and Baths [<xref ref-type="bibr" rid="ref132">132</xref>], 2021</td><td align="left" valign="top">ADReSS</td><td align="left" valign="top">Bimodal framework (CNN-LSTM<sup><xref ref-type="table-fn" rid="table3fn26">z</xref></sup> and Speech-GRU<sup><xref ref-type="table-fn" rid="table3fn27">aa</xref></sup>)</td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">Classification accuracy</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">Bimodal enriched model improved performance by &#x2248;6.25% over acoustic baselines</td><td align="left" valign="top">Small dataset, potential overfitting, and single task (picture description)</td></tr><tr><td 
align="left" valign="top">Mei et al [<xref ref-type="bibr" rid="ref133">133</xref>], 2023</td><td align="left" valign="top">ADReSS-M</td><td align="left" valign="top">Bilingual wav2vec 2.0 + XGBoost<sup><xref ref-type="table-fn" rid="table3fn28">ab</xref></sup></td><td align="left" valign="top">Cross-lingual AD detection and MMSE prediction</td><td align="left" valign="top">Audio (acoustic, silence, and low-frequency bands)</td><td align="left" valign="top">Accuracy and RMSE</td><td align="left" valign="top">External: Greek test set</td><td align="left" valign="top">Accuracy 73.9% (Greek), MMSE RMSE 4.610, and low-frequency speech aided transfer</td><td align="left" valign="top">Very small Greek sample, speech-only, and challenge context</td></tr><tr><td align="left" valign="top">Meerza et al [<xref ref-type="bibr" rid="ref134">134</xref>], 2022</td><td align="left" valign="top">ADReSS</td><td align="left" valign="top">FL<sup><xref ref-type="table-fn" rid="table3fn29">ac</xref></sup> (LSTM<sup><xref ref-type="table-fn" rid="table3fn30">ad</xref></sup> and feed-forward)</td><td align="left" valign="top">Privacy-preserving AD diagnosis</td><td align="left" valign="top">Audio (Mel-frequency and pause features)</td><td align="left" valign="top">Accuracy and fairness metrics</td><td align="left" valign="top">Internal: simulated FL clients</td><td align="left" valign="top">FL accuracy close to the centralized baseline, and q-FedAvg improved fairness</td><td align="left" valign="top">Simulated clients, single dataset, and relies on feature extraction</td></tr><tr><td align="left" valign="top">Chen et al [<xref ref-type="bibr" rid="ref135">135</xref>], 2023</td><td align="left" valign="top">ADReSS-M</td><td align="left" valign="top">SVM or NN on pretrained features</td><td align="left" valign="top">Cross-lingual AD detection</td><td align="left" valign="top">Audio (paralinguistic and XLSR-53<sup><xref ref-type="table-fn" rid="table3fn31">ae</xref></sup>), and text 
(ASR<sup><xref ref-type="table-fn" rid="table3fn32">af</xref></sup>)</td><td align="left" valign="top">Accuracy and RMSE</td><td align="left" valign="top">External: Greek test set</td><td align="left" valign="top">Accuracy 69.6% (Greek), RMSE 4.788, and paralinguistic features transferable</td><td align="left" valign="top">Performance below monolingual systems and reliance on ASR quality</td></tr><tr><td align="left" valign="top">Ilias et al [<xref ref-type="bibr" rid="ref121">121</xref>], 2023</td><td align="left" valign="top">ADReSS</td><td align="left" valign="top">Multimodal transformer (ViT<sup><xref ref-type="table-fn" rid="table3fn33">ag</xref></sup>, BERT, and GMU<sup><xref ref-type="table-fn" rid="table3fn34">ah</xref></sup>)</td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Audio (spectrograms) and text</td><td align="left" valign="top">Accuracy and <italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">Internal: cross-validation</td><td align="left" valign="top">High eighties or low nineties accuracy, ViT is best for acoustic, and fusion surpassed SOTA</td><td align="left" valign="top">Small dataset, binary classification focus, and external generalization untested</td></tr><tr><td align="left" valign="top">Tamm et al [<xref ref-type="bibr" rid="ref136">136</xref>], 2023</td><td align="left" valign="top">ADReSS-M<sup><xref ref-type="table-fn" rid="table3fn35">ai</xref></sup></td><td align="left" valign="top">Sequence models (transfer learning)</td><td align="left" valign="top">Cross-lingual AD detection and MMSE</td><td align="left" valign="top">Audio features and demographics</td><td align="left" valign="top">Accuracy and RMSE</td><td align="left" valign="top">External: Greek test set</td><td align="left" valign="top">Accuracy 82.6% (Greek), RMSE 4.345, and ranked second in the challenge</td><td align="left" valign="top">Small Greek sample, acoustic only, and transfer limited to 
English-Greek</td></tr><tr><td align="left" valign="top">Woszczyk et al [<xref ref-type="bibr" rid="ref137">137</xref>], 2022</td><td align="left" valign="top">ADReSS</td><td align="left" valign="top">Transformers vs traditional ML</td><td align="left" valign="top">AD detection</td><td align="left" valign="top">Audio and text</td><td align="left" valign="top">Classification accuracy</td><td align="left" valign="top">Internal: held-out test data</td><td align="left" valign="top">Data augmentation improved performance and was comparable to SOTA</td><td align="left" valign="top">Augmentations tuned for ADReSS and a single speech task</td></tr><tr><td align="left" valign="top">Jin et al [<xref ref-type="bibr" rid="ref138">138</xref>], 2023</td><td align="left" valign="top">ADReSS-M</td><td align="left" valign="top">CONSEN<sup><xref ref-type="table-fn" rid="table3fn36">aj</xref></sup> ensemble (acoustic and disfluency)</td><td align="left" valign="top">Multilingual AD detection and MMSE</td><td align="left" valign="top">Audio (acoustic embeddings and disfluency)</td><td align="left" valign="top">Accuracy and RMSE</td><td align="left" valign="top">External: Greek test set</td><td align="left" valign="top">First place in the challenge, accuracy 86.69% (Greek), and RMSE 3.727</td><td align="left" valign="top">Challenge dataset, ensemble complexity, and reliance on diarization quality</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>ADReSS: Alzheimer Dementia Recognition Through Spontaneous Speech.</p></fn><fn id="table3fn2"><p><sup>b</sup>ADReSSo: Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech only.</p></fn><fn id="table3fn3"><p><sup>c</sup>BERT: Bidirectional Encoder Representations From Transformers.</p></fn><fn id="table3fn4"><p><sup>d</sup>DeiT: Data-Efficient Image Transformers.</p></fn><fn id="table3fn5"><p><sup>e</sup>AD: Alzheimer disease.</p></fn><fn id="table3fn6"><p><sup>f</sup>I-CONECT: Identifying Cognition in the 
Elderly Through Conversational Engagement.</p></fn><fn id="table3fn7"><p><sup>g</sup>MCI: mild cognitive impairment.</p></fn><fn id="table3fn8"><p><sup>h</sup>NC: normal control.</p></fn><fn id="table3fn9"><p><sup>i</sup>AUC: area under the curve.</p></fn><fn id="table3fn10"><p><sup>j</sup>AUROC: area under the receiver operating characteristic curve.</p></fn><fn id="table3fn11"><p><sup>k</sup>CNN: convolutional neural network.</p></fn><fn id="table3fn12"><p><sup>l</sup>LIME: Local Interpretable Model-Agnostic Explanations.</p></fn><fn id="table3fn13"><p><sup>m</sup>MMSE: Mini-Mental State Examination.</p></fn><fn id="table3fn14"><p><sup>n</sup>XAI: explainable artificial intelligence.</p></fn><fn id="table3fn15"><p><sup>o</sup>RNN: recurrent neural network.</p></fn><fn id="table3fn16"><p><sup>p</sup>SOTA: state of the art.</p></fn><fn id="table3fn17"><p><sup>q</sup>AWD-LSTM: Average stochastic gradient descent weight-dropped long short-term memory</p></fn><fn id="table3fn18"><p><sup>r</sup>ALBERT: A Lite Bidirectional Encoder Representations From Transformers.</p></fn><fn id="table3fn19"><p><sup>s</sup>BiLSTM: bidirectional long short-term memory.</p></fn><fn id="table3fn20"><p><sup>t</sup>ML: machine learning.</p></fn><fn id="table3fn21"><p><sup>u</sup>SVM: support vector machine.</p></fn><fn id="table3fn22"><p><sup>v</sup>RF: random forest.</p></fn><fn id="table3fn23"><p><sup>w</sup>NN: neural network.</p></fn><fn id="table3fn24"><p><sup>x</sup>VAD: voice activity detection.</p></fn><fn id="table3fn25"><p><sup>y</sup>RMSE: root mean square error.</p></fn><fn id="table3fn26"><p><sup>z</sup>CNN-LSTM: convolutional neural network long short-term memory.</p></fn><fn id="table3fn27"><p><sup>aa</sup>Speech-GRU: Speech Gated Recurrent Unit.</p></fn><fn id="table3fn28"><p><sup>ab</sup>XGBoost: Extreme Gradient Boosting.</p></fn><fn id="table3fn29"><p><sup>ac</sup>FL: federated learning.</p></fn><fn id="table3fn30"><p><sup>ad</sup>LSTM: long short-term 
memory.</p></fn><fn id="table3fn31"><p><sup>ae</sup>XLSR-53: cross-lingual speech representation-version 53</p></fn><fn id="table3fn32"><p><sup>af</sup>ASR: automatic speech recognition.</p></fn><fn id="table3fn33"><p><sup>ag</sup>ViT: vision transformer.</p></fn><fn id="table3fn34"><p><sup>ah</sup>GMU: gated multimodal unit.</p></fn><fn id="table3fn35"><p><sup>ai</sup>ADReSS-M: Alzheimer Dementia Recognition through Spontaneous Speech &#x2013; Multimodal.</p></fn><fn id="table3fn36"><p><sup>aj</sup>CONSEN: complementary and simultaneous ensemble.</p></fn></table-wrap-foot></table-wrap><p>Recent studies have shown that multimodal fusion of speech and text using transformer-based architectures substantially improves AD detection performance, with <italic>F</italic><sub>1</sub>-scores above 0.90 on ADReSS and ADReSSo (Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech 2021 Challenge) datasets [<xref ref-type="bibr" rid="ref121">121</xref>,<xref ref-type="bibr" rid="ref132">132</xref>,<xref ref-type="bibr" rid="ref139">139</xref>]. Linguistic feature engineering and interpretable language models further enhanced classification accuracy, achieving up to 92.2% accuracy and <italic>F</italic><sub>1</sub>-scores of 0.955 using compact part-of-speech features [<xref ref-type="bibr" rid="ref124">124</xref>-<xref ref-type="bibr" rid="ref126">126</xref>,<xref ref-type="bibr" rid="ref128">128</xref>,<xref ref-type="bibr" rid="ref130">130</xref>]. Cross-lingual approaches based on language-agnostic and transfer learning methods enabled moderate generalization, with accuracies ranging from 69% to 73.9% in English-Greek transfer settings [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref127">127</xref>,<xref ref-type="bibr" rid="ref133">133</xref>,<xref ref-type="bibr" rid="ref136">136</xref>]. 
To support real-world deployment, lightweight and hierarchical models achieved around 80% accuracy with reduced computational cost [<xref ref-type="bibr" rid="ref131">131</xref>,<xref ref-type="bibr" rid="ref135">135</xref>]. In addition, data augmentation and ensemble strategies improved robustness in low-resource scenarios, yielding <italic>F</italic><sub>1</sub>-score gains of 5%&#x2010;7% and competitive challenge performance (accuracy 86.69%) [<xref ref-type="bibr" rid="ref123">123</xref>,<xref ref-type="bibr" rid="ref137">137</xref>,<xref ref-type="bibr" rid="ref138">138</xref>].</p></sec><sec id="s3-7"><title>Summarization Based on All Multimodal Datasets and Quantitative Analysis</title><p><xref ref-type="table" rid="table2">Table 2</xref> and <xref ref-type="table" rid="table3">Table 3</xref> summarize the recent state-of-the-art models across the 2 major types of multimodal datasets, extracted according to the Cochrane Handbook. Full QUADAS-2 forms are available in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>. Based on these results, the following quantitative synthesis compares performance trends across all multimodal datasets. Across the 4 major dataset categories, modality choices and model performance show clear dataset-dependent patterns as shown in <xref ref-type="table" rid="table4">Table 4</xref>. UK Biobank studies mainly combine MRI, clinical variables, and genetic features, with 2 diagnosis studies reporting an average accuracy of 71.4% (SD 5.2%) and 4 risk-prediction studies reaching an average AUC of 0.84 (SD 0.056). ADNI studies use the most comprehensive modality integrations, with 3 diagnosis studies averaging 92.5% (SD 3.8%) accuracy, 3 MCI-conversion studies achieving a mean AUC of 0.922 (SD 0.045), and risk-prediction studies reaching an average AUC of 0.81 (SD 0.06); these tasks collectively achieve the strongest results, with fusion models frequently reporting AUC values above 0.95. 
DementiaBank studies differ fundamentally by focusing on speech- and language-based modalities; 9 diagnosis studies report an average AUC of 0.813 (SD 0.042), and 5 cross-lingual AD-detection studies show a mean accuracy of 77% (SD 6.5%), where transformer architectures consistently outperform classical approaches, with models such as BERT + DeiT (Data-Efficient Image Transformers), BERT + ViT (vision transformer), and RoBERTa (Robustly Optimized Bidirectional Encoder Representations From Transformers Approach) + DNN (deep neural network) showing <italic>F</italic><sub>1</sub>-scores exceeding 0.90. Self-collected datasets are typically smaller and more heterogeneous; 3 diagnosis studies report an average accuracy of 96% (SD 2.4%), and lightweight models such as EEGNet or ViT-based hybrids demonstrate strong predictive capacity when applied to EEG or structural MRI.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Summary of representative modality combinations and top-performing models in multimodal AI<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup>-aided AD<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup> diagnosis.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Dataset and task</td><td align="left" valign="bottom">Counts</td><td align="left" valign="bottom">Average performance</td><td align="left" valign="bottom">Best performance modalities</td><td align="left" valign="bottom">Related article</td></tr></thead><tbody><tr><td align="left" valign="top">UK Biobank</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Diagnosis</td><td align="left" valign="top">2</td><td align="left" valign="top">Accuracy=71.4%</td><td align="left" valign="top">Retinal fundus 
images</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref140">140</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Risk prediction</td><td align="left" valign="top">4</td><td align="left" valign="top">AUC<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup>=84%</td><td align="left" valign="top">Clinical, biological assays, cognitive tests, and physical measures</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref140">140</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">3</td><td align="left" valign="top">N/A<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup></td><td align="left" valign="top">Multimodal MRI<sup><xref ref-type="table-fn" rid="table4fn5">e</xref></sup> (T1, T2, MRI, etc)</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref140">140</xref>]</td></tr><tr><td align="left" valign="top">ADNI<sup><xref ref-type="table-fn" rid="table4fn6">f</xref></sup></td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Diagnosis</td><td align="left" valign="top">3</td><td align="left" valign="top">Accuracy=92.5%</td><td align="left" valign="top">Structural MRI features and neuropsychological tests</td><td align="left" 
valign="top">[<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>-<xref ref-type="bibr" rid="ref108">108</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref141">141</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MCI<sup><xref ref-type="table-fn" rid="table4fn7">g</xref></sup> conversion</td><td align="left" valign="top">3</td><td align="left" valign="top">AUC=92.2%</td><td align="left" valign="top">Structural MRI, clinical variables, and genetics (SNP<sup><xref ref-type="table-fn" rid="table4fn8">h</xref></sup>)</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>-<xref ref-type="bibr" rid="ref108">108</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref141">141</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MMSE<sup><xref ref-type="table-fn" rid="table4fn9">i</xref></sup> regression</td><td align="left" valign="top">2</td><td align="left" valign="top">No integration</td><td align="left" valign="top">Whole-brain T1-weighted MRI and clinical scores</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" 
rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>-<xref ref-type="bibr" rid="ref108">108</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref141">141</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Risk prediction</td><td align="left" valign="top">7</td><td align="left" valign="top">AUC=81%</td><td align="left" valign="top">MRI, PET<sup><xref ref-type="table-fn" rid="table4fn10">j</xref></sup>, clinical, and cognitive</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>-<xref ref-type="bibr" rid="ref108">108</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref141">141</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">13</td><td align="left" valign="top">N/A</td><td align="left" valign="top">N/A</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref94">94</xref>-<xref ref-type="bibr" rid="ref108">108</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref 
ref-type="bibr" rid="ref141">141</xref>]</td></tr><tr><td align="left" valign="top">Dementia bank</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Diagnosis</td><td align="left" valign="top">9</td><td align="left" valign="top">AUC=81.3%</td><td align="left" valign="top">Text transcripts &#x2192; part-of-speech feature vectors</td><td align="left" valign="top"><xref ref-type="table" rid="table3">Table 3</xref></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Cross-lingual AD detection</td><td align="left" valign="top">5</td><td align="left" valign="top">Accuracy=77%</td><td align="left" valign="top">Multimodal acoustic fusion</td><td align="left" valign="top"><xref ref-type="table" rid="table3">Table 3</xref></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">6</td><td align="left" valign="top">N/A</td><td align="left" valign="top">N/A</td><td align="left" valign="top"><xref ref-type="table" rid="table3">Table 3</xref></td></tr><tr><td align="left" valign="top">Self-collected datasets</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Diagnosis</td><td align="left" valign="top">3</td><td align="left" valign="top">Accuracy=96%</td><td align="left" valign="top">MRI, PET, clinical, and genotype</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" 
rid="ref106">106</xref>,<xref ref-type="bibr" rid="ref109">109</xref>-<xref ref-type="bibr" rid="ref115">115</xref>,<xref ref-type="bibr" rid="ref117">117</xref>,<xref ref-type="bibr" rid="ref120">120</xref>,<xref ref-type="bibr" rid="ref142">142</xref>]</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">6</td><td align="left" valign="top">No integration</td><td align="left" valign="top">Different task</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>,<xref ref-type="bibr" rid="ref106">106</xref>,<xref ref-type="bibr" rid="ref109">109</xref>-<xref ref-type="bibr" rid="ref115">115</xref>,<xref ref-type="bibr" rid="ref117">117</xref>,<xref ref-type="bibr" rid="ref120">120</xref>,<xref ref-type="bibr" rid="ref142">142</xref>]</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table4fn2"><p><sup>b</sup>AD: Alzheimer disease.</p></fn><fn id="table4fn3"><p><sup>c</sup>AUC: area under the curve.</p></fn><fn id="table4fn4"><p><sup>d</sup>N/A: not available.</p></fn><fn id="table4fn5"><p><sup>e</sup>MRI: magnetic resonance imaging.</p></fn><fn id="table4fn6"><p><sup>f</sup>ADNI: Alzheimer Disease Neuroimaging Initiative.</p></fn><fn id="table4fn7"><p><sup>g</sup>MCI: mild cognitive impairment.</p></fn><fn id="table4fn8"><p><sup>h</sup>SNP: single-nucleotide polymorphism.</p></fn><fn id="table4fn9"><p><sup>i</sup>MMSE: Mini-Mental State Examination.</p></fn><fn id="table4fn10"><p><sup>j</sup>PET: positron emission tomography.</p></fn></table-wrap-foot></table-wrap><p>To interpret these results and limit metric inflation, note that purely internal cross-validation tends to overestimate performance: AUC is typically &#x2248;5&#x2010;15 points higher than with external 
validation. Small or tightly controlled datasets also report accuracies &#x2248;10%&#x2010;20% above those in large, heterogeneous cohorts. Severe class imbalance can further raise accuracy while lowering <italic>F</italic><sub>1</sub>-score or sensitivity; without correction, imbalance may inflate results by &#x2248;5%&#x2010;12%. Cross-sectional models often score higher in single-timepoint evaluations, whereas longitudinal designs usually yield lower but more stable estimates, which are more informative for follow-up and clinical use.</p><p>These findings should be interpreted in light of substantial heterogeneity and risk of bias. Variation in sample composition, task definitions, and evaluation procedures across datasets limits direct comparison of performance metrics. QUADAS-2 also indicated frequent unclear and high-risk ratings in patient selection, reference standards, and flow or timing, especially in studies using only internal validation or selected samples. Reported metrics, therefore, likely represent upper-bound estimates rather than expected real-world performance, and apparent gains often reflect dataset-specific effects rather than generalizable model superiority.</p><p>Overall, the evidence shows that modality effectiveness varies substantially across datasets, transformer models deliver the highest gains in speech-language tasks, and large clinical phenotyping datasets such as UK Biobank and ADNI still rely mainly on traditional machine-learning or custom fusion frameworks rather than modern cross-modal transformers. This gap highlights an opportunity to develop transformer-based multimodal integration approaches tailored to large, heterogeneous clinical datasets.</p></sec><sec id="s3-8"><title>Multimodal Fusion Taxonomy</title><p>A structured multimodal fusion taxonomy clarifies the performance of different integration strategies across datasets (<xref ref-type="table" rid="table2">Tables 2 and 3</xref>). 
A total of 4 main paradigms are commonly used: early, intermediate, late, and attention- or graph-based fusion.</p><p>Early fusion concatenates low-level features and performs well for aligned modalities such as MRI + PET, often achieving AUC&#x003E;0.95 in ADNI studies, but is sensitive to missing data and feature-scale heterogeneity. Intermediate fusion combines latent representations from modality-specific encoders and is effective for heterogeneous inputs such as MRI + speech or EEG + clinical data, as demonstrated by high performance in ADReSS-based models, although it may be unstable in small datasets. Late fusion aggregates model outputs and is robust to missing modalities, performing well in large datasets such as the UK Biobank, but underuses fine-grained cross-modal interactions.</p><p>Across paradigms, limited modality availability and high acquisition costs remain key challenges, underscoring the need for adaptive and clinically feasible fusion strategies.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This review synthesized multimodal AI studies for AD across diverse dataset families, including clinical phenotyping and cognitive-linguistic datasets. Multimodal fusion generally outperformed unimodal baselines, but the gain is dataset-dependent and should be interpreted cautiously. Strong performance in curated cohorts and constrained speech benchmarks may not generalize to population-based or multicenter settings. QUADAS-2 also indicated frequent risk of bias and unclear reporting across domains, likely inflating metrics and limiting comparability. 
Accordingly, headline accuracy and AUC should be treated as upper-bound estimates unless supported by external validation and transparent reporting.</p></sec><sec id="s4-2"><title>Challenges and Future Directions</title><p>In recent years, multimodal models have demonstrated remarkable potential in computer-aided diagnosis and risk prediction for AD. While these methods have achieved significant successes, several challenges remain that warrant careful examination. In this section, this systematic review summarizes the common limitations identified in existing studies and proposes directions for future research to advance the field.</p></sec><sec id="s4-3"><title>Clinical and Translational Implications</title><p>Multimodal AI could support AD diagnosis through several clinical pathways. In memory clinics, models combining MRI, cognitive scores, and blood biomarkers could triage referrals, prioritizing patients for specialist review or PET. In general practice, speech-based and routine clinical-feature models could be embedded in consultations to flag early cognitive change. In radiology, MRI-clinical fusion could act as a second reader, reducing interobserver variability and supporting less experienced clinicians. Where imaging or specialist access is limited, speech, digital questionnaires, and basic clinical data could enable telemedicine-based screening and follow-up. At the population level, these models could support risk stratification and targeted monitoring. To enable real-world deployment, research should prioritize external multicenter validation, integration with electronic health records, and evaluation of regulatory feasibility, cost-effectiveness, and clinical impact.</p></sec><sec id="s4-4"><title>Ethical and Regulatory Implications</title><p>Deploying multimodal AI for AD diagnosis requires ethical and regulatory safeguards. 
As datasets often combine imaging, clinical records, genomics, and speech, they fall under strict privacy regimes (eg, General Data Protection Regulation in the European Union; HIPAA [Health Insurance Portability and Accountability Act] in the United States), requiring explicit consent, data minimization, and secure handling, with added complexity for sensitive modalities such as speech and genomic data. Clinical deployment is also shaped by medical-AI governance frameworks (eg, the European Union AI Act, Food and Drug Administration Software as a Medical Device guidance, and UK Medicines and Healthcare products Regulatory Agency Good Machine Learning Practice), which emphasize transparency, risk management, and postdeployment monitoring. Fairness is essential because demographic imbalance can yield uneven performance across age, ethnicity, and language groups. Interpretability (eg, imaging attention maps and linguistic saliency) supports clinical accountability and aligns with explainability expectations. Future work should incorporate privacy-preserving methods, bias audits, and regulatory-aligned validation pipelines to enable responsible clinical integration.</p></sec><sec id="s4-5"><title>Data Privacy and Data-Sharing Constraints</title><p>Access to multimodal AD data remains severely restricted by privacy regulations and ethical constraints, which limit data sharing and external validation. This restricts the sharing and usage of comprehensive datasets needed for robust external validation and generalizability.</p><p>Federated learning (FL) provides a technically viable privacy-preserving solution; however, differences in data formats and institutional infrastructures still impede its large-scale deployment. 
For instance, Meerza et al [<xref ref-type="bibr" rid="ref134">134</xref>] pioneered FL for AD speech diagnosis using mel-frequency cepstral coefficients and pause features, maintaining model performance while ensuring privacy through q-FedAvg/q-FedSGD optimization. Nambiar [<xref ref-type="bibr" rid="ref129">129</xref>] validated an ALBERT (A Lite Bidirectional Encoder Representations From Transformers) + BiLSTM (bidirectional long short-term memory) hybrid model on the ADReSS dataset, achieving strong performance without compromising data confidentiality. In parallel, multi-institutional collaborations leveraging publicly available datasets such as ADNI, UK Biobank, and OASIS have enabled richer external validation while adhering to rigorous privacy standards [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref100">100</xref>,<xref ref-type="bibr" rid="ref139">139</xref>,<xref ref-type="bibr" rid="ref140">140</xref>].</p><p>Despite encouraging results, FL still lacks harmonized protocols and interoperable platforms. This limits cross-center reproducibility and weakens clinical credibility. International collaboration also remains constrained by regulatory differences. Future work should prioritize unified federated frameworks with standardized protocols and privacy-preserving methods to enable secure global data collaboration [<xref ref-type="bibr" rid="ref143">143</xref>,<xref ref-type="bibr" rid="ref144">144</xref>].</p><p>As most datasets lack fully matched modalities per participant, multimodal fusion often relies on representation- or population-level integration rather than early fusion. Early fusion requires paired samples and is therefore infeasible across datasets. 
By contrast, late fusion and embedding-level integration can train unimodal models separately and combine them via meta-learners, cross-modal transformers, or probabilistic ensembles. Domain adaptation, transfer learning, and harmonization can also combine heterogeneous cohorts at the population level to improve generalizability. A standardized benchmark could further support this by defining shared preprocessing, label taxonomies, and evaluation metrics, enabling meaningful comparison or representation-stage fusion even without subject-level pairing.</p></sec><sec id="s4-6"><title>Data Imbalance</title><p>Severe class imbalance remains a major obstacle, biasing training toward the majority class and inflating accuracy while masking low sensitivity to early disease. In addition, datasets such as the UK Biobank are dominated by White European ancestry, limiting generalizability across racially and ethnically diverse populations. Addressing this requires both technical mitigation and proactive recruitment of underrepresented groups so models better reflect population heterogeneity.</p><p>Researchers have applied data-level interventions such as generative adversarial network&#x2013;based augmentation, diffusion models, and resampling [<xref ref-type="bibr" rid="ref123">123</xref>,<xref ref-type="bibr" rid="ref137">137</xref>,<xref ref-type="bibr" rid="ref138">138</xref>,<xref ref-type="bibr" rid="ref145">145</xref>,<xref ref-type="bibr" rid="ref146">146</xref>]; algorithm-level solutions, including cost-sensitive, loss-focused, ensemble, and class-weighted training schemes [<xref ref-type="bibr" rid="ref147">147</xref>-<xref ref-type="bibr" rid="ref152">152</xref>]; and evaluation-focused remedies [<xref ref-type="bibr" rid="ref153">153</xref>] have been developed to mitigate biases.</p><p>Current methods frequently introduce new challenges, such as overfitting or inadequate performance in minority classes. Moreover, efforts to increase diversity remain inadequate. 
Future directions should focus on novel adaptive resampling methods, generative methods for synthetic minority data creation, and dedicated efforts to include and characterize underrepresented populations to ensure equitable and robust clinical applicability across diverse groups.</p></sec><sec id="s4-7"><title>Lack of Standardized and Longitudinal Data</title><p>Differences in acquisition protocols and diagnostic criteria across datasets limit comparability of imaging, cognitive, and biomarker outcomes. Longitudinal evidence is also constrained: even in relatively standardized resources such as ADNI, limited long-term follow-up hampers modeling the temporal dynamics of disease progression.</p><p>Future work should standardize key acquisition elements and diagnostic criteria across longitudinal studies and strengthen coordination across institutions. Building on this, a multimodal benchmark spanning imaging, clinical, biomarker, behavioral, and linguistic modalities would enable cross-dataset validation, improve comparability, and support reproducible evaluation of new models. These steps would strengthen temporal modeling and provide more reliable evidence for clinical translation.</p></sec><sec id="s4-8"><title>Dataset-Specific Limitations</title><p>Data imbalance is prevalent across many AD datasets, but the nature of this issue varies substantially between cohorts. This review, therefore, outlines the dataset-specific limitations of commonly used AD cohorts and corpora.</p><p>ADNI participants are generally healthier, with fewer comorbidities and a restricted age range (55&#x2010;90 y), limiting representativeness. Protocol differences across centers and evolving diagnostic standards introduce heterogeneity, while frequent reliance on subsets hampers comparability [<xref ref-type="bibr" rid="ref154">154</xref>].</p><p>In the UK Biobank, dementia outcomes are derived mainly from health records, leading to potential misclassification and delayed ascertainment. 
Participants show strong volunteer bias, and PET or cerebrospinal fluid biomarkers are limited to a small subset, constraining multimodal analyses [<xref ref-type="bibr" rid="ref155">155</xref>].</p><p>OASIS provides open neuroimaging data but with relatively small AD/MCI sample sizes and inconsistent modality coverage. Limited longitudinal depth and cross-scanner variability further reduce reproducibility [<xref ref-type="bibr" rid="ref156">156</xref>].</p><p>In NACC, data are aggregated from multiple centers with heterogeneous recruitment and diagnostic protocols, making harmonization challenging. The cohort is clinic-based rather than population-representative, and missing biomarker modalities are common [<xref ref-type="bibr" rid="ref157">157</xref>].</p><p>Although high-quality, the Australian Imaging, Biomarkers and Lifestyle Study is smaller than ADNI and NACC and is often used only for validation. Regional recruitment and protocol differences reduce ethnic diversity and cross-cohort comparability [<xref ref-type="bibr" rid="ref158">158</xref>].</p><p>The Pitt Corpus is the most widely used speech dataset but remains small and imbalanced. Tasks are constrained, limiting ecological validity, and cross-linguistic generalizability is poor [<xref ref-type="bibr" rid="ref159">159</xref>].</p><p>The ADReSS benchmarks provide standardized speech corpora but are modest in size and restricted to English. Narrow task design and small training partitions raise concerns of overfitting and limited external validity [<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>Self-collected, locally gathered datasets often involve small, single-site samples with heterogeneous acquisition protocols. 
Missing modalities, limited follow-up, and selection bias further restrict their generalizability [<xref ref-type="bibr" rid="ref153">153</xref>].</p><p>Dataset challenges are compounded by unrepresentative cohorts, incomplete modalities, and poor cross-center consistency, limiting model robustness and cross-dataset generalization in AD diagnosis. Future work should improve data coordination and standardization, enable more practical sharing mechanisms, and adopt cross-cohort validation where feasible. Strengthening data quality and access is essential for translating multimodal AI methods into clinical use.</p></sec><sec id="s4-9"><title>Model Interpretability and Explainability</title><p>A major limitation of multimodal ML models in clinical AD diagnosis is limited interpretability and transparency. Many high-performing models provide insufficient insight into their decision processes, which can hinder clinical adoption and reduce confidence among end users.</p><p>Efforts that have been made toward model interpretability include designing inherently transparent models. For example, some studies demonstrate emerging explainability strategies, including hybrid neuro-symbolic models [<xref ref-type="bibr" rid="ref160">160</xref>] that generate interpretable reports and post hoc methods such as SHAP, LIME (Local Interpretable Model-Agnostic Explanations), gradient-based saliency, and graph-masking techniques [<xref ref-type="bibr" rid="ref161">161</xref>,<xref ref-type="bibr" rid="ref162">162</xref>], which collectively enhance transparency in multimodal AD diagnosis.</p><p>Current interpretability methods often fail to produce explanations that clinicians can use reliably. Future work should prioritize clinically grounded explainability, including interactive visualizations and concise workflow-aligned natural-language summaries. Hybrid designs that combine deep learning with structured reasoning can further improve transparency by making decision logic explicit. 
For deployment, models should also report prediction uncertainty and demonstrate compatibility with clinical systems and regulatory requirements.</p><p>Beyond technical advances, incorporating patient and public involvement can improve multimodal AI development for AD. Patients and caregivers can help shape evaluation and result communication, not just act as end users, aligning explanations with patient priorities and addressing transparency and fairness. Engaging patient and public involvement earlier in model design may therefore support more interpretable and clinically usable diagnostic tools.</p></sec><sec id="s4-10"><title>Heterogeneous Multiview Learning Problem</title><p>Integrating data across studies is challenging because single datasets rarely cover all modalities, forcing combinations such as ADNI with UK Biobank. However, differences in cohorts, imaging protocols, and cognitive assessment frameworks create substantial heterogeneity that limits direct pooling and comparability.</p><p>This heterogeneity hinders building unified models that generalize across nonoverlapping cohorts, so single-dataset models often fail out of domain. Platform-agnostic methods that tolerate missing or inconsistent modalities are therefore needed. Proposed solutions include shared latent-space learning [<xref ref-type="bibr" rid="ref163">163</xref>], multibranch networks [<xref ref-type="bibr" rid="ref164">164</xref>], and mixture-of-experts architectures [<xref ref-type="bibr" rid="ref165">165</xref>] to support partial fusion and cross-dataset adaptation, but most still assume strong cross-domain alignment or require substantial retraining under domain shift.</p><p>Despite recent progress, multimodal methods often assume strict cross-domain alignment and require extensive retraining under domain shift or missing modalities. 
Future work should develop robust, platform-agnostic frameworks that adapt to changing modality availability and distribution shifts with minimal performance loss and advance representation learning to derive stable joint embeddings from heterogeneous data.</p></sec><sec id="s4-11"><title>Uncertainty Quantification and Clinical Applicability</title><p>Although multimodal AD models have advanced, most studies still omit uncertainty quantification (eg, confidence or prediction intervals). Models typically provide deterministic outputs without communicating reliability, despite clinicians relying on uncertainty to guide management and treatment decisions. Future work should embed uncertainty metrics into diagnostic models to better align with clinical needs and improve interpretability, reliability, and real-world adoption.</p></sec><sec id="s4-12"><title>Risks of Data Leakage in Multimodal AI Modeling</title><p>Another limitation is data leakage, which can inflate performance. Common forms include subject-level leakage (samples from the same participant in both training and test sets), patch-level overlap in MRI slice and patch models, and transcript or utterance-level leakage in speech datasets when multiple segments come from 1 individual. Many studies did not report whether participant-independent splits were enforced. Clearer reporting of partitioning and rigorous participant-level cross-validation are therefore essential to ensure real-world generalizability.</p></sec><sec id="s4-13"><title>Conclusions</title><p>This review synthesizes evidence on multimodal AI approaches for AD across clinical, neuroimaging, genetic, and linguistic data, systematically comparing modeling strategies, validation practices, and performance trends across heterogeneous datasets. 
In contrast to prior modality-specific reviews, the findings show that multimodal models generally outperform unimodal approaches, although performance varies substantially with dataset characteristics, modality availability, and cross-source alignment. High accuracies are often reported in curated or internally validated cohorts, whereas population-based and externally validated studies yield more modest but clinically realistic results, reflecting substantial heterogeneity and risk of bias.</p><p>Despite these limitations, the evidence demonstrates that multimodal AI captures complementary biological and behavioral signals relevant to AD, offering clear advantages for diagnosis and risk prediction. Transformer-based architectures and speech- or behavior-derived modalities show promise for scalable and noninvasive early detection. However, meaningful clinical translation will require harmonized benchmarking, transparent reporting, and rigorous external validation. Overall, this review advances the field by contextualizing performance gains within their methodological constraints and by outlining practical directions for developing robust, interpretable, and generalizable multimodal AI systems. These insights support the responsible integration of AI into real-world dementia screening, risk prediction, and early intervention strategies.</p></sec></sec></body><back><ack><p>The authors declare the use of generative artificial intelligence (GenAI) in the research and writing process. According to the GAIDeT (Generative Artificial Intelligence for Digital Twins) taxonomy (2025), the following tasks were delegated to GenAI tools under full human supervision: proofreading and editing. The GenAI tool used was ChatGPT-5.2. Responsibility for the final manuscript lies entirely with the authors. Declaration submitted by: JMIR Publications. 
We used ChatGPT-5.2 (OpenAI) to conduct a grammatical review of the abstract and conclusion sections.</p></ack><notes><sec><title>Funding</title><p>This work received no specific financial or nonfinancial support. No funder or sponsor had any role in the design of the review; data collection, analysis, or interpretation; writing of this paper; or the decision to submit for publication.</p></sec><sec><title>Data Availability</title><p>This systematic review did not generate any new datasets. All data extracted and analyzed in this systematic review were obtained from publicly available publications included in the review. No additional unpublished or proprietary data were used.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AD</term><def><p>Alzheimer disease</p></def></def-item><def-item><term id="abb2">AdaBoost</term><def><p>Adaptive Boosting</p></def></def-item><def-item><term id="abb3">ADNI</term><def><p>Alzheimer&#x2019;s Disease Neuroimaging Initiative</p></def></def-item><def-item><term id="abb4">ADReSS</term><def><p>Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech</p></def></def-item><def-item><term id="abb5">ADReSSo</term><def><p>Alzheimer&#x2019;s Dementia Recognition Through Spontaneous Speech 2021 Challenge</p></def></def-item><def-item><term id="abb6">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb7">ALBERT</term><def><p>A Lite Bidirectional Encoder Representations From Transformers</p></def></def-item><def-item><term id="abb8">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb9">BERT</term><def><p>Bidirectional Encoder Representations From Transformers</p></def></def-item><def-item><term id="abb10">BiLSTM</term><def><p>bidirectional long short-term memory</p></def></def-item><def-item><term id="abb11">DeiT</term><def><p>Data-Efficient Image 
Transformers</p></def></def-item><def-item><term id="abb12">DNN</term><def><p>deep neural network</p></def></def-item><def-item><term id="abb13">EEG</term><def><p>electroencephalography</p></def></def-item><def-item><term id="abb14">FL</term><def><p>federated learning</p></def></def-item><def-item><term id="abb15">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb16">LightGBM</term><def><p>Light Gradient-Boosting Machine</p></def></def-item><def-item><term id="abb17">LIME</term><def><p>Local Interpretable Model-Agnostic Explanations</p></def></def-item><def-item><term id="abb18">MCI</term><def><p>mild cognitive impairment</p></def></def-item><def-item><term id="abb19">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb20">MRI</term><def><p>magnetic resonance imaging</p></def></def-item><def-item><term id="abb21">NACC</term><def><p>National Alzheimer&#x2019;s Coordinating Center</p></def></def-item><def-item><term id="abb22">OASIS</term><def><p>Open Access Series of Imaging Studies</p></def></def-item><def-item><term id="abb23">PET</term><def><p>positron emission tomography</p></def></def-item><def-item><term id="abb24">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb25">PRISMA-S</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses literature search extension</p></def></def-item><def-item><term id="abb26">QUADAS-2</term><def><p>Revised Quality Assessment of Diagnostic Accuracy Studies tool</p></def></def-item><def-item><term id="abb27">RL</term><def><p>reinforcement learning</p></def></def-item><def-item><term id="abb28">RoBERTa+</term><def><p>Robustly Optimized Bidirectional Encoder Representations From Transformers Approach</p></def></def-item><def-item><term id="abb29">SHAP</term><def><p>Shapley Additive Explanations</p></def></def-item><def-item><term 
id="abb30">ViT</term><def><p>vision transformer</p></def></def-item><def-item><term id="abb31">XGBoost</term><def><p>Extreme Gradient Boosting</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheltens</surname><given-names>P</given-names> </name><name name-style="western"><surname>De Strooper</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kivipelto</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Alzheimer&#x2019;s disease</article-title><source>Lancet</source><year>2021</year><month>04</month><day>24</day><volume>397</volume><issue>10284</issue><fpage>1577</fpage><lpage>1590</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(20)32205-4</pub-id><pub-id pub-id-type="medline">33667416</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><article-title>2024 Alzheimer&#x2019;s disease facts and figures</article-title><source>Alzheimers Dement</source><year>2024</year><month>05</month><volume>20</volume><issue>5</issue><fpage>3708</fpage><lpage>3821</lpage><pub-id pub-id-type="doi">10.1002/alz.13809</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="report"><article-title>World alzheimer report 2024</article-title><year>2024</year><access-date>2026-02-10</access-date><publisher-name>Alzheimer&#x2019;s Disease International</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.alzint.org/resource/world-alzheimer-report-2024/">https://www.alzint.org/resource/world-alzheimer-report-2024/</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ka&#x0161;telan</surname><given-names>S</given-names> 
</name><name name-style="western"><surname>Gverovi&#x0107; Antunica</surname><given-names>A</given-names> </name><name name-style="western"><surname>Puzovi&#x0107;</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Non-invasive retinal biomarkers for early diagnosis of Alzheimer&#x2019;s disease</article-title><source>Biomedicines</source><year>2025</year><month>01</month><day>24</day><volume>13</volume><issue>2</issue><fpage>283</fpage><pub-id pub-id-type="doi">10.3390/biomedicines13020283</pub-id><pub-id pub-id-type="medline">40002697</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Castellano</surname><given-names>G</given-names> </name><name name-style="western"><surname>Esposito</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lella</surname><given-names>E</given-names> </name><name name-style="western"><surname>Montanaro</surname><given-names>G</given-names> </name><name name-style="western"><surname>Vessio</surname><given-names>G</given-names> </name></person-group><article-title>Automated detection of Alzheimer&#x2019;s disease: a multi-modal approach with 3D MRI and amyloid PET</article-title><source>Sci Rep</source><year>2024</year><month>03</month><day>3</day><volume>14</volume><issue>1</issue><fpage>5210</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-56001-9</pub-id><pub-id pub-id-type="medline">38433282</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Abrol</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>Z</given-names> </name><name 
name-style="western"><surname>Calhoun</surname><given-names>VD</given-names> </name></person-group><article-title>A multimodal vision transformer for interpretable fusion of functional and structural neuroimaging data</article-title><source>Hum Brain Mapp</source><year>2024</year><month>12</month><day>1</day><volume>45</volume><issue>17</issue><fpage>e26783</fpage><pub-id pub-id-type="doi">10.1002/hbm.26783</pub-id><pub-id pub-id-type="medline">39600159</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Da</surname><given-names>L</given-names> </name><etal/></person-group><article-title>A transformer-based unified multimodal framework for Alzheimer&#x2019;s disease assessment</article-title><source>Comput Biol Med</source><year>2024</year><month>09</month><volume>180</volume><fpage>108979</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.108979</pub-id><pub-id pub-id-type="medline">39098237</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Leng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>He</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Amini</surname><given-names>S</given-names> </name><etal/></person-group><article-title>A GPT-4o-powered framework for identifying cognitive impairment stages in electronic health records</article-title><source>npj Digit Med</source><year>2025</year><month>07</month><day>3</day><volume>8</volume><issue>1</issue><fpage>401</fpage><pub-id pub-id-type="doi">10.1038/s41746-025-01834-5</pub-id></nlm-citation></ref><ref 
id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Balabin</surname><given-names>H</given-names> </name><name name-style="western"><surname>Tamm</surname><given-names>B</given-names> </name><name name-style="western"><surname>Spruyt</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Natural language processing-based classification of early Alzheimer&#x2019;s disease from connected speech</article-title><source>Alzheimer's Dement</source><year>2025</year><month>02</month><volume>21</volume><issue>2</issue><fpage>e14530</fpage><pub-id pub-id-type="doi">10.1002/alz.14530</pub-id><pub-id pub-id-type="medline">39868827</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Hong</surname><given-names>K</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>K</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Fati</surname><given-names>SM</given-names> </name></person-group><article-title>Early diagnosis of Alzheimer&#x2019;s disease based on multi-attention mechanism</article-title><source>PLOS ONE</source><year>2024</year><volume>19</volume><issue>9</issue><fpage>e0310966</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0310966</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wijeratne</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Alexander</surname><given-names>DC</given-names> 
</name></person-group><article-title>Learning transition times in event sequences: the event-based hidden Markov model of disease progression</article-title><source>Inf Process Med Imaging</source><year>2021</year><month>06</month><volume>12729</volume><issue>14</issue><fpage>583</fpage><lpage>595</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-78191-0_45</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huh</surname><given-names>YJ</given-names> </name><name name-style="western"><surname>Park</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>YJ</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>KG</given-names> </name></person-group><article-title>Ensemble learning-based Alzheimer&#x2019;s disease classification using electroencephalogram signals and clock drawing test images</article-title><source>Sensors (Basel)</source><year>2025</year><month>05</month><day>2</day><volume>25</volume><issue>9</issue><fpage>2881</fpage><pub-id pub-id-type="doi">10.3390/s25092881</pub-id><pub-id pub-id-type="medline">40363322</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karasu</surname><given-names>E</given-names> </name><name name-style="western"><surname>Bayta&#x015F;</surname><given-names>&#x0130;M</given-names> </name></person-group><article-title>Conversion-aware forecasting of Alzheimer&#x2019;s disease via featurewise attention</article-title><source>Pattern Anal Applic</source><year>2025</year><month>06</month><volume>28</volume><issue>2</issue><fpage>64</fpage><pub-id pub-id-type="doi">10.1007/s10044-025-01447-4</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Xiao</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Development and validation of a novel predictive model for dementia risk in middle-aged and elderly depression individuals: a large and longitudinal machine learning cohort study</article-title><source>Alz Res Therapy</source><year>2025</year><month>05</month><day>13</day><volume>17</volume><issue>1</issue><fpage>103</fpage><pub-id pub-id-type="doi">10.1186/s13195-025-01750-6</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Qiu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Joshi</surname><given-names>PS</given-names> </name><etal/></person-group><article-title>Multimodal deep learning for Alzheimer&#x2019;s disease dementia assessment</article-title><source>Nat Commun</source><year>2022</year><month>06</month><day>20</day><volume>13</volume><issue>1</issue><fpage>3404</fpage><pub-id pub-id-type="doi">10.1038/s41467-022-31037-5</pub-id><pub-id pub-id-type="medline">35725739</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chakravarthi</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Shivakanth</surname><given-names>G</given-names> </name></person-group><article-title>Integrating multimodal AI techniques and MRI preprocessing for enhanced diagnosis of Alzheimer&#x2019;s disease: clinical applications and research horizons</article-title><source>IEEE 
Access</source><year>2025</year><volume>13</volume><fpage>63519</fpage><lpage>63531</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3557533</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elazab</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Abdelaziz</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Alzheimer&#x2019;s disease diagnosis from single and multimodal data using machine and deep learning models: achievements and future directions</article-title><source>Expert Syst Appl</source><year>2024</year><month>12</month><volume>255</volume><fpage>124780</fpage><pub-id pub-id-type="doi">10.1016/j.eswa.2024.124780</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ding</surname><given-names>K</given-names> </name><name name-style="western"><surname>Chetty</surname><given-names>M</given-names> </name><name name-style="western"><surname>Noori Hoshyar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bhattacharya</surname><given-names>T</given-names> </name><name name-style="western"><surname>Klein</surname><given-names>B</given-names> </name></person-group><article-title>Speech based detection of Alzheimer&#x2019;s disease: a survey of AI techniques, datasets and challenges</article-title><source>Artif Intell Rev</source><year>2024</year><month>10</month><day>12</day><volume>57</volume><issue>12</issue><fpage>325</fpage><pub-id pub-id-type="doi">10.1007/s10462-024-10961-6</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title><source>BMJ</source><year>2021</year><month>03</month><day>29</day><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id><pub-id pub-id-type="medline">33782057</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rethlefsen</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Kirtley</surname><given-names>S</given-names> </name><name name-style="western"><surname>Waffenschmidt</surname><given-names>S</given-names> </name><etal/></person-group><article-title>PRISMA-S: an extension to the PRISMA Statement for Reporting Literature Searches in Systematic Reviews</article-title><source>Syst Rev</source><year>2021</year><month>01</month><day>26</day><volume>10</volume><issue>1</issue><fpage>39</fpage><pub-id pub-id-type="doi">10.1186/s13643-020-01542-z</pub-id><pub-id pub-id-type="medline">33499930</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>T</given-names> </name><name name-style="western"><surname>Higgins</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Deeks</surname><given-names>JJ</given-names> </name></person-group><article-title>Collecting data</article-title><source>Cochrane Handbook for Systematic Reviews of Interventions</source><year>2019</year><fpage>109</fpage><lpage>141</lpage><pub-id 
pub-id-type="doi">10.1002/9781119536604</pub-id><pub-id pub-id-type="other">978-1-119-53660-4</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Park</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Integrating biomarkers from virtual reality and magnetic resonance imaging for the early detection of mild cognitive impairment using a multimodal learning approach: validation study</article-title><source>J Med Internet Res</source><year>2024</year><month>04</month><day>17</day><volume>26</volume><fpage>e54538</fpage><pub-id pub-id-type="doi">10.2196/54538</pub-id><pub-id pub-id-type="medline">38631021</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shah</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Qi</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Exploring language-agnostic speech representations using domain knowledge for detecting Alzheimer&#x2019;s dementia</article-title><source>ICASSP 2023 - 2023 IEEE Int Conf Acoustics, Speech Signal Process (ICASSP)</source><year>2023</year><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1109/ICASSP49357.2023.10095593</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cortes</surname><given-names>C</given-names> </name><name 
name-style="western"><surname>Vapnik</surname><given-names>V</given-names> </name></person-group><article-title>Support-vector networks</article-title><source>Mach Learn</source><year>1995</year><month>09</month><volume>20</volume><issue>3</issue><fpage>273</fpage><lpage>297</lpage><pub-id pub-id-type="doi">10.1007/BF00994018</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kaur</surname><given-names>S</given-names> </name><name name-style="western"><surname>Memon</surname><given-names>N</given-names> </name><name name-style="western"><surname>Jainul Fathima</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ray</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bhatt</surname><given-names>MW</given-names> </name></person-group><article-title>Alzheimer&#x2019;s patients detection using support vector machine (SVM) with quantitative analysis</article-title><source>Neurosci Inf</source><year>2021</year><month>11</month><volume>1</volume><issue>3</issue><fpage>100012</fpage><pub-id pub-id-type="doi">10.1016/j.neuri.2021.100012</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>X</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Shi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name></person-group><article-title>Brain status transferring generative adversarial network for decoding individualized 
atrophy in Alzheimer&#x2019;s disease</article-title><source>IEEE J Biomed Health Inform</source><year>2023</year><month>10</month><volume>27</volume><issue>10</issue><fpage>4961</fpage><lpage>4970</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2023.3304388</pub-id><pub-id pub-id-type="medline">37607152</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lazli</surname><given-names>L</given-names> </name></person-group><article-title>Improved Alzheimer disease diagnosis with a machine learning approach and neuroimaging: case study development</article-title><source>JMIRx Med</source><year>2025</year><month>04</month><day>21</day><volume>6</volume><fpage>e60866</fpage><pub-id pub-id-type="doi">10.2196/60866</pub-id><pub-id pub-id-type="medline">40257754</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hossain</surname><given-names>F</given-names> </name><name name-style="western"><surname>Halder</surname><given-names>RK</given-names> </name><name name-style="western"><surname>Uddin</surname><given-names>MN</given-names> </name></person-group><article-title>An integrated machine learning based adaptive error minimization framework for Alzheimer&#x2019;s stage identification</article-title><source>Intell-Based Med</source><year>2025</year><volume>11</volume><fpage>100243</fpage><pub-id pub-id-type="doi">10.1016/j.ibmed.2025.100243</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fulkar</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dhale</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Pacharaney</surname><given-names>U</given-names> </name><name name-style="western"><surname>Deshmukh</surname><given-names>S</given-names> </name></person-group><article-title>Early detection of chronic diseases using machine and deep learning algorithms</article-title><source>2025 4th Int Conf Sentiment Anal Deep Learn (ICSADL)</source><year>2025</year><fpage>1656</fpage><lpage>1661</lpage><pub-id pub-id-type="doi">10.1109/ICSADL65848.2025.10933005</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sathiya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Basha</surname><given-names>CH</given-names> </name><name name-style="western"><surname>S</surname><given-names>V</given-names> </name><name name-style="western"><surname>Sharmila P</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>S</surname><given-names>P</given-names> </name><name name-style="western"><surname>Indhumathi</surname><given-names>R</given-names> </name></person-group><article-title>Enhancing Alzheimer&#x2019;s disease detection using optimized attribute selection and random forest classifier for improved accuracy</article-title><source>2025 Int Conf Visual Anal Data Visualization (ICVADV)</source><year>2025</year><fpage>1174</fpage><lpage>1179</lpage><pub-id pub-id-type="doi">10.1109/ICVADV63329.2025.10961844</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saleh</surname><given-names>AW</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>G</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>SB</given-names> </name><name 
name-style="western"><surname>Alkhaldi</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Verma</surname><given-names>A</given-names> </name></person-group><article-title>An Alzheimer&#x2019;s disease classification model using transfer learning Densenet with embedded healthcare decision support system</article-title><source>Decis Anal J</source><year>2023</year><month>12</month><volume>9</volume><fpage>100348</fpage><pub-id pub-id-type="doi">10.1016/j.dajour.2023.100348</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baucum</surname><given-names>M</given-names> </name><name name-style="western"><surname>Khojandi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Papamarkou</surname><given-names>T</given-names> </name></person-group><article-title>Hidden Markov models as recurrent neural networks: an application to Alzheimer&#x2019;s disease</article-title><source>2021 IEEE 21st Int Conf Bioinf Bioeng (BIBE)</source><year>2021</year><fpage>1</fpage><lpage>6</lpage><pub-id pub-id-type="doi">10.1109/BIBE52308.2021.9635256</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Cai</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zeng</surname><given-names>D</given-names> </name><name name-style="western"><surname>Marder</surname><given-names>KS</given-names> </name><name name-style="western"><surname>Honig</surname><given-names>LS</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name></person-group><article-title>Dynamic classification of latent disease progression with auxiliary surrogate labels</article-title><source>arXiv</source><comment>Preprint posted online on Dec 11, 
2024</comment><pub-id pub-id-type="doi">10.3390/math11102335</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Pham</surname><given-names>TD</given-names> </name></person-group><article-title>Development of a brain MRI-based hidden Markov model for dementia recognition</article-title><source>Biomed Eng Online</source><year>2013</year><volume>12 Suppl 1</volume><issue>Suppl 1</issue><fpage>S2</fpage><pub-id pub-id-type="doi">10.1186/1475-925X-12-S1-S2</pub-id><pub-id pub-id-type="medline">24564961</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vats</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Yadavalli</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gurugubelli</surname><given-names>K</given-names> </name><name name-style="western"><surname>Vuppala</surname><given-names>AK</given-names> </name></person-group><article-title>Acoustic features, BERT model and their complementary nature for Alzheimer&#x2019;s dementia detection</article-title><source>IC3 &#x2019;21</source><year>2021</year><month>08</month><day>5</day><fpage>267</fpage><lpage>272</lpage><pub-id pub-id-type="doi">10.1145/3474124.3474162</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xiao</surname><given-names>R</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>X</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Early diagnosis model of 
Alzheimer&#x2019;s disease based on sparse logistic regression with the generalized elastic net</article-title><source>Biomed Signal Process Control</source><year>2021</year><month>04</month><volume>66</volume><fpage>102362</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2020.102362</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ablimit</surname><given-names>A</given-names> </name><name name-style="western"><surname>Botelho</surname><given-names>C</given-names> </name><name name-style="western"><surname>Abad</surname><given-names>A</given-names> </name><name name-style="western"><surname>Schultz</surname><given-names>T</given-names> </name><name name-style="western"><surname>Trancoso</surname><given-names>I</given-names> </name></person-group><article-title>Exploring dementia detection from speech: cross corpus analysis</article-title><source>ICASSP 2022 - 2022 IEEE Int Conf Acoust, Speech Signal Proc (ICASSP)</source><year>2022</year><fpage>6472</fpage><lpage>6476</lpage><pub-id pub-id-type="doi">10.1109/ICASSP43922.2022.9747167</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lahmiri</surname><given-names>S</given-names> </name></person-group><article-title>Integrating convolutional neural networks, kNN, and Bayesian optimization for efficient diagnosis of Alzheimer&#x2019;s disease in magnetic resonance images</article-title><source>Biomed Signal Process Control</source><year>2023</year><month>02</month><volume>80</volume><fpage>104375</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2022.104375</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Suwalka</surname><given-names>D</given-names> </name><name name-style="western"><surname>Pandita</surname><given-names>D</given-names> </name><name name-style="western"><surname>Godse</surname><given-names>S</given-names> </name><name name-style="western"><surname>Patil</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Salam Khan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kumar</surname><given-names>A</given-names> </name></person-group><article-title>AI applications and simulation-based learning integrating future of nursing education</article-title><source>2024 Int Conf Intell Innovative Pract Eng Manage (IIPEM)</source><year>2024</year><fpage>1</fpage><lpage>6</lpage><pub-id pub-id-type="doi">10.1109/IIPEM62726.2024.10925639</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chaudhari</surname><given-names>A</given-names> </name><name name-style="western"><surname>Saratkar</surname><given-names>S</given-names> </name><name name-style="western"><surname>Thute</surname><given-names>T</given-names> </name></person-group><article-title>AI-enhanced imaging techniques for understanding Alzheimer&#x2019;s progression</article-title><source>2025 Int Conf Mach Learn Auton Syst (ICMLAS)</source><year>2025</year><fpage>1174</fpage><lpage>1179</lpage><pub-id pub-id-type="doi">10.1109/ICMLAS64557.2025.10969042</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ango</surname><given-names>R</given-names> </name><name name-style="western"><surname>C</surname><given-names>KKR</given-names> </name><name name-style="western"><surname>Fatima</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Nag</surname><given-names>A</given-names> </name></person-group><article-title>Brain connectivity analysis in Alzheimer&#x2019;s disease using graph convolutional network</article-title><source>2024 4th Int Conf Soft Comput Secur Appl (ICSCSA)</source><year>2024</year><fpage>133</fpage><lpage>139</lpage><pub-id pub-id-type="doi">10.1109/ICSCSA64454.2024.00028</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chattopadhyay</surname><given-names>T</given-names> </name><name name-style="western"><surname>Joshy</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Ozarkar</surname><given-names>SS</given-names> </name><etal/></person-group><article-title>Deep learning algorithms for Alzheimer&#x2019;s disease detection based on diffusion MRI: tests in Indian and North American cohorts</article-title><source>Alzheimer&#x2019;s Dementia</source><year>2024</year><month>12</month><volume>20</volume><issue>S2</issue><fpage>e089294</fpage><pub-id pub-id-type="doi">10.1002/alz.089294</pub-id><pub-id pub-id-type="medline">39737627</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>D</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>L</given-names> </name></person-group><article-title>Editorial: deep learning methods and applications in brain imaging for the diagnosis of neurological and psychiatric disorders</article-title><source>Front Neurosci</source><year>2024</year><volume>18</volume><fpage>1497417</fpage><pub-id pub-id-type="doi">10.3389/fnins.2024.1497417</pub-id><pub-id 
pub-id-type="medline">39411146</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Williams</surname><given-names>C</given-names> </name><name name-style="western"><surname>Anik</surname><given-names>FI</given-names> </name><name name-style="western"><surname>Hasan</surname><given-names>MM</given-names> </name><etal/></person-group><article-title>Advancing brain-computer interface closed-loop systems for neurorehabilitation: a systematic review of AI and machine learning innovations in biomedical engineering</article-title><source>JMIR Biomed Eng</source><year>2025</year><month>11</month><day>5</day><volume>10</volume><fpage>e72218</fpage><pub-id pub-id-type="doi">10.2196/72218</pub-id><pub-id pub-id-type="medline">41191851</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiting</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Westwood</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title><source>Ann Intern Med</source><year>2011</year><month>10</month><day>18</day><volume>155</volume><issue>8</issue><fpage>529</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id><pub-id pub-id-type="medline">22007046</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>de Swart</surname><given-names>WK</given-names> </name><name name-style="western"><surname>Loog</surname><given-names>M</given-names> 
</name><name name-style="western"><surname>Krijthe</surname><given-names>JH</given-names> </name></person-group><article-title>A comparative study of methods for dynamic survival analysis</article-title><source>Front Neurol</source><year>2025</year><volume>16</volume><fpage>1504535</fpage><pub-id pub-id-type="doi">10.3389/fneur.2025.1504535</pub-id><pub-id pub-id-type="medline">40040908</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kang</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Hong</surname><given-names>KS</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>HK</given-names> </name></person-group><article-title>Multi-scale neural networks classification of mild cognitive impairment using functional near-infrared spectroscopy</article-title><source>Biocybern Biomed Eng</source><year>2025</year><month>01</month><volume>45</volume><issue>1</issue><fpage>11</fpage><lpage>22</lpage><pub-id pub-id-type="doi">10.1016/j.bbe.2024.12.001</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abir</surname><given-names>SI</given-names> </name><etal/></person-group><article-title>EEG functional connectivity and deep learning for automated diagnosis of Alzheimer&#x2019;s disease and schizophrenia</article-title><source>JCSTS</source><year>2025</year><month>01</month><day>26</day><volume>7</volume><issue>1</issue><fpage>82</fpage><lpage>99</lpage><pub-id pub-id-type="doi">10.32996/jcsts.2025.7.1.7</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Sathish</surname><given-names>R</given-names> </name><name name-style="western"><surname>Muthukumar</surname><given-names>R</given-names> </name><name name-style="western"><surname>Dhivya</surname><given-names>K</given-names> </name><name name-style="western"><surname>Karthikkumar</surname><given-names>S</given-names> </name></person-group><article-title>Deep learning and IoT-enabled framework for accurate classification and monitoring of Alzheimer&#x2019;s disease based on EEG signal analysis</article-title><source>2025 Fifth Int Conf Adv Electr, Comput, Commun Sustainable Technol (ICAECT)</source><year>2025</year><fpage>1</fpage><lpage>8</lpage><pub-id pub-id-type="doi">10.1109/ICAECT63952.2025.10958882</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dubey</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Kapoor</surname><given-names>R</given-names> </name><name name-style="western"><surname>Saraswat</surname><given-names>M</given-names> </name></person-group><article-title>Optimized machine learning for medical data analysis and disease prediction</article-title><source>2024 Int Conf Artif Intell Emerging Tech (Global AI Summit)</source><year>2024</year><fpage>1282</fpage><lpage>1286</lpage><pub-id pub-id-type="doi">10.1109/GlobalAISummit62156.2024.10948001</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>K</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chitla</surname><given-names>VB</given-names> </name><name name-style="western"><surname>Aftab</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kamath</surname><given-names>S</given-names> </name></person-group><article-title>LSTM-based 
assistance for people with Alzheimer&#x2019;s disease</article-title><source>2025 Int Conf Intell Innovative Tech Comput, Electr Electron (IITCEE)</source><year>2025</year><fpage>1</fpage><lpage>5</lpage><pub-id pub-id-type="doi">10.1109/IITCEE64140.2025.10915385</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>GE</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Bian</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>J</given-names> </name></person-group><article-title>Federated learning with multi-cohort real-world data for predicting the progression from mild cognitive impairment to Alzheimer&#x2019;s disease</article-title><source>Alzheimer's Dement</source><year>2025</year><month>04</month><volume>21</volume><issue>4</issue><fpage>e70128</fpage><pub-id pub-id-type="doi">10.1002/alz.70128</pub-id><pub-id pub-id-type="medline">40219846</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zuo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Dong</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Glypred: lysine glycation site prediction via CCU&#x2013;LightGBM&#x2013;BiLSTM framework with multi-head attention mechanism</article-title><source>J Chem Inf 
Model</source><year>2024</year><month>08</month><day>26</day><volume>64</volume><issue>16</issue><fpage>6699</fpage><lpage>6711</lpage><pub-id pub-id-type="doi">10.1021/acs.jcim.4c01034</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>SD</given-names> </name></person-group><article-title>GCSTormer: gated swin transformer with channel weights for image denoising</article-title><source>Expert Syst Appl</source><year>2025</year><month>07</month><volume>284</volume><fpage>127924</fpage><pub-id pub-id-type="doi">10.1016/j.eswa.2025.127924</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Han</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xue</surname><given-names>R</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Hypergraph foundation model for brain disease diagnosis</article-title><source>IEEE Trans Neural Netw Learning Syst</source><year>2025</year><volume>36</volume><issue>10</issue><fpage>17702</fpage><lpage>17716</lpage><pub-id pub-id-type="doi">10.1109/TNNLS.2025.3554755</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Lu</surname><given-names>SY</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>YD</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>YD</given-names> </name></person-group><article-title>A regularized transformer with adaptive token fusion for Alzheimer&#x2019;s disease diagnosis in brain magnetic resonance images</article-title><source>Eng Appl Artif Intell</source><year>2025</year><month>09</month><volume>155</volume><fpage>111058</fpage><pub-id pub-id-type="doi">10.1016/j.engappai.2025.111058</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Qiu</surname><given-names>P</given-names> </name><name name-style="western"><surname>Dumitrascu</surname><given-names>OM</given-names> </name><name name-style="western"><surname>Youssef</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name></person-group><article-title>A BERT-style self-supervised learning CNN for disease identification from retinal images</article-title><source>arXiv</source><comment>Preprint posted online on  Apr 25, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2504.18049</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Mahapatra</surname><given-names>C</given-names> </name></person-group><article-title>Exploring advanced applications of artificial intelligence in neuropharmacology: a comprehensive overview</article-title><source>Biol Life Sci</source><comment>Preprint posted online on  May 8, 2025</comment><pub-id 
pub-id-type="doi">10.20944/preprints202505.0369.v1</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ren</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Using machine learning to predict cognitive decline in older adults from the Chinese longitudinal healthy longevity survey: model development and validation study</article-title><source>JMIR Aging</source><year>2025</year><month>04</month><day>30</day><volume>8</volume><fpage>e67437</fpage><pub-id pub-id-type="doi">10.2196/67437</pub-id><pub-id pub-id-type="medline">40305830</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shah</surname><given-names>YAR</given-names> </name><name name-style="western"><surname>Qureshi</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Qureshi</surname><given-names>HA</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>SUR</given-names> </name><name name-style="western"><surname>Ahmad</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shiwlani</surname><given-names>A</given-names> </name></person-group><article-title>Advances in artificial intelligence and machine learning for neurodegenerative disease: a literature review</article-title><source>WJRR</source><year>2024</year><month>09</month><day>5</day><volume>19</volume><issue>3</issue><fpage>4</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.31871/WJRR.19.3.8</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Fatima</surname><given-names>G</given-names> </name><name name-style="western"><surname>Ashiquzzaman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>YR</given-names> </name><name name-style="western"><surname>Kwon</surname><given-names>HS</given-names> </name><name name-style="western"><surname>Chung</surname><given-names>E</given-names> </name></person-group><article-title>Vascular and glymphatic dysfunction as drivers of cognitive impairment in Alzheimer&#x2019;s disease: insights from computational approaches</article-title><source>Neurobiol Dis</source><year>2025</year><month>05</month><volume>208</volume><fpage>106877</fpage><pub-id pub-id-type="doi">10.1016/j.nbd.2025.106877</pub-id><pub-id pub-id-type="medline">40107629</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Dang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Cai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Heng</surname><given-names>P</given-names> </name></person-group><article-title>Temporal&#x2010;multimodal consistency alignment for Alzheimer&#x2019;s cognitive assessment prediction</article-title><source>Med Phys</source><year>2025</year><month>06</month><volume>52</volume><issue>6</issue><fpage>5064</fpage><lpage>5080</lpage><pub-id pub-id-type="doi">10.1002/mp.17767</pub-id></nlm-citation></ref><ref 
id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sadeghian</surname><given-names>R</given-names> </name><name name-style="western"><surname>Haider</surname><given-names>F</given-names> </name><name name-style="western"><surname>Fraser</surname><given-names>K</given-names> </name><name name-style="western"><surname>Tasaki</surname><given-names>S</given-names> </name><name name-style="western"><surname>Muniz-Terrera</surname><given-names>G</given-names> </name></person-group><article-title>Editorial: methods in artificial intelligence for dementia 2024</article-title><source>Front Dement</source><year>2024</year><volume>3</volume><fpage>1444825</fpage><pub-id pub-id-type="doi">10.3389/frdem.2024.1444825</pub-id><pub-id pub-id-type="medline">39081595</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kale</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wankhede</surname><given-names>N</given-names> </name><name name-style="western"><surname>Pawar</surname><given-names>R</given-names> </name><etal/></person-group><article-title>AI-driven innovations in Alzheimer&#x2019;s disease: integrating early diagnosis, personalized treatment, and prognostic modelling</article-title><source>Ageing Res Rev</source><year>2024</year><month>11</month><volume>101</volume><fpage>102497</fpage><pub-id pub-id-type="doi">10.1016/j.arr.2024.102497</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="web"><source>UKbiobank</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ukbiobank.ac.uk">https://ukbiobank.ac.uk</ext-link></comment></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation 
citation-type="web"><source>ADNI</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://adni.loni.usc.edu">https://adni.loni.usc.edu</ext-link></comment></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="web"><article-title>Open access series of imaging studies (OASIS)</article-title><source>Washington University in St Louis</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://sites.wustl.edu/oasisbrains/">https://sites.wustl.edu/oasisbrains/</ext-link></comment></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="web"><source>NACC</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://naccdata.org/">https://naccdata.org/</ext-link></comment></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="web"><source>Framingham Heart Study</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.framinghamheartstudy.org/">https://www.framinghamheartstudy.org/</ext-link></comment></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="web"><source>aibl</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://aibl.csiro.au">https://aibl.csiro.au</ext-link></comment></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="web"><source>TalkBank</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://dementia.talkbank.org/">https://dementia.talkbank.org/</ext-link></comment></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gkoumas</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Tsakalidis</surname><given-names>A</given-names> </name><etal/></person-group><article-title>A longitudinal multi-modal dataset for dementia monitoring and diagnosis</article-title><source>Lang Resour Eval</source><year>2024</year><volume>58</volume><issue>3</issue><fpage>883</fpage><lpage>902</lpage><pub-id pub-id-type="doi">10.1007/s10579-023-09718-4</pub-id><pub-id pub-id-type="medline">39323983</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>T</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Lun</surname><given-names>X</given-names> </name><name name-style="western"><surname>Pan</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name></person-group><article-title>ADReFV: face video dataset based on human&#x2010;computer interaction for Alzheimer&#x2019;s disease recognition</article-title><source>Comput Animation Virtual</source><year>2023</year><month>01</month><volume>34</volume><issue>1</issue><fpage>e2127</fpage><pub-id pub-id-type="doi">10.1002/cav.2127</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="web"><source>GENCODE</source><access-date>2026-02-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.gencodegenes.org/">https://www.gencodegenes.org/</ext-link></comment></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Velazquez</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Lee</surname><given-names>Y</given-names> </name></person-group><article-title>Multimodal ensemble model for Alzheimer&#x2019;s disease conversion prediction from early mild cognitive impairment subjects</article-title><source>Comput Biol Med</source><year>2022</year><month>12</month><volume>151</volume><issue>Pt A</issue><fpage>106201</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106201</pub-id><pub-id pub-id-type="medline">36370583</pub-id></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>Q</given-names> </name><name name-style="western"><surname>L&#x00FC;</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Li</surname><given-names>W</given-names> </name></person-group><article-title>A multimodal learning machine framework for Alzheimer&#x2019;s disease diagnosis based on neuropsychological and neuroimaging data</article-title><source>Comput Ind Eng</source><year>2024</year><month>11</month><volume>197</volume><fpage>110625</fpage><pub-id pub-id-type="doi">10.1016/j.cie.2024.110625</pub-id></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fabietti</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mahmud</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lotfi</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Early detection of Alzheimer&#x2019;s disease from cortical and hippocampal local field potentials using an ensembled machine learning model</article-title><source>IEEE 
Trans Neural Syst Rehabil Eng</source><year>2023</year><volume>31</volume><fpage>2839</fpage><lpage>2848</lpage><pub-id pub-id-type="doi">10.1109/TNSRE.2023.3288835</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Seifallahi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mehraban</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Galvin</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Ghoraani</surname><given-names>B</given-names> </name></person-group><article-title>Alzheimer&#x2019;s disease detection using comprehensive analysis of Timed Up and Go Test via Kinect V.2 camera and machine learning</article-title><source>IEEE Trans Neural Syst Rehabil Eng</source><year>2022</year><volume>30</volume><fpage>1589</fpage><lpage>1600</lpage><pub-id pub-id-type="doi">10.1109/TNSRE.2022.3181252</pub-id><pub-id pub-id-type="medline">35675251</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xue</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kowshik</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Lteif</surname><given-names>D</given-names> </name><etal/></person-group><article-title>AI-based differential diagnosis of dementia etiologies on multimodal data</article-title><source>Nat Med</source><year>2024</year><month>10</month><volume>30</volume><issue>10</issue><fpage>2977</fpage><lpage>2989</lpage><pub-id pub-id-type="doi">10.1038/s41591-024-03118-z</pub-id><pub-id pub-id-type="medline">38965435</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Shi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>X</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Ying</surname><given-names>S</given-names> </name></person-group><article-title>Multimodal neuroimaging feature learning with multimodal stacked deep polynomial networks for diagnosis of Alzheimer&#x2019;s disease</article-title><source>IEEE J Biomed Health Inform</source><year>2018</year><month>01</month><volume>22</volume><issue>1</issue><fpage>173</fpage><lpage>183</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2017.2655720</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Allwright</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mundell</surname><given-names>HD</given-names> </name><name name-style="western"><surname>McCorkindale</surname><given-names>AN</given-names> </name><etal/></person-group><article-title>Ranking the risk factors for Alzheimer&#x2019;s disease; findings from the UK Biobank study</article-title><source>Aging Brain</source><year>2023</year><volume>3</volume><fpage>100081</fpage><pub-id pub-id-type="doi">10.1016/j.nbas.2023.100081</pub-id><pub-id pub-id-type="medline">37384134</pub-id></nlm-citation></ref><ref id="ref82"><label>82</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Estimation of 
machine learning-based models to predict dementia risk in patients with atherosclerotic cardiovascular diseases: UK Biobank study</article-title><source>JMIR Aging</source><year>2025</year><month>02</month><day>26</day><volume>8</volume><fpage>e64148</fpage><pub-id pub-id-type="doi">10.2196/64148</pub-id><pub-id pub-id-type="medline">40009844</pub-id></nlm-citation></ref><ref id="ref83"><label>83</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>You</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>YR</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>HF</given-names> </name><etal/></person-group><article-title>Development of a novel dementia risk prediction model in the general population: a large, longitudinal, population-based machine-learning study</article-title><source>eClinicalMedicine</source><year>2022</year><month>11</month><volume>53</volume><fpage>101665</fpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2022.101665</pub-id></nlm-citation></ref><ref id="ref84"><label>84</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Calvo</surname><given-names>N</given-names> </name><name name-style="western"><surname>McFall</surname><given-names>GP</given-names> </name><name name-style="western"><surname>Ramana</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Associated risk and resilience factors of Alzheimer&#x2019;s disease in women with early bilateral oophorectomy: data from the UK Biobank</article-title><source>J Alzheimers Dis</source><year>2024</year><month>11</month><volume>102</volume><issue>1</issue><fpage>119</fpage><lpage>128</lpage><pub-id pub-id-type="doi">10.3233/JAD-240646</pub-id><pub-id pub-id-type="medline">39497303</pub-id></nlm-citation></ref><ref 
id="ref85"><label>85</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Yuan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Somekh</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Genetically supported targets and drug repurposing for brain aging: a systematic study in the UK Biobank</article-title><source>Sci Adv</source><year>2025</year><month>03</month><day>14</day><volume>11</volume><issue>11</issue><fpage>eadr3757</fpage><pub-id pub-id-type="doi">10.1126/sciadv.adr3757</pub-id><pub-id pub-id-type="medline">40073132</pub-id></nlm-citation></ref><ref id="ref86"><label>86</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yousefzadeh</surname><given-names>N</given-names> </name><name name-style="western"><surname>Tran</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ramirez-Zamora</surname><given-names>A</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fang</surname><given-names>R</given-names> </name><name name-style="western"><surname>Thai</surname><given-names>MT</given-names> </name></person-group><article-title>Neuron-level explainable AI for Alzheimer&#x2019;s disease assessment from fundus images</article-title><source>Sci Rep</source><year>2024</year><month>04</month><day>2</day><volume>14</volume><issue>1</issue><fpage>7710</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-58121-8</pub-id><pub-id pub-id-type="medline">38565579</pub-id></nlm-citation></ref><ref id="ref87"><label>87</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Gong</surname><given-names>W</given-names> </name><name name-style="western"><surname>Bai</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>YQ</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Beckmann</surname><given-names>CF</given-names> </name></person-group><article-title>Supervised phenotype discovery from multimodal brain imaging</article-title><source>IEEE Trans Med Imaging</source><year>2023</year><month>03</month><volume>42</volume><issue>3</issue><fpage>834</fpage><lpage>849</lpage><pub-id pub-id-type="doi">10.1109/TMI.2022.3218720</pub-id><pub-id pub-id-type="medline">36318559</pub-id></nlm-citation></ref><ref id="ref88"><label>88</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lian</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Attention-guided hybrid network for dementia diagnosis with structural MR images</article-title><source>IEEE Trans Cybern</source><year>2022</year><month>04</month><volume>52</volume><issue>4</issue><fpage>1992</fpage><lpage>2003</lpage><pub-id pub-id-type="doi">10.1109/TCYB.2020.3005859</pub-id><pub-id pub-id-type="medline">32721906</pub-id></nlm-citation></ref><ref id="ref89"><label>89</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lian</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Multi-task weakly-supervised attention network for dementia status estimation with structural MRI</article-title><source>IEEE Trans Neural Netw Learning Syst</source><year>2022</year><month>08</month><volume>33</volume><issue>8</issue><fpage>4056</fpage><lpage>4068</lpage><pub-id pub-id-type="doi">10.1109/TNNLS.2021.3055772</pub-id></nlm-citation></ref><ref id="ref90"><label>90</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Habes</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wolk</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>Y</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative and the Australian Imaging Biomarkers and Lifestyle Study of Aging</collab></person-group><article-title>
A deep learning model for early prediction of Alzheimer&#x2019;s disease dementia based on hippocampal magnetic resonance imaging data</article-title><source>Alzheimer&#x2019;s Dementia</source><year>2019</year><month>08</month><volume>15</volume><issue>8</issue><fpage>1059</fpage><lpage>1070</lpage><pub-id pub-id-type="doi">10.1016/j.jalz.2019.02.007</pub-id><pub-id pub-id-type="medline">31201098</pub-id></nlm-citation></ref><ref id="ref91"><label>91</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oh</surname><given-names>K</given-names> </name><name name-style="western"><surname>Yoon</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Suk</surname><given-names>HI</given-names> </name></person-group><article-title>Learn-explain-reinforce: counterfactual reasoning and its guidance to reinforce an Alzheimer&#x2019;s disease diagnosis model</article-title><source>IEEE Trans Pattern Anal Mach Intell</source><year>2023</year><month>04</month><volume>45</volume><issue>4</issue><fpage>4843</fpage><lpage>4857</lpage><pub-id pub-id-type="doi">10.1109/TPAMI.2022.3197845</pub-id><pub-id pub-id-type="medline">35947563</pub-id></nlm-citation></ref><ref id="ref92"><label>92</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lian</surname><given-names>C</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Hierarchical fully convolutional network for joint atrophy localization and Alzheimer&#x2019;s disease diagnosis using structural MRI</article-title><source>IEEE Trans Pattern Anal Mach 
Intell</source><year>2020</year><month>04</month><volume>42</volume><issue>4</issue><fpage>880</fpage><lpage>893</lpage><pub-id pub-id-type="doi">10.1109/TPAMI.2018.2889096</pub-id><pub-id pub-id-type="medline">30582529</pub-id></nlm-citation></ref><ref id="ref93"><label>93</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Avsec</surname><given-names>&#x017D;</given-names> </name><name name-style="western"><surname>Agarwal</surname><given-names>V</given-names> </name><name name-style="western"><surname>Visentin</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Effective gene expression prediction from sequence by integrating long-range interactions</article-title><source>Nat Methods</source><year>2021</year><month>10</month><volume>18</volume><issue>10</issue><fpage>1196</fpage><lpage>1203</lpage><pub-id pub-id-type="doi">10.1038/s41592-021-01252-x</pub-id></nlm-citation></ref><ref id="ref94"><label>94</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>X</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>Q</given-names> </name><etal/><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>
Deep learning based multimodal progression modeling for Alzheimer&#x2019;s disease</article-title><source>Stat Biopharm Res</source><year>2021</year><month>07</month><day>3</day><volume>13</volume><fpage>337</fpage><lpage>343</lpage><pub-id pub-id-type="doi">10.1080/19466315.2021.1884129</pub-id></nlm-citation></ref><ref id="ref95"><label>95</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>LY</given-names> </name><name name-style="western"><surname>Vaghari</surname><given-names>D</given-names> </name><name name-style="western"><surname>Burkhart</surname><given-names>MC</given-names> </name><etal/></person-group><article-title>Robust and interpretable AI-guided marker for early dementia prediction in real-world clinical settings</article-title><source>eClinicalMedicine</source><year>2024</year><month>08</month><volume>74</volume><fpage>102725</fpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2024.102725</pub-id></nlm-citation></ref><ref id="ref96"><label>96</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>L</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Han</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name></person-group><article-title>Dual attention multi-instance deep learning for Alzheimer&#x2019;s disease diagnosis with structural MRI</article-title><source>IEEE Trans Med Imaging</source><year>2021</year><month>09</month><volume>40</volume><issue>9</issue><fpage>2354</fpage><lpage>2366</lpage><pub-id pub-id-type="doi">10.1109/TMI.2021.3077079</pub-id></nlm-citation></ref><ref 
id="ref97"><label>97</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>Q</given-names> </name><name name-style="western"><surname>L&#x00FC;</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>W</given-names> </name></person-group><article-title>A feature-aware multimodal framework with auto-fusion for Alzheimer&#x2019;s disease diagnosis</article-title><source>Comput Biol Med</source><year>2024</year><month>08</month><volume>178</volume><fpage>108740</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.108740</pub-id><pub-id pub-id-type="medline">38901184</pub-id></nlm-citation></ref><ref id="ref98"><label>98</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bi</surname><given-names>XA</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name></person-group><article-title>Multimodal data analysis of Alzheimer&#x2019;s disease based on clustering evolutionary random forest</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>10</month><volume>24</volume><issue>10</issue><fpage>2973</fpage><lpage>2983</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2020.2973324</pub-id><pub-id pub-id-type="medline">32071013</pub-id></nlm-citation></ref><ref id="ref99"><label>99</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bi</surname><given-names>XA</given-names> </name><name name-style="western"><surname>Xing</surname><given-names>Z</given-names> 
</name><name name-style="western"><surname>Zhou</surname><given-names>W</given-names> </name><name name-style="western"><surname>Li</surname><given-names>L</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>L</given-names> </name></person-group><article-title>Pathogeny detection for mild cognitive impairment via weighted evolutionary random forest with brain imaging and genetic data</article-title><source>IEEE J Biomed Health Inform</source><year>2022</year><month>07</month><volume>26</volume><issue>7</issue><fpage>3068</fpage><lpage>3079</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2022.3151084</pub-id><pub-id pub-id-type="medline">35157601</pub-id></nlm-citation></ref><ref id="ref100"><label>100</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hashmi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Barukab</surname><given-names>O</given-names> </name></person-group><article-title>Dementia classification using deep reinforcement learning for early diagnosis</article-title><source>Appl Sci (Basel)</source><year>2023</year><month>01</month><day>22</day><volume>13</volume><issue>3</issue><fpage>1464</fpage><pub-id pub-id-type="doi">10.3390/app13031464</pub-id></nlm-citation></ref><ref id="ref101"><label>101</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Predicting long-term progression of Alzheimer&#x2019;s disease using a multimodal deep learning model incorporating interaction effects</article-title><source>J Transl 
Med</source><year>2024</year><month>03</month><day>11</day><volume>22</volume><issue>1</issue><fpage>265</fpage><pub-id pub-id-type="doi">10.1186/s12967-024-05025-w</pub-id></nlm-citation></ref><ref id="ref102"><label>102</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hatami</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yaghmaee</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ebrahimpour</surname><given-names>R</given-names> </name></person-group><article-title>Investigating the potential of reinforcement learning and deep learning in improving Alzheimer&#x2019;s disease classification</article-title><source>Neurocomputing</source><year>2024</year><month>09</month><volume>597</volume><fpage>128119</fpage><pub-id pub-id-type="doi">10.1016/j.neucom.2024.128119</pub-id></nlm-citation></ref><ref id="ref103"><label>103</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tabarestani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Aghili</surname><given-names>M</given-names> </name><name name-style="western"><surname>Eslami</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A distributed multitask multimodal approach for the prediction of Alzheimer&#x2019;s disease in a longitudinal study</article-title><source>Neuroimage</source><year>2020</year><month>02</month><day>1</day><volume>206</volume><fpage>116317</fpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.116317</pub-id><pub-id pub-id-type="medline">31678502</pub-id></nlm-citation></ref><ref id="ref104"><label>104</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Burkhart</surname><given-names>MC</given-names> </name><name 
name-style="western"><surname>Lee</surname><given-names>LY</given-names> </name><name name-style="western"><surname>Vaghari</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Unsupervised multimodal modeling of cognitive and brain health trajectories for early dementia prediction</article-title><source>Sci Rep</source><year>2024</year><month>05</month><day>10</day><volume>14</volume><issue>1</issue><fpage>10755</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-60914-w</pub-id><pub-id pub-id-type="medline">38729989</pub-id></nlm-citation></ref><ref id="ref105"><label>105</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>El-Sappagh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Alonso</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Islam</surname><given-names>SMR</given-names> </name><name name-style="western"><surname>Sultan</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Kwak</surname><given-names>KS</given-names> </name></person-group><article-title>A multilayer multimodal detection and prediction model based on explainable artificial intelligence for Alzheimer&#x2019;s disease</article-title><source>Sci Rep</source><year>2021</year><month>01</month><day>29</day><volume>11</volume><issue>1</issue><fpage>2660</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-82098-3</pub-id><pub-id pub-id-type="medline">33514817</pub-id></nlm-citation></ref><ref id="ref106"><label>106</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>MW</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Choe</surname><given-names>YS</given-names> </name><etal/></person-group><article-title>A multimodal 
machine learning model for predicting dementia conversion in Alzheimer&#x2019;s disease</article-title><source>Sci Rep</source><year>2024</year><month>05</month><day>29</day><volume>14</volume><issue>1</issue><fpage>12276</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-60134-2</pub-id></nlm-citation></ref><ref id="ref107"><label>107</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yuan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>X</given-names> </name></person-group><article-title>Classification of mild cognitive impairment with multimodal data using both labeled and unlabeled samples</article-title><source>IEEE/ACM Trans Comput Biol and Bioinf</source><year>2021</year><month>11</month><day>1</day><volume>18</volume><issue>6</issue><fpage>2281</fpage><lpage>2290</lpage><pub-id pub-id-type="doi">10.1109/TCBB.2021.3053061</pub-id></nlm-citation></ref><ref id="ref108"><label>108</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cirincione</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lynch</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bennett</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Prediction of future dementia among patients with mild cognitive impairment (MCI) by integrating multimodal clinical data</article-title><source>Heliyon</source><year>2024</year><month>09</month><day>15</day><volume>10</volume><issue>17</issue><fpage>e36728</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e36728</pub-id><pub-id pub-id-type="medline">39281465</pub-id></nlm-citation></ref><ref 
id="ref109"><label>109</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cassani</surname><given-names>R</given-names> </name><name name-style="western"><surname>Falk</surname><given-names>TH</given-names> </name></person-group><article-title>Alzheimer&#x2019;s disease diagnosis and severity level detection based on electroencephalography modulation spectral &#x201C;patch&#x201D; features</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>07</month><volume>24</volume><issue>7</issue><fpage>1982</fpage><lpage>1993</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2019.2953475</pub-id><pub-id pub-id-type="medline">31725401</pub-id></nlm-citation></ref><ref id="ref110"><label>110</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cilia</surname><given-names>ND</given-names> </name><name name-style="western"><surname>D&#x2019;Alessandro</surname><given-names>T</given-names> </name><name name-style="western"><surname>De Stefano</surname><given-names>C</given-names> </name><name name-style="western"><surname>Fontanella</surname><given-names>F</given-names> </name><name name-style="western"><surname>Molinara</surname><given-names>M</given-names> </name></person-group><article-title>From online handwriting to synthetic images for Alzheimer&#x2019;s disease detection using a deep transfer learning approach</article-title><source>IEEE J Biomed Health Inform</source><year>2021</year><month>12</month><volume>25</volume><issue>12</issue><fpage>4243</fpage><lpage>4254</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2021.3101982</pub-id></nlm-citation></ref><ref id="ref111"><label>111</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kmetzsch</surname><given-names>V</given-names> </name><name 
name-style="western"><surname>Becker</surname><given-names>E</given-names> </name><name name-style="western"><surname>Saracino</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Disease progression score estimation from multimodal imaging and MicroRNA data using supervised variational autoencoders</article-title><source>IEEE J Biomed Health Inform</source><year>2022</year><month>12</month><volume>26</volume><issue>12</issue><fpage>6024</fpage><lpage>6035</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2022.3208517</pub-id></nlm-citation></ref><ref id="ref112"><label>112</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mengoudi</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ravi</surname><given-names>D</given-names> </name><name name-style="western"><surname>Yong</surname><given-names>KXX</given-names> </name><etal/></person-group><article-title>Augmenting dementia cognitive assessment with instruction-less eye-tracking tests</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>11</month><volume>24</volume><issue>11</issue><fpage>3066</fpage><lpage>3075</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2020.3004686</pub-id><pub-id pub-id-type="medline">32749977</pub-id></nlm-citation></ref><ref id="ref113"><label>113</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tsai</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>TW</given-names> </name><name name-style="western"><surname>Ou</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Su</surname><given-names>TH</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>C</given-names> </name><name name-style="western"><surname>Chou</surname><given-names>CF</given-names> 
</name></person-group><article-title>Multimodal attention network for dementia prediction</article-title><source>IEEE J Biomed Health Inform</source><year>2024</year><month>11</month><volume>28</volume><issue>11</issue><fpage>6918</fpage><lpage>6930</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2024.3438885</pub-id></nlm-citation></ref><ref id="ref114"><label>114</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>EQ</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>XY</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>SD</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>XY</given-names> </name><name name-style="western"><surname>Tang</surname><given-names>ZR</given-names> </name></person-group><article-title>Detecting Alzheimer&#x2019;s dementia degree</article-title><source>IEEE Trans Cogn Dev Syst</source><year>2022</year><month>03</month><volume>14</volume><issue>1</issue><fpage>116</fpage><lpage>125</lpage><pub-id pub-id-type="doi">10.1109/TCDS.2020.3015131</pub-id></nlm-citation></ref><ref id="ref115"><label>115</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Ni</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Patch-based interpretable deep learning framework for Alzheimer&#x2019;s disease diagnosis using multimodal data</article-title><source>Biomed Signal Process Control</source><year>2025</year><month>02</month><volume>100</volume><fpage>107085</fpage><pub-id pub-id-type="doi">10.1016/j.bspc.2024.107085</pub-id></nlm-citation></ref><ref id="ref116"><label>116</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fan</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Graph reasoning module for Alzheimer&#x2019;s disease diagnosis: a plug-and-play method</article-title><source>IEEE Trans Neural Syst Rehabil Eng</source><year>2023</year><volume>31</volume><fpage>4773</fpage><lpage>4780</lpage><pub-id pub-id-type="doi">10.1109/TNSRE.2023.3337533</pub-id></nlm-citation></ref><ref id="ref117"><label>117</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beebe-Wang</surname><given-names>N</given-names> </name><name name-style="western"><surname>Okeson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Althoff</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SI</given-names> </name></person-group><article-title>Efficient and explainable risk assessments for imminent dementia in an aging cohort study</article-title><source>IEEE J Biomed Health Inform</source><year>2021</year><month>07</month><volume>25</volume><issue>7</issue><fpage>2409</fpage><lpage>2420</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2021.3059563</pub-id></nlm-citation></ref><ref id="ref118"><label>118</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Battineni</surname><given-names>G</given-names> </name><name name-style="western"><surname>Hossain</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Chintalapudi</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Improved Alzheimer&#x2019;s disease detection by MRI using 
multimodal machine learning algorithms</article-title><source>Diagnostics (Basel)</source><year>2021</year><month>11</month><day>13</day><volume>11</volume><issue>11</issue><fpage>2103</fpage><pub-id pub-id-type="doi">10.3390/diagnostics11112103</pub-id></nlm-citation></ref><ref id="ref119"><label>119</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nguyen</surname><given-names>H</given-names> </name><name name-style="western"><surname>Chu</surname><given-names>NN</given-names> </name></person-group><article-title>An introduction to deep learning research for Alzheimer&#x2019;s disease</article-title><source>IEEE Consumer Electron Mag</source><year>2021</year><month>05</month><day>1</day><volume>10</volume><issue>3</issue><fpage>72</fpage><lpage>75</lpage><pub-id pub-id-type="doi">10.1109/MCE.2020.3048254</pub-id></nlm-citation></ref><ref id="ref120"><label>120</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fan</surname><given-names>F</given-names> </name><name name-style="western"><surname>Song</surname><given-names>H</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Development and validation of a multimodal deep learning framework for vascular cognitive impairment diagnosis</article-title><source>iScience</source><year>2024</year><month>10</month><volume>27</volume><issue>10</issue><fpage>110945</fpage><pub-id pub-id-type="doi">10.1016/j.isci.2024.110945</pub-id></nlm-citation></ref><ref id="ref121"><label>121</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ilias</surname><given-names>L</given-names> </name><name name-style="western"><surname>Askounis</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Psarras</surname><given-names>J</given-names> </name></person-group><article-title>Detecting dementia from speech and transcripts using transformers</article-title><source>Comput Speech Lang</source><year>2023</year><month>04</month><volume>79</volume><fpage>101485</fpage><pub-id pub-id-type="doi">10.1016/j.csl.2023.101485</pub-id></nlm-citation></ref><ref id="ref122"><label>122</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Poor</surname><given-names>FF</given-names> </name><name name-style="western"><surname>Dodge</surname><given-names>HH</given-names> </name><name name-style="western"><surname>Mahoor</surname><given-names>MH</given-names> </name></person-group><article-title>A multimodal cross-transformer-based model to predict mild cognitive impairment using speech, language and vision</article-title><source>Comput Biol Med</source><year>2024</year><month>11</month><volume>182</volume><fpage>109199</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.109199</pub-id><pub-id pub-id-type="medline">39332117</pub-id></nlm-citation></ref><ref id="ref123"><label>123</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>K</given-names> </name><name name-style="western"><surname>Washington</surname><given-names>PY</given-names> </name></person-group><article-title>Multimodal deep learning for dementia classification using text and audio</article-title><source>Sci Rep</source><year>2024</year><month>06</month><day>16</day><volume>14</volume><issue>1</issue><fpage>13887</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-64438-1</pub-id></nlm-citation></ref><ref id="ref124"><label>124</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ortiz-Perez</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Ruiz-Ponce</surname><given-names>P</given-names> </name><name name-style="western"><surname>Tom&#x00E1;s</surname><given-names>D</given-names> </name><name name-style="western"><surname>Garcia-Rodriguez</surname><given-names>J</given-names> </name><name name-style="western"><surname>Vizcaya-Moreno</surname><given-names>MF</given-names> </name><name name-style="western"><surname>Leo</surname><given-names>M</given-names> </name></person-group><article-title>A deep learning-based multimodal architecture to predict signs of dementia</article-title><source>Neurocomputing</source><year>2023</year><month>09</month><volume>548</volume><fpage>126413</fpage><pub-id pub-id-type="doi">10.1016/j.neucom.2023.126413</pub-id></nlm-citation></ref><ref id="ref125"><label>125</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ilias</surname><given-names>L</given-names> </name><name name-style="western"><surname>Askounis</surname><given-names>D</given-names> </name></person-group><article-title>Explainable identification of dementia from transcripts using transformer networks</article-title><source>IEEE J Biomed Health Inform</source><year>2022</year><month>08</month><volume>26</volume><issue>8</issue><fpage>4153</fpage><lpage>4164</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2022.3172479</pub-id><pub-id pub-id-type="medline">35511841</pub-id></nlm-citation></ref><ref id="ref126"><label>126</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>N</given-names> </name><name name-style="western"><surname>Subbalakshmi</surname><given-names>K</given-names> </name><name name-style="western"><surname>Chandramouli</surname><given-names>R</given-names> </name></person-group><article-title>Revealing the roles of 
part-of-speech taggers in Alzheimer disease detection: scientific discovery using one-intervention causal explanation</article-title><source>JMIR Form Res</source><year>2023</year><month>05</month><day>2</day><volume>7</volume><fpage>e36590</fpage><pub-id pub-id-type="doi">10.2196/36590</pub-id><pub-id pub-id-type="medline">37129944</pub-id></nlm-citation></ref><ref id="ref127"><label>127</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name><name name-style="western"><surname>Pu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>WQ</given-names> </name></person-group><article-title>Cross-lingual Alzheimer&#x2019;s disease detection based on paralinguistic and pre-trained features</article-title><source>ICASSP 2023 - 2023 IEEE Int Conf Acoustics, Speech Signal Proc (ICASSP)</source><year>2023</year><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1109/ICASSP49357.2023.10095522</pub-id></nlm-citation></ref><ref id="ref128"><label>128</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zheng</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bouazizi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ohtsuki</surname><given-names>T</given-names> </name></person-group><article-title>An evaluation on information composition in dementia detection based on speech</article-title><source>IEEE Access</source><year>2022</year><volume>10</volume><fpage>92294</fpage><lpage>92306</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2022.3203068</pub-id></nlm-citation></ref><ref id="ref129"><label>129</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Nambiar</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Likhita</surname><given-names>K</given-names> </name><name name-style="western"><surname>Pujya</surname><given-names>K</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>D</given-names> </name><name name-style="western"><surname>Vekkot</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lalitha</surname><given-names>S</given-names> </name></person-group><article-title>Comparative study of deep classifiers for early dementia detection using speech transcripts</article-title><source>2022 IEEE 19th India Counc Int Conf (INDICON)</source><year>2022</year><fpage>1</fpage><lpage>6</lpage><pub-id pub-id-type="doi">10.1109/INDICON56171.2022.10039705</pub-id></nlm-citation></ref><ref id="ref130"><label>130</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Priyadarshinee</surname><given-names>P</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Melechovsky</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CMY</given-names> </name><name name-style="western"><surname>B. T.</surname><given-names>B</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>JM</given-names> </name></person-group><article-title>Alzheimer&#x2019;s dementia speech (audio vs. text): multi-modal machine learning at high vs. 
low resolution</article-title><source>Appl Sci (Basel)</source><year>2023</year><volume>13</volume><issue>7</issue><fpage>4244</fpage><pub-id pub-id-type="doi">10.3390/app13074244</pub-id></nlm-citation></ref><ref id="ref131"><label>131</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Li</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Efficient pause extraction and encode strategy for Alzheimer&#x2019;s disease detection using only acoustic features from spontaneous speech</article-title><source>Brain Sci</source><year>2023</year><month>03</month><day>11</day><volume>13</volume><issue>3</issue><fpage>477</fpage><pub-id pub-id-type="doi">10.3390/brainsci13030477</pub-id><pub-id pub-id-type="medline">36979287</pub-id></nlm-citation></ref><ref id="ref132"><label>132</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mahajan</surname><given-names>P</given-names> </name><name name-style="western"><surname>Baths</surname><given-names>V</given-names> </name></person-group><article-title>Acoustic and language based deep learning approaches for Alzheimer&#x2019;s dementia detection from spontaneous speech</article-title><source>Front Aging Neurosci</source><year>2021</year><volume>13</volume><fpage>623607</fpage><pub-id pub-id-type="doi">10.3389/fnagi.2021.623607</pub-id><pub-id pub-id-type="medline">33613269</pub-id></nlm-citation></ref><ref id="ref133"><label>133</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mei</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>The USTC system for ADReSS-M challenge</article-title><source>ICASSP 2023 - 2023 IEEE Int Conf Acoustics, Speech Signal Proc (ICASSP)</source><year>2023</year><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1109/ICASSP49357.2023.10094714</pub-id></nlm-citation></ref><ref id="ref134"><label>134</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ali Meerza</surname><given-names>SI</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name></person-group><article-title>Fair and privacy-preserving Alzheimer&#x2019;s disease diagnosis based on spontaneous speech analysis via federated learning</article-title><source>2022 44th Ann Int Conf IEEE Eng Med Biol Soc (EMBC)</source><year>2022</year><fpage>1362</fpage><lpage>1365</lpage><pub-id pub-id-type="doi">10.1109/EMBC48229.2022.9871204</pub-id></nlm-citation></ref><ref id="ref135"><label>135</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Xing</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Pang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Du</surname><given-names>L</given-names> </name></person-group><article-title>SpeechFormer++: a hierarchical efficient framework for paralinguistic speech 
processing</article-title><source>IEEE/ACM Trans Audio Speech Lang Process</source><year>2023</year><volume>31</volume><fpage>775</fpage><lpage>788</lpage><pub-id pub-id-type="doi">10.1109/TASLP.2023.3235194</pub-id></nlm-citation></ref><ref id="ref136"><label>136</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tamm</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vandenberghe</surname><given-names>R</given-names> </name><name name-style="western"><surname>Van Hamme</surname><given-names>H</given-names> </name></person-group><article-title>Cross-lingual transfer learning for Alzheimer&#x2019;s detection from spontaneous speech</article-title><source>ICASSP 2023 - 2023 IEEE Int Conf Acoust, Speech Signal Process (ICASSP)</source><year>2023</year><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1109/ICASSP49357.2023.10096770</pub-id></nlm-citation></ref><ref id="ref137"><label>137</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Woszczyk</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hedlikova</surname><given-names>A</given-names> </name><name name-style="western"><surname>Akman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Demetriou</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schuller</surname><given-names>B</given-names> </name></person-group><article-title>Data augmentation for dementia detection in spoken language</article-title><source>Proc Interspeech 2022</source><year>2022</year><fpage>2858</fpage><lpage>2862</lpage><pub-id pub-id-type="doi">10.21437/Interspeech.2022-10210</pub-id></nlm-citation></ref><ref id="ref138"><label>138</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Jin</surname><given-names>L</given-names> </name><name name-style="western"><surname>Oh</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>H</given-names> </name><etal/></person-group><article-title>CONSEN: complementary and simultaneous ensemble for Alzheimer&#x2019;s disease detection and MMSE score prediction</article-title><source>ICASSP 2023 - 2023 IEEE Int Conf Acoustics, Speech Signal Proc (ICASSP)</source><year>2023</year><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1109/ICASSP49357.2023.10096253</pub-id></nlm-citation></ref><ref id="ref139"><label>139</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ilias</surname><given-names>L</given-names> </name><name name-style="western"><surname>Askounis</surname><given-names>D</given-names> </name></person-group><article-title>Context-aware attention layers coupled with optimal transport domain adaptation and multimodal fusion methods for recognizing dementia from spontaneous speech</article-title><source>Knowl Based Syst</source><year>2023</year><month>10</month><volume>277</volume><fpage>110834</fpage><pub-id pub-id-type="doi">10.1016/j.knosys.2023.110834</pub-id></nlm-citation></ref><ref id="ref140"><label>140</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Azevedo</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bethlehem</surname><given-names>RAI</given-names> </name><name name-style="western"><surname>Whiteside</surname><given-names>DJ</given-names> </name><etal/></person-group><article-title>Identifying healthy individuals with Alzheimer&#x2019;s disease neuroimaging phenotypes in the UK Biobank</article-title><source>Commun 
Med</source><year>2023</year><month>07</month><day>20</day><volume>3</volume><issue>1</issue><fpage>100</fpage><pub-id pub-id-type="doi">10.1038/s43856-023-00313-w</pub-id></nlm-citation></ref><ref id="ref141"><label>141</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Du</surname><given-names>W</given-names> </name></person-group><article-title>Identification of mild cognitive impairment using multimodal 3D imaging data and graph convolutional networks</article-title><source>Phys Med Biol</source><year>2024</year><month>12</month><day>7</day><volume>69</volume><issue>23</issue><fpage>235002</fpage><pub-id pub-id-type="doi">10.1088/1361-6560/ad8c94</pub-id></nlm-citation></ref><ref id="ref142"><label>142</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jahan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Abu Taher</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kaiser</surname><given-names>MS</given-names> </name><etal/></person-group><article-title>Explainable AI-based Alzheimer&#x2019;s prediction and management using multimodal data</article-title><source>PLOS ONE</source><year>2023</year><volume>18</volume><issue>11</issue><fpage>e0294253</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0294253</pub-id><pub-id pub-id-type="medline">37972072</pub-id></nlm-citation></ref><ref id="ref143"><label>143</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jahan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Saif Adib</surname><given-names>MdR</given-names> </name><name name-style="western"><surname>Huda</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>Federated explainable AI-based Alzheimer&#x2019;s disease prediction with multimodal data</article-title><source>IEEE Access</source><year>2025</year><volume>13</volume><fpage>43435</fpage><lpage>43454</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2025.3547343</pub-id></nlm-citation></ref><ref id="ref144"><label>144</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Myrzashova</surname><given-names>R</given-names> </name><name name-style="western"><surname>Alsamhi</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Shvetsov</surname><given-names>AV</given-names> </name><name name-style="western"><surname>Hawbani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Guizani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>X</given-names> </name></person-group><article-title>BCFTL: blockchain-enabled multimodal federated transfer learning for decentralized Alzheimer&#x2019;s diagnosis</article-title><source>IEEE Internet Things J</source><year>2025</year><volume>12</volume><issue>15</issue><fpage>29656</fpage><lpage>29669</lpage><pub-id pub-id-type="doi">10.1109/JIOT.2025.3569652</pub-id></nlm-citation></ref><ref id="ref145"><label>145</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>K</given-names> </name><name name-style="western"><surname>Weng</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>A multi&#x2010;view learning approach with diffusion model to synthesize FDG PET from MRI T1WI for diagnosis of Alzheimer&#x2019;s disease</article-title><source>Alzheimers Dement</source><year>2025</year><month>02</month><volume>21</volume><issue>2</issue><fpage>e14421</fpage><pub-id pub-id-type="doi">10.1002/alz.14421</pub-id></nlm-citation></ref><ref id="ref146"><label>146</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>W</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>W</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Bidirectional mapping of brain MRI and PET with 3D Reversible GAN for the diagnosis of Alzheimer&#x2019;s disease</article-title><source>Front Neurosci</source><year>2021</year><volume>15</volume><fpage>646013</fpage><pub-id pub-id-type="doi">10.3389/fnins.2021.646013</pub-id><pub-id pub-id-type="medline">33935634</pub-id></nlm-citation></ref><ref id="ref147"><label>147</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gupta</surname><given-names>B</given-names> </name><name name-style="western"><surname>Jegannathan</surname><given-names>GK</given-names> </name><name name-style="western"><surname>Alam</surname><given-names>MS</given-names> </name><etal/></person-group><article-title>Multimodal lightweight neural network for Alzheimer&#x2019;s disease diagnosis integrating neuroimaging and cognitive scores</article-title><source>Neurosci Inf</source><year>2025</year><month>09</month><volume>5</volume><issue>3</issue><fpage>100218</fpage><pub-id pub-id-type="doi">10.1016/j.neuri.2025.100218</pub-id></nlm-citation></ref><ref 
id="ref148"><label>148</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A new classification network for diagnosing Alzheimer&#x2019;s disease in class-imbalance MRI datasets</article-title><source>Front Neurosci</source><year>2022</year><month>08</month><day>25</day><volume>16</volume><fpage>807085</fpage><pub-id pub-id-type="doi">10.3389/fnins.2022.807085</pub-id></nlm-citation></ref><ref id="ref149"><label>149</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sarma</surname><given-names>M</given-names> </name><name name-style="western"><surname>Chatterjee</surname><given-names>D</given-names> </name></person-group><article-title>Multistage diagnosis of Alzheimer&#x2019;s disease from clinical data using &#x2018;deep ensemble learning&#x2019;</article-title><source>JAIAI</source><year>2024</year><volume>01</volume><issue>1</issue><fpage>122</fpage><lpage>138</lpage><pub-id pub-id-type="doi">10.54364/JAIAI.2024.1109</pub-id></nlm-citation></ref><ref id="ref150"><label>150</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mujahid</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rehman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Alam</surname><given-names>T</given-names> </name><name name-style="western"><surname>Alamri</surname><given-names>FS</given-names> </name><name name-style="western"><surname>Fati</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Saba</surname><given-names>T</given-names> 
</name></person-group><article-title>An efficient ensemble approach for Alzheimer&#x2019;s disease detection using an adaptive synthetic technique and deep learning</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>07</month><day>26</day><volume>13</volume><issue>15</issue><fpage>2489</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13152489</pub-id><pub-id pub-id-type="medline">37568852</pub-id></nlm-citation></ref><ref id="ref151"><label>151</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dubey</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Bhongade</surname><given-names>A</given-names> </name><name name-style="western"><surname>Palsodkar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Fulzele</surname><given-names>P</given-names> </name></person-group><article-title>Efficient explainable models for Alzheimer&#x2019;s disease classification with feature selection and data balancing approach using ensemble learning</article-title><source>Diagnostics (Basel)</source><year>2024</year><month>12</month><day>10</day><volume>14</volume><issue>24</issue><fpage>2770</fpage><pub-id pub-id-type="doi">10.3390/diagnostics14242770</pub-id><pub-id pub-id-type="medline">39767131</pub-id></nlm-citation></ref><ref id="ref152"><label>152</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mandawkar</surname><given-names>U</given-names> </name><name name-style="western"><surname>Diwan</surname><given-names>T</given-names> </name></person-group><article-title>Hybrid cuttle Fish-Grey wolf optimization tuned weighted ensemble classifier for Alzheimer&#x2019;s disease classification</article-title><source>Biomed Signal Process Control</source><year>2024</year><month>06</month><volume>92</volume><fpage>106101</fpage><pub-id 
pub-id-type="doi">10.1016/j.bspc.2024.106101</pub-id></nlm-citation></ref><ref id="ref153"><label>153</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jasodanand</surname><given-names>VH</given-names> </name><name name-style="western"><surname>Kowshik</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Puducheri</surname><given-names>S</given-names> </name><etal/></person-group><article-title>AI-driven fusion of multimodal data for Alzheimer&#x2019;s disease biomarker assessment</article-title><source>Nat Commun</source><year>2025</year><month>08</month><day>11</day><volume>16</volume><issue>1</issue><fpage>7407</fpage><pub-id pub-id-type="doi">10.1038/s41467-025-62590-4</pub-id><pub-id pub-id-type="medline">40789853</pub-id></nlm-citation></ref><ref id="ref154"><label>154</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weiner</surname><given-names>MW</given-names> </name><name name-style="western"><surname>Kanoria</surname><given-names>S</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>MJ</given-names> </name><etal/></person-group><article-title>Overview of Alzheimer&#x2019;s Disease Neuroimaging Initiative and future clinical trials</article-title><source>Alzheimer's Dement</source><year>2025</year><month>01</month><volume>21</volume><issue>1</issue><fpage>e14321</fpage><pub-id pub-id-type="doi">10.1002/alz.14321</pub-id><pub-id pub-id-type="medline">39711072</pub-id></nlm-citation></ref><ref id="ref155"><label>155</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilkinson</surname><given-names>T</given-names> </name><name name-style="western"><surname>Schnier</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bush</surname><given-names>K</given-names> 
</name><etal/></person-group><article-title>Identifying dementia outcomes in UK Biobank: a validation study of primary care, hospital admissions and mortality data</article-title><source>Eur J Epidemiol</source><year>2019</year><month>06</month><volume>34</volume><issue>6</issue><fpage>557</fpage><lpage>565</lpage><pub-id pub-id-type="doi">10.1007/s10654-019-00499-1</pub-id></nlm-citation></ref><ref id="ref156"><label>156</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thulasimani</surname><given-names>V</given-names> </name><name name-style="western"><surname>Shanmugavadivel</surname><given-names>K</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>J</given-names> </name><name name-style="western"><surname>Easwaramoorthy</surname><given-names>SV</given-names> </name></person-group><article-title>A review of datasets, optimization strategies, and learning algorithms for analyzing Alzheimer&#x2019;s dementia detection</article-title><source>Neuropsychiatr Dis Treat</source><year>2024</year><volume>20</volume><fpage>2203</fpage><lpage>2225</lpage><pub-id pub-id-type="doi">10.2147/NDT.S496307</pub-id><pub-id pub-id-type="medline">39588176</pub-id></nlm-citation></ref><ref id="ref157"><label>157</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chan</surname><given-names>KCG</given-names> </name><name name-style="western"><surname>Xia</surname><given-names>F</given-names> </name><name name-style="western"><surname>Kukull</surname><given-names>WA</given-names> </name></person-group><article-title>NACC data: who is represented over time and across centers, and implications for generalizability</article-title><source>Alzheimer's Dement</source><year>2025</year><month>09</month><volume>21</volume><issue>9</issue><fpage>e70657</fpage><pub-id pub-id-type="doi">10.1002/alz.70657</pub-id><pub-id 
pub-id-type="medline">40968249</pub-id></nlm-citation></ref><ref id="ref158"><label>158</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fowler</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rainey-Smith</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Bird</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Fifteen years of the Australian Imaging, Biomarkers and Lifestyle (AIBL) study: progress and observations from 2,359 older adults spanning the spectrum from cognitive normality to Alzheimer&#x2019;s disease</article-title><source>J Alzheimer&#x2019;s Dis Rep</source><year>2021</year><month>03</month><day>11</day><volume>5</volume><issue>1</issue><fpage>443</fpage><lpage>468</lpage><pub-id pub-id-type="doi">10.3233/ADR-210005</pub-id></nlm-citation></ref><ref id="ref159"><label>159</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ling</surname><given-names>Z</given-names> </name></person-group><article-title>Deep learning-based speech analysis for Alzheimer&#x2019;s disease detection: a literature review</article-title><source>Alz Res Therapy</source><year>2022</year><month>12</month><day>14</day><volume>14</volume><issue>1</issue><fpage>186</fpage><pub-id pub-id-type="doi">10.1186/s13195-022-01131-3</pub-id></nlm-citation></ref><ref id="ref160"><label>160</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name 
name-style="western"><surname>He</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>NeuroSymAD: a neuro-symbolic framework for interpretable Alzheimer&#x2019;s disease diagnosis</article-title><comment>Preprint posted online on  Mar 1, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2503.00510</pub-id></nlm-citation></ref><ref id="ref161"><label>161</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Sadeghi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hajati</surname><given-names>F</given-names> </name><name name-style="western"><surname>Argha</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lovell</surname><given-names>NH</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>M</given-names> </name></person-group><article-title>Interpretable graph-based models on multimodal biomedical data integration: a technical review and benchmarking</article-title><source>arXiv</source><comment>Preprint posted online on  May 3, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2505.01696</pub-id></nlm-citation></ref><ref id="ref162"><label>162</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mahamud</surname><given-names>E</given-names> </name><name name-style="western"><surname>Assaduzzaman</surname><given-names>M</given-names> </name><name name-style="western"><surname>Islam</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fahad</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hossen</surname><given-names>MJ</given-names> </name><name 
name-style="western"><surname>Ramanathan</surname><given-names>TT</given-names> </name></person-group><article-title>Enhancing Alzheimer&#x2019;s disease detection: an explainable machine learning approach with ensemble techniques</article-title><source>Intell-Based Med</source><year>2025</year><volume>11</volume><fpage>100240</fpage><pub-id pub-id-type="doi">10.1016/j.ibmed.2025.100240</pub-id></nlm-citation></ref><ref id="ref163"><label>163</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>T</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Thung</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Latent representation learning for Alzheimer&#x2019;s disease diagnosis with incomplete multi-modality neuroimaging and genetic data</article-title><source>IEEE Trans Med Imaging</source><year>2019</year><month>10</month><volume>38</volume><issue>10</issue><fpage>2411</fpage><lpage>2422</lpage><pub-id pub-id-type="doi">10.1109/TMI.2019.2913158</pub-id></nlm-citation></ref><ref id="ref164"><label>164</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sibille</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fahmi</surname><given-names>R</given-names> </name></person-group><article-title>Multi&#x2010;branch convolutional neural network for Alzheimer&#x2019;s disease versus normal control classification using PET images</article-title><source>Alzheimer&#x2019;s Dementia</source><year>2023</year><month>06</month><volume>19</volume><issue>S3</issue><fpage>e061092</fpage><pub-id 
pub-id-type="doi">10.1002/alz.061092</pub-id></nlm-citation></ref><ref id="ref165"><label>165</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><etal/></person-group><article-title>BrainNet-moe: brain-inspired mixture-of-experts learning for neurological disease identification</article-title><comment>Preprint posted online on  Mar 5, 2025</comment><pub-id pub-id-type="doi">10.48550/ARXIV.2503.07640</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Full database search strategies for PubMed, Scopus, IEEE Xplore, and ACM Digital Library, including complete Boolean queries, search fields, filters, and publication date limits used for study identification.</p><media xlink:href="jmir_v28i1e85414_app1.docx" xlink:title="DOCX File, 999 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Performance evaluation for AD diagnosis. AD: Alzheimer disease.</p><media xlink:href="jmir_v28i1e85414_app2.docx" xlink:title="DOCX File, 22 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Complete QUADAS-2 risk-of-bias assessments for all included studies, summarizing judgments across patient selection, index test, reference standard, and flow or timing, with detailed study-level ratings. 
QUADAS-2: Revised Quality Assessment of Diagnostic Accuracy Studies Tool.</p><media xlink:href="jmir_v28i1e85414_app3.docx" xlink:title="DOCX File, 38 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Overview of traditional machine-learning models applied in Alzheimer disease research, including SVM, decision trees, HMMs, KNN, logistic regression, GMMs, and foundational CNN or RL descriptions, with methodological principles and limitations. CNN: convolutional neural network; GMM: Gaussian mixture models; HMM: hidden Markov model; KNN: k-nearest neighbors; RL: reinforcement learning; SVM: support vector machine.</p><media xlink:href="jmir_v28i1e85414_app4.docx" xlink:title="DOCX File, 42 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Cochrane Handbook 5.3.3&#x2013;aligned data-extraction tables summarizing study design, datasets, participants, modalities, preprocessing, model architectures, validation schemes, outcomes, and limitations for all included studies.</p><media xlink:href="jmir_v28i1e85414_app5.docx" xlink:title="DOCX File, 185 KB"/></supplementary-material><supplementary-material id="app6"><label>Checklist 1</label><p>Completed PRISMA 2020, PRISMA-S checklist, and PRISMA expanded checklist specifying reporting locations for all required items, including eligibility criteria, search methods, extraction procedures, bias assessments, and synthesis reporting. PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses; PRISMA-S: Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for literature searches.</p><media xlink:href="jmir_v28i1e85414_app6.pdf" xlink:title="PDF File, 11026 KB"/></supplementary-material></app-group></back></article>