<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e74423</article-id><article-id pub-id-type="doi">10.2196/74423</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>ChatGPT-Assisted Deep Learning Models for Influenza-Like Illness Prediction in Mainland China: Time Series Analysis</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Huang</surname><given-names>Weihong</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wei</surname><given-names>Wudi</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>He</surname><given-names>Xiaotao</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib 
contrib-type="author"><name name-style="western"><surname>Zhan</surname><given-names>Baili</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Xie</surname><given-names>Xiaoting</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Meng</given-names></name><degrees>BEng</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Lai</surname><given-names>Shiyi</given-names></name><degrees>MBBS</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Yuan</surname><given-names>Zongxiang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Lai</surname><given-names>Jingzhen</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Chen</surname><given-names>Rongfeng</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Jiang</surname><given-names>Junjun</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ye</surname><given-names>Li</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Liang</surname><given-names>Hao</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Guangxi Key 
Laboratory of AIDS Prevention and Treatment &#x0026; School of Public Health, Guangxi Medical University</institution><addr-line>Nanning</addr-line><addr-line>Guangxi</addr-line><country>China</country></aff><aff id="aff2"><institution>Joint Laboratory for Emerging Infectious Diseases in China (Guangxi)-Association of Southeast Asian Nations, Life Sciences Institute, Guangxi Medical University</institution><addr-line>22 Shuangyong Road, Qingxiu District</addr-line><addr-line>Nanning</addr-line><addr-line>Guangxi</addr-line><country>China</country></aff><aff id="aff3"><institution>Life Sciences Institute, Guangxi Key Laboratory of AIDS Prevention and Treatment &#x0026; Joint Laboratory for Emerging Infectious Diseases in China (Guangxi)-Association of Southeast Asian Nations, Guangxi Medical University</institution><addr-line>Nanning</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Tutsoy</surname><given-names>Onder</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Guo</surname><given-names>Qingchun</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Hao Liang, PhD, Joint Laboratory for Emerging Infectious Diseases in China (Guangxi)-Association of Southeast Asian Nations, Life Sciences Institute, Guangxi Medical University, 22 Shuangyong Road, Qingxiu District, Nanning, Guangxi, 530021, China, 86 0771-5334215; <email>lianghao@gxmu.edu.cn</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date 
pub-type="epub"><day>27</day><month>6</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e74423</elocation-id><history><date date-type="received"><day>24</day><month>03</month><year>2025</year></date><date date-type="rev-recd"><day>29</day><month>04</month><year>2025</year></date><date date-type="accepted"><day>08</day><month>05</month><year>2025</year></date></history><copyright-statement>&#x00A9; Weihong Huang, Wudi Wei, Xiaotao He, Baili Zhan, Xiaoting Xie, Meng Zhang, Shiyi Lai, Zongxiang Yuan, Jingzhen Lai, Rongfeng Chen, Junjun Jiang, Li Ye, Hao Liang. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 27.6.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e74423"/><abstract><sec><title>Background</title><p>Influenza in mainland China results in a large number of outpatient and emergency visits related to influenza-like illness (ILI) annually. 
While deep learning models show promise for improving influenza forecasting, their technical complexity remains a barrier to practical implementation. Large language models, such as ChatGPT, offer the potential to reduce these barriers by supporting automated code generation, debugging, and model optimization.</p></sec><sec><title>Objective</title><p>This study aimed to evaluate the predictive performance of several deep learning models for ILI positive rates in mainland China and to explore the auxiliary role of ChatGPT-assisted development in facilitating model implementation.</p></sec><sec sec-type="methods"><title>Methods</title><p>ILI positivity rate data spanning from 2014 to 2024 were obtained from the Chinese National Influenza Center (CNIC) database. In total, 5 deep learning architectures&#x2014;long short-term memory (LSTM), neural basis expansion analysis for time series (N-BEATS), transformer, temporal fusion transformer (TFT), and time-series dense encoder (TiDE)&#x2014;were developed using a ChatGPT-assisted workflow covering code generation, error debugging, and performance optimization. Models were trained on data from 2014 to 2023 and tested on holdout data from 2024 (weeks 1&#x2010;39). Performance was evaluated using mean squared error (MSE), mean absolute error (MAE), and mean absolute percentage error (MAPE).</p></sec><sec sec-type="results"><title>Results</title><p>ILI trends exhibited clear seasonal patterns with winter peaks and summer troughs, alongside marked fluctuations during the COVID-19 pandemic period (2020&#x2010;2022). All 5 deep learning models were successfully constructed, debugged, and optimized with the assistance of ChatGPT. Among the 5 models, TiDE achieved the best predictive performance nationally (MAE=5.551, MSE=43.976, MAPE=72.413%) and in the southern region (MAE=7.554, MSE=89.708, MAPE=74.475%). 
In the northern region, where forecasting proved more challenging, TiDE still performed best (MAE=4.131, MSE=28.922), although high percentage errors remained (MAPE&#x003E;400%). N-BEATS demonstrated the second-best performance nationally (MAE=9.423) and showed greater stability in the north (MAE=6.325). In contrast, transformer and TFT consistently underperformed, with national MAE values of 10.613 and 12.538, respectively. TFT exhibited the highest deviation (national MAPE=169.29%). Extreme regional disparities were observed, particularly in northern China, where LSTM and TFT generated MAPE values exceeding 1918%, despite LSTM&#x2019;s moderate performance in the south (MAE=9.460).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Deep learning models, particularly TiDE, demonstrate strong potential for accurate ILI forecasting across diverse regions of China. Furthermore, large language models like ChatGPT can substantially enhance modeling efficiency and accessibility by assisting nontechnical users in model development. These findings support the integration of AI-assisted workflows into epidemic prediction systems as a scalable approach for improving public health preparedness.</p></sec></abstract><kwd-group><kwd>time series analysis</kwd><kwd>epidemic forecasting</kwd><kwd>public health preparedness</kwd><kwd>model optimization</kwd><kwd>seasonal pattern</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>In mainland China, influenza results in an annual average of 3 million excess outpatient and emergency visits attributable to influenza-like illness (ILI) [<xref ref-type="bibr" rid="ref1">1</xref>]. Timely epidemic prediction plays a critical role in effective public health interventions. 
However, traditional surveillance systems often experience reporting delays, creating significant gaps in real-time outbreak response&#x2014;an issue particularly challenging for frontline health workers tasked with rapid containment [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. In recent years, deep learning models have shown considerable potential in time series analysis for disease trend forecasting. Architectures such as long short-term memory (LSTM) [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref9">9</xref>], neural basis expansion analysis for time series (N-BEATS) [<xref ref-type="bibr" rid="ref10">10</xref>], temporal fusion transformer (TFT) [<xref ref-type="bibr" rid="ref11">11</xref>], time-series dense encoder (TiDE) [<xref ref-type="bibr" rid="ref12">12</xref>], and transformer [<xref ref-type="bibr" rid="ref13">13</xref>] have demonstrated strong predictive performance across various epidemiological applications. Despite these advancements, the application of such models specifically to ILI forecasting remains relatively limited [<xref ref-type="bibr" rid="ref14">14</xref>]. Current research in this area, including the studies of Darwish et al [<xref ref-type="bibr" rid="ref15">15</xref>], Yang et al [<xref ref-type="bibr" rid="ref16">16</xref>], and Amendolara et al [<xref ref-type="bibr" rid="ref17">17</xref>], has predominantly used LSTM-based approaches, while other advanced architectures have yet to be systematically explored for ILI prediction. Moreover, the practical implementation of deep learning methodologies presents a significant barrier. 
Building and optimizing such models requires specialized programming skills, which can limit their adoption by public health professionals, particularly those without technical backgrounds [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>Addressing these practical barriers might be possible through the use of large language models (LLMs) such as ChatGPT [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>], which have proven to be powerful tools in scientific research, natural language processing, and epidemic modeling. Built on advanced machine learning architectures, LLMs can support critical development tasks, including code generation, data preprocessing, error debugging, and model optimization [<xref ref-type="bibr" rid="ref22">22</xref>]. In epidemiological modeling, this feature shows significant potential. Many frontline health practitioners and grassroots workers lack formal programming training, which limits their ability to independently apply deep learning models. LLMs provide a means to make advanced modeling techniques more accessible by aiding in technical implementation via natural language interactions. Although LLMs have been increasingly applied in fields such as healthcare [<xref ref-type="bibr" rid="ref23">23</xref>] and medical diagnostics [<xref ref-type="bibr" rid="ref24">24</xref>], their integration into influenza epidemic forecasting and early warning systems remains rare. Existing studies have largely focused on traditional model design and implementation, with limited attention to how LLMs can actively assist in the modeling workflow for infectious disease prediction.</p><p>In this study, we aimed to predict the ILI positivity rate of mainland China by implementing 5 deep learning architectures (LSTM, transformer, N-BEATS, TFT, and TiDE), thereby eliminating these gaps. 
Additionally, we investigate the auxiliary role of ChatGPT in supporting the model development process, particularly in tasks such as code generation, debugging, and performance optimization. In this way, we attempt to demonstrate that frontline health workers can bypass traditional programming barriers and run advanced predictive models through natural language interaction with LLMs, thereby narrowing the gap between epidemiological research and primary public health practice.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Ethical Considerations</title><p>The data used in this study were sourced from a publicly accessible secondary database. Therefore, formal ethical approval was not required.</p></sec><sec id="s2-2"><title>Data Source</title><p>The data on the positive rate of ILI cases from the southern and northern regions of mainland China, as well as nationwide, spanning from the 1st week of 2014 to the 39th week of 2024, were obtained from the public database of the Chinese National Influenza Center (CNIC) [<xref ref-type="bibr" rid="ref25">25</xref>] (<ext-link ext-link-type="uri" xlink:href="https://ivdc.chinacdc.cn/cnic/">https://ivdc.chinacdc.cn/cnic/</ext-link>).</p></sec><sec id="s2-3"><title>Data Preprocessing and Model Input</title><p>The dataset was divided into a training set and a validation set. Specifically, the training set comprised data from the 1st week of 2014 to the 52nd week of 2023, which was used to develop and train the models. The validation set covered data from the 1st week to the 39th week of 2024 and was used to evaluate the models&#x2019; predictive performance on unseen, future data. 
This chronological partitioning permitted evaluation of the models&#x2019; capacity to extrapolate from historical patterns to accurately forecast emerging ILI trends in real-world settings.</p></sec><sec id="s2-4"><title>Construction of LSTM Models</title><p>LSTM networks, a specific form of recurrent neural network, were used to capture temporal dependencies in the time series of ILI positive rates. By using memory cells with gating mechanisms, the LSTM design regulates information flow, which helps the model capture short-term fluctuations and long-term seasonal patterns crucial for predicting ILI. Through the use of input, forget, and output gates, LSTM dynamically processes raw high-dimensional ILI time series, integrating essential data into a fixed-size hidden state vector. The stacked layers use trainable nonlinear transformations to capture seasonal patterns and sudden changes, effectively reducing dimensions and extracting features within the network. The core computational mechanism of our LSTM implementation can be expressed as:</p><p>Forget gate:</p><disp-formula id="equWL1"><mml:math id="eqn1"><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula><p>Input gate:</p><disp-formula id="equWL2"><mml:math 
id="eqn2"><mml:msub><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula><p>Candidate cell state:</p><disp-formula id="equWL3"><mml:math id="eqn3"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mo>~</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mfenced separators="|"><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced></mml:math></disp-formula><p>Cell state update:</p><disp-formula id="equWL4"><mml:math 
id="eqn4"><mml:msub><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2299;</mml:mo><mml:msub><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2299;</mml:mo><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mo>~</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula><p>Output gate:</p><disp-formula id="equWL5"><mml:math id="eqn5"><mml:msub><mml:mrow><mml:mi>o</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula><p>Hidden state:</p><disp-formula id="equWL6"><mml:math 
id="eqn6"><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>o</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2299;</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula><p>In these equations, &#x03C3; denotes the sigmoid activation function, tanh represents the hyperbolic tangent function, and &#x2299; indicates element-wise multiplication. The formulation helps the model to retain significant historical information and discard irrelevant noise, making it particularly effective for capturing the complex temporal patterns in ILI epidemiological data. The LSTM networks were configured with bidirectional layers to process sequence information in both forward and backward directions, enhancing the model&#x2019;s ability to detect contextual patterns in the time series.</p></sec><sec id="s2-5"><title>Construction of N-BEATS Models</title><p>To forecast ILI positive rates, the N-BEATS model was applied, taking advantage of its deep learning architecture to understand complex temporal trends in epidemiological information. The N-BEATS model uses a stack of fully connected neural network blocks, each made up of multiple layers with 2 residual connections: one dedicated to backcasting to reconstruct the input, and another for forecasting to predict future values. This design, featuring 2 layers of residuals, enables the model to incrementally refine its predictions by targeting the residuals not captured by earlier blocks, thereby improving forecast accuracy. 
The N-BEATS model uses each block to convert the input time series into basis expansion coefficients, which are subsequently combined with predefined basis functions to create the backcast and forecast outputs. Mathematically, the forecast <inline-formula><mml:math id="ieqn1"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is obtained by summing the outputs of each stack:</p><disp-formula id="equWL7"><mml:math id="eqn7"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>S</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where <inline-formula><mml:math id="ieqn2"><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the forward projection function of the i-th stack,</p><p>and <inline-formula><mml:math 
id="ieqn3"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the historical input sequence of length L.</p></sec><sec id="s2-6"><title>Construction of TFT Models</title><p>The TFT model was used to predict ILI positive rates, using its hybrid structure that merges recurrent neural networks with attention mechanisms to grasp both short-term and long-term patterns. TFT consists of several crucial parts: variable selection networks that dynamically select important input features at each time interval; static covariate encoders that manage time-invariant information like regional traits; and multi-head attention mechanisms that represent complex temporal interactions across multiple time scales. At the core of TFT&#x2019;s design is the understandable multi-head attention mechanism, which calculates attention weights through the scaled dot-product formula:</p><disp-formula id="E14"><mml:math id="eqn8"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>A</mml:mi><mml:mi>t</mml:mi><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>Q</mml:mi><mml:mo>,</mml:mo><mml:mi>K</mml:mi><mml:mo>,</mml:mo><mml:mi>V</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>s</mml:mi><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mi>t</mml:mi><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mfrac><mml:mrow><mml:mi>Q</mml:mi><mml:msup><mml:mi>K</mml:mi><mml:mi>T</mml:mi></mml:msup></mml:mrow><mml:msqrt><mml:msub><mml:mi>d</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:msqrt></mml:mfrac><mml:mo stretchy="false">)</mml:mo><mml:mi>V</mml:mi></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where Q, K, and V 
represent query, key, and value matrices derived from the input sequence, and <inline-formula><mml:math id="ieqn4"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mstyle></mml:math></inline-formula> is the dimension of the key vectors. By using this mechanism, the model can prioritize historical data based on its importance for future predictions, enhancing both accuracy and comprehensibility. Furthermore, TFT applies gating mechanisms and residual connections to support efficient information flow and alleviate issues such as vanishing gradients. By combining static metadata with temporal characteristics, TFT can address regional disparities in ILI transmission, making it highly effective for capturing the non-stationary and diverse nature of epidemiological time series data.</p></sec><sec id="s2-7"><title>Construction of TiDE Models</title><p>The TiDE model uses an encoder-decoder architecture that relies exclusively on multilayer perceptrons to effectively capture long-range dependencies and model non-stationary time series. In contrast to standard sequence models that use recurrence or convolution, TiDE simplifies forecasting by applying feedforward transformations to each time step separately while preserving temporal structure with positional and contextual encodings. With reduced computational complexity and the ability to support highly parallelized training, this design is apt for large-scale epidemiological forecasting tasks like ILI prediction. 
The core projection mechanism in TiDE can be expressed as:</p><disp-formula id="E13"><mml:math id="eqn9"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mi>z</mml:mi></mml:msub><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mi>z</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where <italic>Zt</italic> represents the encoded state at time t, <italic>&#x03D5;</italic> is a nonlinear activation function, <italic>Wz</italic> is the projection weight matrix, and <italic>Xt</italic> is the input vector of ILI features. The formulation supports the sharing of parameters over time steps, enabling the model to capture temporal dynamics through depth rather than through recurrence. Several stacked residual multilayer perceptron blocks form the encoder, enabling the transmission of information and the step-by-step abstraction of time-dependent patterns in the input sequence. TiDE also includes context gating, normalization, and skip connections to stabilize optimization and help model long-range patterns while maintaining local variability. Similarly, its decoder uses feedforward layers to transform the learned representations into forecasts over multiple steps.</p></sec><sec id="s2-8"><title>Construction of the Transformer Models</title><p>A self-attention-based encoder-decoder architecture was used in the transformer model to optimize sequential ILI data handling. Transformers, unlike recurrent networks, simultaneously process entire sequences with the help of positional encodings and multi-head attention. The model can grasp complex dependencies between observations at different times due to this design, no matter the temporal gap. 
At the core of the transformer&#x2019;s function is the scaled dot-product attention mechanism, which is described as:</p><disp-formula id="E15"><mml:math id="eqn10"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>A</mml:mi><mml:mi>t</mml:mi><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>Q</mml:mi><mml:mo>,</mml:mo><mml:mi>K</mml:mi><mml:mo>,</mml:mo><mml:mi>V</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>s</mml:mi><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mi>t</mml:mi><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mfrac><mml:mrow><mml:mi>Q</mml:mi><mml:msup><mml:mi>K</mml:mi><mml:mi>T</mml:mi></mml:msup></mml:mrow><mml:msqrt><mml:msub><mml:mi>d</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:msqrt></mml:mfrac><mml:mo stretchy="false">)</mml:mo><mml:mi>V</mml:mi></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where Q, K, and V represent query, key, and value matrices derived from the input sequence, and <italic>d<sub>k</sub></italic> is the dimension of the key vectors. The softmax function balances attention weights throughout the sequence, facilitating the model&#x2019;s ability to focus on crucial parts of the input during the generation of each output. The transformer&#x2019;s self-attention mechanism allows the model to identify complex relationships between observations over different time intervals for ILI forecasting. This feature enables the model to concentrate on pertinent historical data points dynamically, no matter where they appear in the sequence. Furthermore, the transformer&#x2019;s design allows for parallel processing of whole sequences, enabling efficient training and inference, which is a major benefit when dealing with large epidemiological datasets. 
The model&#x2019;s proficiency in handling long-range dependencies without recurrent structures allows it to effectively capture the complex temporal dynamics found in ILI time series data. Previous research has shown these characteristics using transformer-based models for epidemiological forecasting.</p></sec><sec id="s2-9"><title>ChatGPT-Assisted Model Development</title><p>In this study, the ChatGPT large language model was used as an auxiliary tool to systematically support the development of deep learning models. This assistance primarily involved 2 key aspects: code generation and debugging support.</p><p>For code generation, structured prompt templates were designed to produce initial code frameworks for each model architecture. Standardized query formats were used (eg, "Generate a Python script for time-series prediction using the Darts library and the transformer model, including steps for data loading, preprocessing, model definition, training, prediction, and evaluation&#x201D;) to ensure consistency and comprehensiveness across implementations. This approach facilitated efficient and reproducible development of complex model architectures without the need for extensive manual coding.</p><p>During debugging, code errors and related details were shared with ChatGPT for help. When working through data preprocessing steps, like transforming and scaling time series data, this approach was really useful for catching and fixing issues such as problems with converting dates or mismatched scales. We kept track of all error types and how we fixed them so we could create a clear debugging guide.</p><p>All interactions with ChatGPT followed specific prompt patterns to keep everything consistent and easy to reproduce. Example prompts are included in the extra materials in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. 
Using AI this way provides a repeatable method for building deep learning models in epidemiology, making it easier for more people to use predictive tools in public health without needing advanced technical skills.</p></sec><sec id="s2-10"><title>Cost Function and Parameter Learning Strategy</title><p>The mean squared error (MSE) was used as the loss function for training all deep learning models in this study, and it is defined as<inline-formula><mml:math id="ieqn5"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula> , where <inline-formula><mml:math id="ieqn6"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the predictive value and <inline-formula><mml:math id="ieqn7"><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the actual value, and n is the total number of observations.</p><p>The model&#x2019;s parameters were fine-tuned using gradient descent learning, particularly using the Adam optimizer alongside backpropagation. 
The learning rate and weight decay settings were adjusted manually for each architecture according to initial experiments, as follows:</p><p>TiDE: learning rate=0.0005, weight decay=1e-2</p><p>Transformer: learning rate=0.0001, weight decay=1e-3</p><p>N-BEATS: learning rate=0.0005, weight decay=1e-3</p><p>LSTM: learning rate=0.005, weight decay=1e-4</p><p>TFT: learning rate=0.001, weight decay=1e-3</p><p>In order to prevent overfitting and improve convergence, learning rate scheduling was implemented using the ReduceLROnPlateau strategy in all models. If the training loss did not improve within a patience window of 10 epochs, the learning rate was reduced by a factor of 0.1. Dropout layers and batch normalization were applied to specific architecture design applications to enhance generalization.</p></sec><sec id="s2-11"><title>Software and Libraries</title><p>All models were implemented in Python using TensorFlow and Keras for LSTM, N-BEATS, TFT, and transformer models, with PyTorch used for some prototyping tasks. Time series forecasting and model evaluation were carried out using the Darts library. Data preprocessing and manipulation were done with NumPy and Pandas, while Matplotlib and Seaborn were used for visualization. SciPy was used for statistical analysis and hyperparameter optimization. Additionally, ChatGPT was used for code generation, debugging, and model optimization throughout the study.</p></sec><sec id="s2-12"><title>Model Comparison</title><p>To evaluate the performance of the models, we used several metrics: MSE, root mean squared error, mean absolute error (MAE), and mean absolute percentage error (MAPE). Smaller values of these metrics indicate superior predictive accuracy. 
The formulas for these metrics are as follows:</p><disp-formula id="equWL8"><mml:math id="eqn11"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E16"><mml:math id="eqn12"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="equWL9"><mml:math id="eqn13"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle 
displaystyle="true" scriptlevel="0"><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>100</mml:mn><mml:mi mathvariant="normal">%</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfrac><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where <inline-formula><mml:math id="ieqn8"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the predictive value and <inline-formula><mml:math id="ieqn9"><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the actual value, and n is the total number of observations.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Characteristics of Influenza-Like Symptoms in Mainland China</title><p>We systematically collected ILI case data from southern China, northern China, and mainland China as a whole between the 1st week of 2014 and the 39th week of 2024. 
Time-series analysis revealed consistent seasonal patterns across all regions, characterized by winter peaks (January-February) and summer troughs (July-August), as visually demonstrated in <xref ref-type="fig" rid="figure1">Figure 1A&#x2013;C</xref>. The data revealed that the positive rate of ILI cases exhibited significant variability during 2014&#x2010;2024 (CV=0.99 nationwide, 0.96 in southern regions, and 1.32 in northern regions). Despite these cyclical patterns, the Augmented Dickey-Fuller test confirmed significant non-stationarity in the data, indicating inherent unpredictability in long-term trends.</p><p>A sharp decline in national ILI cases was observed in January 2020, coinciding with the introduction of COVID-19 containment measures such as social distancing and mask mandates. This trough was followed by a rebound to peak levels in January 2022 (<xref ref-type="fig" rid="figure1">Figure 1A</xref>). Regional analyses showed synchronized downward trajectories: southern China exhibited a sustained decline from July 2019 to July 2020 (<xref ref-type="fig" rid="figure1">Figure 1B</xref>), while northern China experienced a similar trend starting one month earlier (June 2019&#x2013;July 2020, <xref ref-type="fig" rid="figure1">Figure 1C</xref>). Decomposition of the post-2020 period revealed dual dynamics, with overall case counts showing steady growth from late 2020, while positivity rates initially declined before rising again in early 2021.</p><p>Between 2014 and 2020, flu activity peaks stayed consistent during expected high seasons. Starting in 2020, we saw a 4&#x2010;6 week delay in peak timing, likely connected to COVID-19-related behavior changes like mask use and social distancing. This shift reached its highest point in spring 2023, then returned to typical winter-summer patterns by 2024. By late 2023, all areas showed regular seasonal trends again, confirming stable disease spread patterns. 
These patterns reveal how flu transmission timing changes over years and help build better prediction models.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Seasonal decomposition of influenza-like illness (ILI) in mainland China. (A) Multicycle superposition of ILI cases at the national level; (B) seasonal variation patterns in the southern region; and (C) seasonal variation patterns in the northern region.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e74423_fig01.png"/></fig></sec><sec id="s3-2"><title>Performance Comparison of Deep Learning Models</title><p>We evaluated the predictive performance of 5 deep learning models&#x2014;LSTM, N-BEATS, TFT, TiDE, and transformer&#x2014;in forecasting ILI positivity rates across national, southern, and northern datasets for weeks 1&#x2010;39 of 2024. Model training progress was monitored using fitting visualizations (<xref ref-type="fig" rid="figure2">Figure 2</xref>).</p><p>Among the 5 models, TiDE achieved the best overall performance, with predictions closely aligning with observed ILI-positive rates across all datasets. The models were assessed using 3 metrics: MSE, MAE, and MAPE, with lower values indicating better predictive accuracy.</p><p>At the national level, TiDE performed better than the other models, with an MAE of 5.551, MSE of 43.976, and MAPE of 72.413%. N-BEATS was close behind, showing an MAE of 9.4231, MSE of 133.1737, and MAPE of 105.72%. LSTM performed moderately, with an MAE of 6.934, MSE of 61.391, and MAPE of 88.793%. Transformers and TFT had higher error rates, with transformers having an MAE of 10.6128 and TFT showing the largest deviation, with a MAPE of 169.29% (<xref ref-type="fig" rid="figure3">Figure 3A</xref> and <xref ref-type="table" rid="table1">Table 1</xref>).</p><p>In the southern region, TiDE again achieved the best performance (MAE=7.554, MSE=89.708, MAPE=74.475%). 
N-BEATS ranked second, while TFT and transformer displayed considerable deviations. LSTM performed well but remained slightly behind TiDE (<xref ref-type="fig" rid="figure3">Figure 3B</xref> and <xref ref-type="table" rid="table1">Table 1</xref>).</p><p>In the northern region, predictive performance varied significantly. TiDE maintained the lowest MAE (4.131) and MSE (28.922) despite high percentage errors (MAPE=486.087%). N-BEATS showed stable performance (MAE=6.325, MSE=58.0936, MAPE=468.41%). LSTM and TFT produced particularly high MAPE values (1918.52% and 2215.66%, respectively), while the transformer achieved intermediate error rates (MAPE=1090.63%) (<xref ref-type="fig" rid="figure3">Figure 3C</xref> and <xref ref-type="table" rid="table1">Table 1</xref>).</p><p>Overall, TiDE consistently achieved the highest accuracy and stability across regions, while notable forecasting challenges remained in the northern dataset due to higher variability and error amplification.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>The comparative forecasting trajectories of multiple predictive models alongside actual observed data across 3 geographical divisions: Nation (<bold>A</bold>), South (<bold>B</bold>), and North (<bold>C</bold>), from 2014 to 2023. &#x201C;Actual&#x201D; denotes the real data; LSTM represents the long short-term memory neural network model; N-BEATS (neural basis expansion analysis for time series), TFT (temporal fusion transformer), TiDE (time-series dense encoder), and transformer stand for distinct forecasting models. 
Specifically, the orange line indicates the actual data, the yellow line shows the predictive results from the LSTM model, the purple line represents predictions via the N-beats model, the green line reflects forecasts from the TFT model, the blue line is the predictive output of the TiDE model, and the pink line denotes predictions by the transformer model. ILI: influenza-like illness.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e74423_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>The forecasting curves of various models and the actual data series in 3 scenarios: Nation (A), South (B), and North (C). Comparison of different forecasting models. &#x201C;Actual&#x201D; denotes the real data; LSTM represents the long short-term memory neural network model; N-BEATS (neural basis expansion analysis for time series), TFT (temporal fusion transformer), TiDE (time-series dense encoder), and transformer stand for distinct forecasting models. Specifically, the black line indicates the actual data, the pink line shows the predictive results from the LSTM model, the purple line represents predictions via the N-BEATS model, the yellow line reflects forecasts from the TFT model, the blue line is the predictive output of the TiDE model, and the green line denotes predictions by the transformer model. 
ILI: influenza-like illness.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e74423_fig03.png"/></fig><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Comparison of the fitting and prediction accuracy of the models.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom"/><td align="left" valign="bottom"/><td align="left" valign="bottom"/><td align="left" valign="bottom" colspan="2">Training set</td><td align="left" valign="bottom"/><td align="left" valign="bottom">Test set</td><td align="left" valign="bottom"/></tr><tr><td align="left" valign="bottom">Region</td><td align="left" valign="top">Model</td><td align="left" valign="top">MAE<sup>a</sup></td><td align="left" valign="top">MSE<sup>b</sup></td><td align="left" valign="top">MAPE<sup>c</sup> (%)</td><td align="left" valign="top">MAE</td><td align="left" valign="top">MSE</td><td align="left" valign="top">MAPE (%)</td></tr></thead><tbody><tr><td align="left" valign="top"/><td align="left" valign="top">LSTM<sup>d</sup></td><td align="left" valign="top">2.022</td><td align="left" valign="top">9.341</td><td align="left" valign="top">747.490</td><td align="left" valign="top">6.934</td><td align="left" valign="top">61.391</td><td align="left" valign="top">88.793</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">N-BEATS<sup>e</sup></td><td align="left" valign="top">2.468</td><td align="left" valign="top">11.296</td><td align="left" valign="top">1105.892</td><td align="left" valign="top">9.423</td><td align="left" valign="top">133.174</td><td align="left" valign="top">105.717</td></tr><tr><td align="left" valign="top">Nation</td><td align="left" valign="top">TFT<sup>f</sup></td><td align="left" valign="top">2.290</td><td align="left" valign="top">12.059</td><td align="left" valign="top">564.371</td><td align="left" valign="top">12.538</td><td align="left" 
valign="top">235.157</td><td align="left" valign="top">169.290</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">TiDE<sup>g</sup></td><td align="left" valign="top">2.549</td><td align="left" valign="top">13.062</td><td align="left" valign="top">1338.139</td><td align="left" valign="top">5.551</td><td align="left" valign="top">43.976</td><td align="left" valign="top">72.413</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Transformer</td><td align="left" valign="top">5.658</td><td align="left" valign="top">47.188</td><td align="left" valign="top">3473.858</td><td align="left" valign="top">10.613</td><td align="left" valign="top">132.582</td><td align="left" valign="top">161.581</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">LSTM</td><td align="left" valign="top">2.216</td><td align="left" valign="top">10.390</td><td align="left" valign="top">573.356</td><td align="left" valign="top">9.460</td><td align="left" valign="top">126.399</td><td align="left" valign="top">85.560</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">N-BEATS</td><td align="left" valign="top">3.157</td><td align="left" valign="top">18.556</td><td align="left" valign="top">1079.920</td><td align="left" valign="top">9.898</td><td align="left" valign="top">160.962</td><td align="left" valign="top">88.502</td></tr><tr><td align="left" valign="top">South</td><td align="left" valign="top">TFT</td><td align="left" valign="top">2.134</td><td align="left" valign="top">9.599</td><td align="left" valign="top">328.874</td><td align="left" valign="top">13.385</td><td align="left" valign="top">243.345</td><td align="left" valign="top">106.853</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">TiDE</td><td align="left" valign="top">2.876</td><td align="left" valign="top">15.516</td><td align="left" valign="top">972.010</td><td align="left" valign="top">7.554</td><td align="left" 
valign="top">89.708</td><td align="left" valign="top">74.475</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Transformer</td><td align="left" valign="top">6.266</td><td align="left" valign="top">57.100</td><td align="left" valign="top">2719.575</td><td align="left" valign="top">11.539</td><td align="left" valign="top">157.023</td><td align="left" valign="top">132.782</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">LSTM</td><td align="left" valign="top">2.136</td><td align="left" valign="top">9.997</td><td align="left" valign="top">1156.550</td><td align="left" valign="top">16.119</td><td align="left" valign="top">314.581</td><td align="left" valign="top">1918.516</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">N-BEATS</td><td align="left" valign="top">2.431</td><td align="left" valign="top">13.461</td><td align="left" valign="top">1740.311</td><td align="left" valign="top">6.325</td><td align="left" valign="top">58.094</td><td align="left" valign="top">468.412</td></tr><tr><td align="left" valign="top">North</td><td align="left" valign="top">TFT</td><td align="left" valign="top">2.629</td><td align="left" valign="top">14.188</td><td align="left" valign="top">1639.950</td><td align="left" valign="top">13.610</td><td align="left" valign="top">336.891</td><td align="left" valign="top">2215.656</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">TiDE</td><td align="left" valign="top">2.701</td><td align="left" valign="top">15.989</td><td align="left" valign="top">2194.821</td><td align="left" valign="top">4.131</td><td align="left" valign="top">28.922</td><td align="left" valign="top">486.087</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Transformer</td><td align="left" valign="top">5.516</td><td align="left" valign="top">49.326</td><td align="left" valign="top">4291.066</td><td align="left" valign="top">9.267</td><td align="left" 
valign="top">101.251</td><td align="left" valign="top">1090.626</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>MAE: mean absolute error.</p></fn><fn id="table1fn2"><p><sup>b</sup>MSE: mean squared error.</p></fn><fn id="table1fn3"><p><sup>c</sup>MAPE: mean absolute percentage error.</p></fn><fn id="table1fn4"><p><sup>d</sup>LSTM: long short-term memory.</p></fn><fn id="table1fn5"><p><sup>e</sup>N-BEATS: neural basis expansion analysis for time series.</p></fn><fn id="table1fn6"><p><sup>f</sup>TFT: temporal fusion transformer.</p></fn><fn id="table1fn7"><p><sup>g</sup>TiDE: time-series dense encoder</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Auxiliary Effects of ChatGPT in Model Development</title><p>ChatGPT was systematically integrated as an auxiliary tool throughout the model development process. Standardized prompt templates were designed to specify functional requirements, architectural details, and expected outputs. Code generated by ChatGPT required only minimal adjustments, mainly related to dataset-specific preprocessing and hyperparameter configuration. This approach was particularly effective for implementing complex models such as TiDE and N-BEATS, which would otherwise demand extensive manual coding.</p><p>During the debugging process, a structured protocol was established, which recorded error messages, code context, and expected behavior for analysis by ChatGPT. For example, when resolving a syntax error related to type annotations (&#x201C;illegal target for annotation&#x201D;), ChatGPT accurately identified the source of the error as an illegal annotation on the right side of a variable assignment. This process significantly reduced debugging time and improved implementation efficiency. 
Similar benefits were observed in optimizing memory usage during the development of the transformer model; ChatGPT identified inefficient tensor operations causing computational bottlenecks (<xref ref-type="fig" rid="figure4">Figures 4</xref> and <xref ref-type="fig" rid="figure5">5</xref>).</p><p>The results indicate that LLM-assisted workflows can help overcome technical barriers and improve the efficiency of developing deep learning&#x2013;based epidemic prediction models.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Demonstrating ChatGPT&#x2019;s supporting role in model development: A case study on transformer code generation using prompt templates.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e74423_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>ChatGPT efficiently detects and corrects misdiagnoses, substantially reducing the duration of the debugging process.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e74423_fig05.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Overview</title><p>This study evaluated 5 deep learning models for predicting ILI rates across China while examining how ChatGPT supports predictive model creation. We compared the effectiveness of different AI architectures and implemented an organized AI-assisted development process. Our work resolves 2 key research gaps: limited testing of non-LSTM models and the absence of organized AI-supported approaches for flu prediction. The results advance practical modeling methods that health agencies can reliably apply for outbreak prediction. 
These outcomes highlight both the benefits and limitations of AI-powered disease forecasting tools, offering actionable guidance for improving public health monitoring systems and research methods.</p><p>The study used publicly available data on the positivity rate of ILI from the CNIC. The data are sourced from centralized weekly testing and reporting of clinical samples at national surveillance sites, having passed stringent quality control and outlier verification. For weeks with missing or delayed reports, we used linear interpolation to fill in the gaps and removed obviously abnormal samples to ensure the consistency and completeness of the data. The dataset includes data from the first week of 2014 to the 39th week of 2024, totaling over 10 years and about 545 consecutive time series samples, divided into national, southern, and northern levels to support analysis of regional differences. By analyzing long-term series data, it is clear that there are significant seasonal variations in ILI cases in mainland China as well as in the southern and northern regions (<xref ref-type="fig" rid="figure1">Figure 1</xref>). The high incidence in winter (January and February) and the low incidence in summer (July and August) are consistent with established epidemiology of influenza in temperate regions. The time series shows typical winter peaks and summer troughs; the seasonality aligns with previous studies by Ye et al [<xref ref-type="bibr" rid="ref26">26</xref>], Chen et al [<xref ref-type="bibr" rid="ref27">27</xref>], and Liu et al [<xref ref-type="bibr" rid="ref28">28</xref>], who documented similar patterns in Chinese populations. This seasonal variation is closely related to many factors and provides key information for the construction and prediction of subsequent models. It emphasizes the importance of choosing appropriate modeling methods that can adapt to observed changes.</p><p>Even though we see a clear seasonal pattern in the data, there&#x2019;s still a lot of ups and downs. 
This variation likely comes from a mix of different factors. First, influenza viruses tend to change a lot&#x2014;mutating and drifting so their spread isn&#x2019;t always steady [<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. Second, the level of immunity in the population varies, especially as people lose immunity over time or get vaccinated each year differently [<xref ref-type="bibr" rid="ref32">32</xref>]. In 2022, changes in COVID-19 policies, lifting restrictions, and shifting how health care resources are used also played a big role in how cases are reported [<xref ref-type="bibr" rid="ref33">33</xref>]. Also, improvements in public habits, like wearing masks and people&#x2019;s willingness to seek health care, have affected the data [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. The pandemic also changed how people move around, which impacts how easily the virus spreads. All these factors together make the data unpredictable, making it tricky for models to predict accurately. They need to handle irregular patterns and unexpected changes.</p><p>On the validation set of weeks 1&#x2010;39 in 2024, the national MAE of the TiDE model was 5.55, MSE=43.98, and MAPE=72.41%; in the south, MAE=7.55, MSE=89.71, and MAPE=74.48%; in the north, MAE=4.13, MSE=28.92, and MAPE=486.09%. According to the results, the model effectively predicts outcomes in a variety of data and regions. The performance comparison of deep learning models highlighted significant regional variations in predictive ability. The encoder-decoder architecture of the TiDE model appears to more effectively understand the temporal complexities of ILI transmission patterns, as evidenced by its superior performance both nationally and in southern China. 
Yet, the considerably poorer results of all models in northern China, even the TiDE model, which fares relatively better, point to core challenges in modeling the ILI dynamics of this region. The regional differences are probably due to the more extreme seasonal temperature changes in northern China, variations in population density, and possibly greater inconsistencies in data from surveillance reporting systems. The introduction of these factors creates nonlinear complexities that current deep learning frameworks have difficulty fully understanding, pointing to the necessity for region-specific modeling techniques. The use of MSE as the loss function, along with the Adam optimizer and learning rate scheduling, also contributed to stable training and convergence across all model architectures, ensuring an equitable comparison of predictive performance.</p><p>Using ChatGPT as a helpful research tool offers a better way to build disease prediction models. Unlike older methods that demand strong coding skills, this AI-supported approach makes advanced analysis easier to use through ready-to-use question templates and error-checking steps. Importantly, ChatGPT wasn&#x2019;t part of the final prediction system&#x2014;it only helped create and improve the code. While AI tools have been tested for helping doctors make decisions, using them to build disease spread models is still new. This method allowed testing multiple AI models (LSTM, N-BEATS, TFT, TiDE, and transformer) side-by-side using the same basic setup.</p></sec><sec id="s4-2"><title>Limitations</title><p>This study has several limitations. First, we did not establish a comprehensive systematic uncertainty quantification framework&#x2014;encompassing intrinsic model stochasticity, data reporting delays, policy-induced systemic variations, and parametric or nonparametric uncertainties&#x2014;which would require dedicated computational infrastructure. 
Specifically, internal uncertainties originate from inherent model randomness (eg, parameter initialization and stochastic dropout mechanisms), external uncertainties relate to data latency, policy adjustments, and demographic fluctuations, while structural uncertainties reflect unmodeled latent factors in influenza transmission dynamics. Current LLMs (eg, ChatGPT) lack the mathematical rigor necessary to support such complex probabilistic modeling [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>] .</p><p>Second, we could not precisely quantify potential uncertainty sources during model development due to incomplete standardization of data logging. This study prioritized validating framework feasibility over achieving precise estimation of specific uncertainty parameters. We therefore recommend subsequent investigations using enhanced quantitative validation tools. The present work provides a qualitative analytical framework, suggesting future research should focus on 2 empirically supported directions: developing LLM-compatible uncertainty quantification modules and validating time-dependent impacts of uncertainty factors on predictive performance through longitudinal studies, approaches that have demonstrated feasibility in analogous infectious disease modeling research.</p></sec><sec id="s4-3"><title>Conclusions</title><p>Deep learning models could significantly improve early disease warning systems by accurately predicting flu-like outbreaks (ILI). Using ChatGPT-supported systems to automatically generate code, fix errors in real-time, and improve prediction models allows researchers to follow standardized procedures&#x2014;reducing technical skill demands and speeding up studies. However, these models need to prove they work reliably in different regions, especially in northern China&#x2019;s complex disease spread patterns, before widespread use. 
Their implementation must be verified against actual local health data through public health validation checks.</p><p>In summary, more testing and adjustments for regional differences are critical to make these models adaptable across areas and set practical guidelines for real-world use in health care systems.</p></sec></sec></body><back><ack><p>The study was supported by the Key Project of the First-class discipline innovation-driven talent program of Guangxi Medical University (to WW), Natural Science Foundation of Guangxi (2025GXNSFDA069044), Innovative Research Team Project of Guangxi Natural Science Foundation (2025GXNSFGA069002), the National Natural Science Foundation of China (NSFC; 82302550, 82460405), and the Guangxi Medical University Training Program for Young Leading Talents (to JJ). The funders had no role in study design, data collection and analysis, decision to publish, or preparation of the manuscript.</p></ack><notes><sec><title>Data Availability</title><p>The datasets generated or analyzed during the present study are available in the public database of the Chinese National Influenza Center [<xref ref-type="bibr" rid="ref38">38</xref>].</p></sec></notes><fn-group><fn fn-type="con"><p>JJ, LY, and HL conceptualized the study, WH and WW designed the study, and BZ, XX, MZ, and SL performed the literature search. WW and WH performed resource analysis and data extraction. ZY, JL, and RC provided key insights regarding data interpretation. WH, WW, and JJ wrote the first draft. HL and LY edited the paper, with all authors providing critical input and edits. 
All authors have read and agreed to the published version of the manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">CNIC</term><def><p>Chinese National Influenza Center</p></def></def-item><def-item><term id="abb2">ILI</term><def><p>influenza-like illness</p></def></def-item><def-item><term id="abb3">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb4">LSTM</term><def><p>long short-term memory</p></def></def-item><def-item><term id="abb5">MAE</term><def><p>mean absolute error</p></def></def-item><def-item><term id="abb6">MAPE</term><def><p>mean absolute percentage error</p></def></def-item><def-item><term id="abb7">MSE</term><def><p>mean squared error</p></def></def-item><def-item><term id="abb8">N-BEATS</term><def><p>neural basis expansion analysis for time series</p></def></def-item><def-item><term id="abb9">TFT</term><def><p>temporal fusion transformer</p></def></def-item><def-item><term id="abb10">TiDE</term><def><p>time-series dense encoder</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Labella</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Merel</surname><given-names>SE</given-names> </name></person-group><article-title>Influenza</article-title><source>Med Clin North Am</source><year>2013</year><month>07</month><volume>97</volume><issue>4</issue><fpage>621</fpage><lpage>645</lpage><pub-id pub-id-type="doi">10.1016/j.mcna.2013.03.001</pub-id><pub-id pub-id-type="medline">23809717</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Caceres</surname><given-names>CJ</given-names> </name><name 
name-style="western"><surname>Seibert</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cargnin Faccin</surname><given-names>F</given-names> </name><name name-style="western"><surname>Cardenas-Garcia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rajao</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Perez</surname><given-names>DR</given-names> </name></person-group><article-title>Influenza antivirals and animal models</article-title><source>FEBS Open Bio</source><year>2022</year><month>06</month><volume>12</volume><issue>6</issue><fpage>1142</fpage><lpage>1165</lpage><pub-id pub-id-type="doi">10.1002/2211-5463.13416</pub-id><pub-id pub-id-type="medline">35451200</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ohkusa</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Shigematsu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Taniguchi</surname><given-names>K</given-names> </name><name name-style="western"><surname>Okabe</surname><given-names>N</given-names> </name></person-group><article-title>Experimental surveillance using data on sales of over-the-counter medications--Japan, November 2003-April 2004</article-title><source>MMWR Suppl</source><year>2005</year><month>08</month><day>26</day><volume>54</volume><fpage>47</fpage><lpage>52</lpage><pub-id pub-id-type="medline">16177693</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vergu</surname><given-names>E</given-names> </name><name name-style="western"><surname>Grais</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Sarter</surname><given-names>H</given-names> 
</name><etal/></person-group><article-title>Medication sales and syndromic surveillance, France</article-title><source>Emerg Infect Dis</source><year>2006</year><month>03</month><volume>12</volume><issue>3</issue><fpage>416</fpage><lpage>421</lpage><pub-id pub-id-type="doi">10.3201/eid1203.050573</pub-id><pub-id pub-id-type="medline">16704778</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hochreiter</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schmidhuber</surname><given-names>J</given-names> </name></person-group><article-title>Long short-term memory</article-title><source>Neural Comput</source><year>1997</year><month>11</month><day>15</day><volume>9</volume><issue>8</issue><fpage>1735</fpage><lpage>1780</lpage><pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id><pub-id pub-id-type="medline">9377276</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guo</surname><given-names>Q</given-names> </name><name name-style="western"><surname>He</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name></person-group><article-title>Prediction of monthly average and extreme atmospheric temperatures in Zhengzhou based on artificial neural network and deep learning models</article-title><source>Front For Glob Change</source><year>2023</year><volume>6</volume><pub-id pub-id-type="doi">10.3389/ffgc.2023.1249300</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>Q</given-names> 
</name></person-group><article-title>Comparative analysis of multiple deep learning models for forecasting monthly ambient PM2.5 concentrations: a case study in Dezhou City, China</article-title><source>Atmosphere (Basel)</source><year>2024</year><volume>15</volume><issue>12</issue><fpage>1432</fpage><pub-id pub-id-type="doi">10.3390/atmos15121432</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guo</surname><given-names>Q</given-names> </name><name name-style="western"><surname>He</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name></person-group><article-title>Monthly climate prediction using deep convolutional neural network and long short-term memory</article-title><source>Sci Rep</source><year>2024</year><volume>14</volume><issue>1</issue><fpage>17748</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-68906-6</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guo</surname><given-names>Q</given-names> </name><name name-style="western"><surname>He</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name></person-group><article-title>Assessing the effectiveness of long short-term memory and artificial neural network in predicting daily ozone concentrations in Liaocheng City</article-title><source>Sci Rep</source><year>2025</year><month>02</month><day>25</day><volume>15</volume><issue>1</issue><fpage>6798</fpage><pub-id pub-id-type="doi">10.1038/s41598-025-91329-w</pub-id><pub-id pub-id-type="medline">40000767</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Guo</surname><given-names>Q</given-names> </name><name name-style="western"><surname>He</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name></person-group><article-title>A performance comparison study on climate prediction in Weifang City using different deep learning models</article-title><source>Water (Basel)</source><year>2024</year><volume>16</volume><issue>19</issue><fpage>2870</fpage><pub-id pub-id-type="doi">10.3390/w16192870</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Carpov</surname><given-names>D</given-names> </name><name name-style="western"><surname>Chapados</surname><given-names>N</given-names> </name><name name-style="western"><surname>Bengio</surname><given-names>Y</given-names> </name></person-group><article-title>N-BEATS: neural basis expansion analysis for interpretable time series forecasting</article-title><comment>Preprint posted online on  Feb 20, 2020</comment><pub-id pub-id-type="doi">10.48550/arXiv.1905.10437</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lim</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ar&#x0131;k</surname><given-names>S&#x00D6;</given-names> </name><name name-style="western"><surname>Loeff</surname><given-names>N</given-names> </name><name name-style="western"><surname>Pfister</surname><given-names>T</given-names> </name></person-group><article-title>Temporal Fusion 
Transformers for interpretable multi-horizon time series forecasting</article-title><source>Int J Forecast</source><year>2021</year><month>10</month><volume>37</volume><issue>4</issue><fpage>1748</fpage><lpage>1764</lpage><pub-id pub-id-type="doi">10.1016/j.ijforecast.2021.03.012</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fitzner</surname><given-names>J</given-names> </name><name name-style="western"><surname>Qasmieh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mounts</surname><given-names>AW</given-names> </name><etal/></person-group><article-title>Revision of clinical case definitions: influenza-like illness and severe acute respiratory infection</article-title><source>Bull World Health Organ</source><year>2018</year><month>02</month><day>1</day><volume>96</volume><issue>2</issue><fpage>122</fpage><lpage>128</lpage><pub-id pub-id-type="doi">10.2471/BLT.17.194514</pub-id><pub-id pub-id-type="medline">29403115</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Vaswani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shazeer</surname><given-names>N</given-names> </name><name name-style="western"><surname>Parmar</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Attention is all you need</article-title><comment>Preprint posted online on  Jun 12, 2017</comment><pub-id pub-id-type="doi">10.48550/arXiv.1706.03762</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Higgins</surname><given-names>JPT</given-names> </name><name 
name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>G&#x00F8;tzsche</surname><given-names>PC</given-names> </name><etal/></person-group><article-title>The Cochrane Collaboration&#x2019;s tool for assessing risk of bias in randomised trials</article-title><source>BMJ</source><year>2011</year><month>10</month><day>18</day><volume>343</volume><fpage>d5928</fpage><pub-id pub-id-type="doi">10.1136/bmj.d5928</pub-id><pub-id pub-id-type="medline">22008217</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cheng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Bai</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>X</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>T</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>R</given-names> </name></person-group><article-title>Analysis and prediction of infectious diseases based on spatial visualization and machine learning</article-title><source>Sci Rep</source><year>2024</year><volume>14</volume><issue>1</issue><fpage>28659</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-80058-1</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Faes</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wagner</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>DJ</given-names> </name><etal/></person-group><article-title>Automated deep learning design for medical image classification by health-care professionals 
with no coding experience: a feasibility study</article-title><source>Lancet Digit Health</source><year>2019</year><month>09</month><volume>1</volume><issue>5</issue><fpage>e232</fpage><lpage>e242</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(19)30108-6</pub-id><pub-id pub-id-type="medline">33323271</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><name name-style="western"><surname>See</surname><given-names>KC</given-names> </name></person-group><article-title>Artificial Intelligence for COVID-19: rapid review</article-title><source>J Med Internet Res</source><year>2020</year><month>10</month><day>27</day><volume>22</volume><issue>10</issue><fpage>e21476</fpage><pub-id pub-id-type="doi">10.2196/21476</pub-id><pub-id pub-id-type="medline">32946413</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Chua</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rickard</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lorenzo</surname><given-names>A</given-names> </name></person-group><article-title>ChatGPT and large language model (LLM) chatbots: the current state of acceptability and a proposal for guidelines on utilization in academic medicine</article-title><source>J Pediatr Urol</source><year>2023</year><month>10</month><volume>19</volume><issue>5</issue><fpage>598</fpage><lpage>604</lpage><pub-id pub-id-type="doi">10.1016/j.jpurol.2023.05.018</pub-id><pub-id pub-id-type="medline">37328321</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Levin</surname><given-names>G</given-names> </name><name name-style="western"><surname>Brezinov</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Meyer</surname><given-names>R</given-names> </name></person-group><article-title>Exploring the use of ChatGPT in OBGYN: a bibliometric analysis of the first ChatGPT-related publications</article-title><source>Arch Gynecol Obstet</source><year>2023</year><volume>308</volume><issue>6</issue><fpage>1785</fpage><lpage>1789</lpage><pub-id pub-id-type="doi">10.1007/s00404-023-07081-x</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ignjatovi&#x0107;</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stevanovi&#x0107;</surname><given-names>L</given-names> </name></person-group><article-title>Efficacy and limitations of ChatGPT as a biostatistical problem-solving tool in medical education in Serbia: a descriptive study</article-title><source>J Educ Eval Health Prof</source><year>2023</year><volume>20</volume><fpage>28</fpage><pub-id pub-id-type="doi">10.3352/jeehp.2023.20.28</pub-id><pub-id pub-id-type="medline">37840252</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esteva</surname><given-names>A</given-names> </name><name name-style="western"><surname>Robicquet</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ramsundar</surname><given-names>B</given-names> </name><etal/></person-group><article-title>A guide to deep learning in healthcare</article-title><source>Nat Med</source><year>2019</year><month>01</month><volume>25</volume><issue>1</issue><fpage>24</fpage><lpage>29</lpage><pub-id 
pub-id-type="doi">10.1038/s41591-018-0316-z</pub-id><pub-id pub-id-type="medline">30617335</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thirunavukarasu</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Ting</surname><given-names>DSJ</given-names> </name><name name-style="western"><surname>Elangovan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Gutierrez</surname><given-names>L</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>TF</given-names> </name><name name-style="western"><surname>Ting</surname><given-names>DSW</given-names> </name></person-group><article-title>Large language models in medicine</article-title><source>Nat Med</source><year>2023</year><month>08</month><volume>29</volume><issue>8</issue><fpage>1930</fpage><lpage>1940</lpage><pub-id pub-id-type="doi">10.1038/s41591-023-02448-8</pub-id><pub-id pub-id-type="medline">37460753</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><source>China National Influenza Center Database</source><access-date>2024-12-30</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ivdc.chinacdc.cn/cnic">https://ivdc.chinacdc.cn/cnic</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ye</surname><given-names>C</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Understanding the complex seasonality of seasonal influenza A and B virus transmission: Evidence from six years of surveillance data in Shanghai, 
China</article-title><source>Int J Infect Dis</source><year>2019</year><month>04</month><volume>81</volume><fpage>57</fpage><lpage>65</lpage><pub-id pub-id-type="doi">10.1016/j.ijid.2019.01.027</pub-id><pub-id pub-id-type="medline">30684745</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>H</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>M</given-names> </name></person-group><article-title>Seasonality of influenza-like illness and short-term forecasting model in Chongqing from 2010 to 2022</article-title><source>BMC Infect Dis</source><year>2024</year><month>04</month><day>23</day><volume>24</volume><issue>1</issue><fpage>432</fpage><pub-id pub-id-type="doi">10.1186/s12879-024-09301-4</pub-id><pub-id pub-id-type="medline">38654199</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Impact of non-pharmaceutical interventions during COVID-19 on future influenza trends in Mainland China</article-title><source>BMC Infect Dis</source><year>2023</year><volume>23</volume><issue>1</issue><fpage>632</fpage><pub-id pub-id-type="doi">10.1186/s12879-023-08594-1</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nelson</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Holmes</surname><given-names>EC</given-names> </name></person-group><article-title>The 
evolution of epidemic influenza</article-title><source>Nat Rev Genet</source><year>2007</year><month>03</month><volume>8</volume><issue>3</issue><fpage>196</fpage><lpage>205</lpage><pub-id pub-id-type="doi">10.1038/nrg2053</pub-id><pub-id pub-id-type="medline">17262054</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Holmes</surname><given-names>EC</given-names> </name><name name-style="western"><surname>Gonz&#x00E1;lez-Candelas</surname><given-names>F</given-names> </name></person-group><article-title>The population genetics and evolutionary epidemiology of RNA viruses</article-title><source>Nat Rev Microbiol</source><year>2004</year><month>04</month><volume>2</volume><issue>4</issue><fpage>279</fpage><lpage>288</lpage><pub-id pub-id-type="doi">10.1038/nrmicro863</pub-id><pub-id pub-id-type="medline">15031727</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rambaut</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pybus</surname><given-names>OG</given-names> </name><name name-style="western"><surname>Nelson</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Viboud</surname><given-names>C</given-names> </name><name name-style="western"><surname>Taubenberger</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Holmes</surname><given-names>EC</given-names> </name></person-group><article-title>The genomic and epidemiological dynamics of human influenza A virus</article-title><source>Nature New Biol</source><year>2008</year><month>05</month><day>29</day><volume>453</volume><issue>7195</issue><fpage>615</fpage><lpage>619</lpage><pub-id 
pub-id-type="doi">10.1038/nature06945</pub-id><pub-id pub-id-type="medline">18418375</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Needle</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Russell</surname><given-names>RS</given-names> </name></person-group><article-title>Immunity debt, a gap in learning, or immune dysfunction?</article-title><source>Viral Immunol</source><year>2023</year><month>01</month><volume>36</volume><issue>1</issue><fpage>1</fpage><lpage>2</lpage><pub-id pub-id-type="doi">10.1089/vim.2022.0204</pub-id><pub-id pub-id-type="medline">36648772</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>WW</given-names> </name><name name-style="western"><surname>Mott</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Olsen</surname><given-names>SJ</given-names> </name></person-group><article-title>The role of non-pharmaceutical interventions on influenza circulation during the COVID-19 pandemic in nine tropical Asian countries</article-title><source>Influenza Other Respir Viruses</source><year>2022</year><month>05</month><volume>16</volume><issue>3</issue><fpage>568</fpage><lpage>576</lpage><pub-id pub-id-type="doi">10.1111/irv.12953</pub-id><pub-id pub-id-type="medline">34997697</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feng</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>T</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Q</given-names> 
</name><etal/></person-group><article-title>Impact of COVID-19 outbreaks and interventions on influenza in China and the United States</article-title><source>Nat Commun</source><year>2021</year><month>05</month><day>31</day><volume>12</volume><issue>1</issue><fpage>3249</fpage><pub-id pub-id-type="doi">10.1038/s41467-021-23440-1</pub-id><pub-id pub-id-type="medline">34059675</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hazra</surname><given-names>D</given-names> </name><name name-style="western"><surname>Chandy</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Thanjavurkar</surname><given-names>A</given-names> </name><etal/></person-group><article-title>A clinico-epidemiological profile, coinfections and outcome of patients with influenza like illnesses (ILI) presenting to the emergency department during the COVID-19 pandemic</article-title><source>J Family Med Prim Care</source><year>2023</year><month>04</month><volume>12</volume><issue>4</issue><fpage>672</fpage><lpage>678</lpage><pub-id pub-id-type="doi">10.4103/jfmpc.jfmpc_1705_22</pub-id><pub-id pub-id-type="medline">37312766</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arslan</surname><given-names>S</given-names> </name></person-group><article-title>Exploring the potential of ChatGPT in personalized obesity treatment</article-title><source>Ann Biomed Eng</source><year>2023</year><month>09</month><volume>51</volume><issue>9</issue><fpage>1887</fpage><lpage>1888</lpage><pub-id pub-id-type="doi">10.1007/s10439-023-03227-9</pub-id><pub-id pub-id-type="medline">37145177</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Giske</surname><given-names>CG</given-names> </name><name name-style="western"><surname>Bressan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Fiechter</surname><given-names>F</given-names> </name><etal/></person-group><article-title>GPT-4-based AI agents-the new expert system for detection of antimicrobial resistance mechanisms?</article-title><source>J Clin Microbiol</source><year>2024</year><month>11</month><day>13</day><volume>62</volume><issue>11</issue><fpage>e0068924</fpage><pub-id pub-id-type="doi">10.1128/jcm.00689-24</pub-id><pub-id pub-id-type="medline">39417635</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Siyao</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Gu</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Bailando++: 3D dance GPT with choreographic memory</article-title><source>IEEE Trans Pattern Anal Mach Intell</source><year>2023</year><month>12</month><volume>45</volume><issue>12</issue><fpage>14192</fpage><lpage>14207</lpage><pub-id pub-id-type="doi">10.1109/TPAMI.2023.3319435</pub-id><pub-id pub-id-type="medline">37751342</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="web"><article-title>NSTI</article-title><source>Public Health Science Data [Website in Chinese]</source><access-date>2025-06-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://www.phsciencedata.cn">http://www.phsciencedata.cn</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Prompts.</p><media xlink:href="jmir_v27i1e74423_app1.docx" xlink:title="DOCX File, 29 
KB"/></supplementary-material></app-group></back></article>