<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e73601</article-id>
      <article-id pub-id-type="pmid">40397945</article-id>
      <article-id pub-id-type="doi">10.2196/73601</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Identifying Disinformation on the Extended Impacts of COVID-19: Methodological Investigation Using a Fuzzy Ranking Ensemble of Natural Language Processing Models</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Sarvestan</surname>
            <given-names>Javad</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Rathakrishnan</surname>
            <given-names>Akila</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Haider Bangyal</surname>
            <given-names>Waqas</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Chen</surname>
            <given-names>Jian-An</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-8289-7404</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Chung</surname>
            <given-names>Wu-Chun</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0001-1358-6579</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Hung</surname>
            <given-names>Che-Lun</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Institute of Biomedical Informatics</institution>
            <institution>National Yang Ming Chiao Tung University</institution>
            <addr-line>No 155, Sec 2, Linong St, Beitou Dist</addr-line>
            <addr-line>Taipei, 112</addr-line>
            <country>Taiwan</country>
            <phone>886 2 2826 7349</phone>
            <email>clhung@nycu.edu.tw</email>
          </address>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8906-9367</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Wu</surname>
            <given-names>Chun-Ying</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5053-1801</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Institute of Biomedical Informatics</institution>
        <institution>National Yang Ming Chiao Tung University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Information and Computer Engineering</institution>
        <institution>Chung Yuan Christian University</institution>
        <addr-line>Taoyuan</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Health Innovation Center</institution>
        <institution>National Yang Ming Chiao Tung University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Microbiota Research Center</institution>
        <institution>National Yang Ming Chiao Tung University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>College of Medicine</institution>
        <institution>China Medical University</institution>
        <addr-line>Taichung</addr-line>
        <country>Taiwan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Che-Lun Hung <email>clhung@nycu.edu.tw</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>21</day>
        <month>5</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e73601</elocation-id>
      <history>
        <date date-type="received">
          <day>7</day>
          <month>3</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>2</day>
          <month>4</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>10</day>
          <month>4</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>17</day>
          <month>4</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Jian-An Chen, Wu-Chun Chung, Che-Lun Hung, Chun-Ying Wu. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 21.05.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e73601" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>During the COVID-19 pandemic, the continuous spread of misinformation on the internet posed an ongoing threat to public trust and understanding of epidemic prevention policies. Although the pandemic is now under control, information regarding the risks of long-term COVID-19 effects and reinfection still needs to be integrated into COVID-19 policies.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to develop a robust and generalizable deep learning framework for detecting misinformation related to the prolonged impacts of COVID-19 by integrating pretrained language models (PLMs) with an innovative fuzzy rank-based ensemble approach.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A comprehensive dataset comprising 566 genuine and 2361 fake samples was curated from reliable open sources and processed using advanced techniques. The dataset was randomly split using the <italic>scikit-learn</italic> package to facilitate both training and evaluation. Deep learning models were trained for 20 epochs on a Tesla T4 (for hierarchical attention networks [HANs]) and an RTX A5000 (for the other models). To enhance performance, we implemented an ensemble learning strategy that incorporated a reparameterized Gompertz function, which assigned fuzzy ranks based on each model’s prediction confidence for each test case. This method effectively fused outputs from state-of-the-art PLMs such as robustly optimized bidirectional encoder representations from transformers pretraining approach (RoBERTa), decoding-enhanced bidirectional encoder representations from transformers with disentangled attention (DeBERTa), and XLNet.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>After training on the dataset, various classification methods were evaluated on the test set, including the fuzzy rank-based method and state-of-the-art large language models. Experimental results reveal that language models, particularly XLNet, outperform traditional approaches that combine term frequency–inverse document frequency features with support vector machine or utilize deep models like HAN. The evaluation metrics—including accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, and area under the curve (AUC)—indicated a clear performance advantage for models that had a larger number of parameters. However, this study also highlights that model architecture, training procedures, and optimization techniques are critical determinants of classification effectiveness. XLNet’s permutation language modeling approach enhances bidirectional context understanding, allowing it to surpass even larger models in the bidirectional encoder representations from transformers (BERT) series despite having relatively fewer parameters. Notably, the fuzzy rank-based ensemble method, which combines multiple language models, achieved impressive results on the test set, with an accuracy of 93.52%, a precision of 94.65%, an <italic>F</italic><sub>1</sub>-score of 96.03%, and an AUC of 97.15%.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The fusion of ensemble learning with PLMs and the Gompertz function, employing fuzzy rank-based methodology, introduces a novel prediction approach with prospects for enhancing accuracy and reliability. Additionally, the experimental results imply that training solely on textual content can yield high prediction accuracy, thereby providing valuable insights into the optimization of fake news detection systems. These findings not only aid in detecting misinformation but also have broader implications for the application of advanced deep learning techniques in public health policy and communication.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>misinformation</kwd>
        <kwd>COVID-19</kwd>
        <kwd>ensemble models</kwd>
        <kwd>fuzzy ranks</kwd>
        <kwd>language model</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>From 2019 to 2022, the global community faced challenges posed by the COVID-19 pandemic. In response, governments worldwide and the World Health Organization (WHO) collaborated extensively to reduce the spread of the virus. An increased demand for trustworthy information sources and accurate health guidance arose during this global health crisis. However, the surge in these informational needs overlapped with the rapid spread of misinformation and false news through social media platforms, leading to widespread public confusion.</p>
        <p>The WHO used the term “infodemic” to describe the spread of misinformation during the pandemic [<xref ref-type="bibr" rid="ref1">1</xref>]. They emphasized the potential threat that such misinformation posed to national epidemic prevention policies. Trust in incorrect or misleading information could result in adverse health behaviors and noncompliance with health policies, worsening the pandemic's challenges.</p>
        <p>While the distribution of COVID-19 vaccines contributed to the gradual control of the pandemic, the virus persisted, giving rise to postinfection symptoms known as long COVID, confirmed in at least 10% of people who contracted the virus [<xref ref-type="bibr" rid="ref2">2</xref>]. Additionally, instances of reinfection after initial recovery were observed, with research from the US Department of Veterans Affairs indicating increased risks of mortality, hospitalization, and postsymptomatic conditions for reinfected patients [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>Despite the diminishing immediate threat of COVID-19, the ongoing risks associated with long COVID and reinfection make it important to retain public attention on COVID-19–related policies and information. The challenges of fake news and misinformation persist as the world transitions into a postpandemic era coexisting with the virus. Specifically, issues related to long COVID and reinfection continue to be crucial points for misinformation. Therefore, the timely and accurate identification and classification of such misinformation is critical.</p>
      </sec>
      <sec>
        <title>Prior Work</title>
        <p>Throughout the COVID-19 pandemic, some studies used machine learning and deep learning techniques to address the challenge of detecting fake news and misinformation.</p>
        <p>Patwa et al [<xref ref-type="bibr" rid="ref4">4</xref>] collected COVID-19–related texts from publicly available fact-checking websites and social media platforms. Their approach involved term frequency–inverse document frequency (TF-IDF) for feature extraction and applying various machine learning algorithms, such as logistic regression, support vector machines, decision trees, and gradient boosting, for the binary classification of fake news. Das et al [<xref ref-type="bibr" rid="ref5">5</xref>] employed pretrained language models (PLMs), including robustly optimized bidirectional encoder representations from transformers pretraining approach (RoBERTa) and XLNet, for preprocessing and training on the same dataset. By combining predictions from multiple models through voting, they achieved admirable results in the CONSTRAINT 2021 COVID-19 Fake News Detection competition.</p>
        <p>Paka et al [<xref ref-type="bibr" rid="ref6">6</xref>] argued that relying solely on textual features might be insufficient for accurate fake news classification. To address this, they gathered COVID-19–related tweets from Twitter (now known as X), incorporating additional data such as the number of likes for a tweet, URL links, and each poster’s follower count. Introducing a multifeature classification approach for fake news using a cross-stitch unit combined with a long short-term memory architecture enhanced the classification accuracy.</p>
        <p>Furthermore, research teams focusing on the Chinese language used deep learning frameworks like recurrent neural network, convolutional neural network, and transformers to classify COVID-19 fake news in Chinese text [<xref ref-type="bibr" rid="ref7">7</xref>]. These endeavors highlight the global commitment to addressing the infodemic and giving accurate and reliable information.</p>
        <p>Additionally, the emergence of large language models (LLMs) based on transformers, such as ChatGPT [<xref ref-type="bibr" rid="ref8">8</xref>], has become prominent in recent years. These models, capable of understanding natural language and interacting with users, hold the potential to contribute to the development of more robust tools for combating the spread of false news across various domains.</p>
        <p>Beyond COVID-19–specific studies, recent research in other domains has demonstrated the effectiveness of machine learning in sentiment analysis and fake news detection. For example, one study proposed a highly efficient technique for polarity classification of X (formerly known as Twitter) data related to COVID-19 fake news [<xref ref-type="bibr" rid="ref9">9</xref>]. This work applied 5 machine learning classifiers—support vector machine, logistic regression, <italic>k</italic>-nearest neighbor, decision trees, and random forest—to predict whether news was fake or real, thereby completing the natural language processing (NLP) cycle from data corpus to classification. In another study, researchers examined consumer behavior toward online shopping using machine learning [<xref ref-type="bibr" rid="ref10">10</xref>]. They utilized a count vectorizer to tokenize text documents and build vocabularies, subsequently applying classification models, including <italic>k</italic>-nearest neighbor, random forest, and support vector machine (SVM), to analyze the sentiment scores of the product reviews. A third study focused on opinion mining regarding politics and inflation using a Roman Urdu dataset sourced from Kaggle [<xref ref-type="bibr" rid="ref11">11</xref>]. The researchers experimented with various text processing techniques and classification algorithms (naive Bayes, Bayes Net, KStar, decision tree, and random forest), demonstrating how attribute selection and preprocessing can improve accuracy. These diverse studies underscore the versatility and potential of machine learning approaches in various contexts, reinforcing and complementing efforts to address the challenges posed by COVID-19 misinformation.</p>
        <p>While numerous studies have explored text classification for fake news detection, many have primarily focused on traditional machine learning techniques or earlier deep learning models. However, few studies have compared these approaches with state-of-the-art LLMs that offer advanced natural language understanding capabilities. Moreover, although ensemble methods have been applied in some cases, there remains a lack of investigation into sophisticated ensemble strategies that leverage model confidence rankings and nonlinear fusion techniques to further improve classification performance. This study aims to bridge these gaps by providing a comprehensive comparison between conventional approaches and LLMs and proposing a novel ensemble framework that integrates fuzzy rank-based fusion with the Gompertz function.</p>
      </sec>
      <sec>
        <title>Study Goal</title>
        <p>Considering advancements in deep learning technologies and NLP, this study investigates the performance of various deep learning models in detecting fake news. The objective is to provide a scientific and efficient method for fake news detection in the postpandemic era. Texts about long COVID and reinfection were collected from open-source databases and through web crawling, followed by a preprocessing phase to clean and refine the data. Next, various machine learning and deep learning models were trained and evaluated based on their performance after preprocessing. Finally, a fuzzy rank-based ensemble approach combined multiple models. The performance of this ensemble method was then compared with the state-of-the-art LLM methods.</p>
        <p>The proposed method achieved an <italic>F</italic><sub>1</sub>-score of 96.03%, which can significantly help classify misinformation in real time. The results also demonstrate the effectiveness of language models in distinguishing misinformation.</p>
        <p>The main contributions of this study are threefold. First, an in-depth analysis of public datasets related to long COVID was conducted, revealing distinct distribution patterns between genuine and fake articles, which provides valuable insights into the nature and propagation of misinformation. Second, a systematic comparison of traditional text classification methods and state-of-the-art PLMs was carried out, delineating the strengths and limitations of each approach in the context of COVID-19 misinformation detection. Third, a novel ensemble method that combines a fuzzy rank approach with the Gompertz function was developed to enhance the classification performance of language models, resulting in a more robust and accurate detection framework.</p>
        <p>The rest of this paper is organized as follows. The next section outlines the methodology, including data collection, preprocessing, analysis, machine learning and deep learning models used, proposed ensemble technique, and implementation details. This is followed by the presentation of experimental results and a comparison of different approaches, as well as several real-case inference examples. Finally, the discussion section highlights the principal findings, acknowledges the study’s limitations, and concludes with a summary of key insights.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>The method comprises 4 key stages: data collection, data preprocessing, data analysis, and modeling. Initially, information was collected from diverse publicly available open sources. Given the inherent inconsistency of internet-based open sources, the gathered data underwent a preprocessing phase to enhance their cleanliness. After the preprocessing, a foundational analysis was undertaken to better understand the dataset’s characteristics. Finally, various deep learning models were trained and compared with other text classification methods. We introduced a fuzzy ranking method with the Gompertz function, which adjusts weights based on the confidence scores of each classifier to generate final predictions for each sample. This fusion of ensemble learning and the Gompertz function offers a fresh perspective on prediction methodologies.</p>
      </sec>
      <sec>
        <title>Data Collection</title>
        <sec>
          <title>Process Explanation</title>
          <p>Articles and claims related to COVID-19 were collected from diverse internet sources. The gathered materials underwent a filter phase using keywords associated with long COVID and reinfection, such as chronic, long-term, persistent, after-effects, sequelae, complications, recovery, post covid, post-covid, omicron, subvariant, reinfection, immune, and variant. The resulting dataset, categorized as either “genuine” or “fake,” originated from 3 primary sources: open-source datasets, fact-checking websites, and governmental bodies.</p>
        </sec>
        <sec>
          <title>Open-Source Dataset</title>
          <p><xref ref-type="boxed-text" rid="box1">Textbox 1</xref> outlines information related to the open-source datasets.</p>
          <boxed-text id="box1" position="float">
            <title>Information about the open-source datasets.</title>
            <list list-type="bullet">
              <list-item>
                <p><bold>Fighting an Infodemic</bold> [<xref ref-type="bibr" rid="ref4">4</xref>]: This dataset includes COVID-19–related topics from platforms like X (formerly known as Twitter), Facebook, and fact-checking websites. Utilized for the Constraint@AAAI2021 COVID-19 Fake News Detection in English competition, only labeled data from this dataset were used and are available on GitHub [<xref ref-type="bibr" rid="ref12">12</xref>].</p>
              </list-item>
              <list-item>
                <p><bold>CTF (COVID-19 Twitter Fake News)</bold> [<xref ref-type="bibr" rid="ref6">6</xref>]: Focused on tweets from X, this dataset includes labeled and unlabeled data concerning genuine and fake COVID-19 news. For this study, only the labeled text content data were utilized.</p>
              </list-item>
              <list-item>
                <p><bold>CoAID (COVID-19 Health Care Misinformation Data Set)</bold> [<xref ref-type="bibr" rid="ref13">13</xref>]: This diverse COVID-19 fake news dataset contains news from the internet and social media platforms, user engagements, tweets, and labels appearing on X.</p>
              </list-item>
              <list-item>
                <p><bold>FibVID (Fake News Information-Broadcasting Data Set of COVID-19)</bold> [<xref ref-type="bibr" rid="ref14">14</xref>]: This dataset collects claims from fact-checking websites like Snopes and Politifact, along with related discourse from X. It includes both COVID-19 and non–COVID-19 topics divided into 4 labels. For this study, only data related to COVID-19 from categories 0 and 1 were used.</p>
              </list-item>
              <list-item>
                <p><bold>FaCOV (COVID-19 Viral News and Rumors Fact-Check Articles Data Set)</bold> [<xref ref-type="bibr" rid="ref15">15</xref>]: Collected from 13 English fact-checking websites related to COVID-19, this dataset includes article titles, URLs, claims, and abstracts. Data with 2 category labels were used by merging titles and article contents.</p>
              </list-item>
            </list>
          </boxed-text>
        </sec>
        <sec>
          <title>Fact-Checking Websites</title>
          <p>While open-source databases offer significant support, they often have limitations regarding data timeframes. To overcome these restrictions, web scraping and data cleaning techniques were employed to gather more recent data from verified fact-checking websites such as Snopes [<xref ref-type="bibr" rid="ref16">16</xref>] and PolitiFact [<xref ref-type="bibr" rid="ref17">17</xref>], which are certified by the International Fact-Checking Network. The web crawling method was employed to systematically extract articles classified under “CORONAVIRUS” and “COVID-19” from Snopes (data up to August 31, 2023) and PolitiFact (data up to July 31, 2023).</p>
          <p>Alongside the article contents, labels were collected for model training. A total of 1500 and 806 texts were extracted from Snopes and PolitiFact, respectively. Subsequently, the collected data underwent keyword filtering to align more closely with the topic.</p>
          <p>In PolitiFact, articles are categorized into 6 labels: pants-on-fire, false, barely true, half-true, mostly true, and true. In contrast, Snopes classifies articles into 14 labels: true, mostly true, mixture, mostly false, false, unproven, outdated, miscaptioned, correct-attribution, misattributed, scam, legend, labeled-satire, and lost-legend. Based on the research by Khan et al [<xref ref-type="bibr" rid="ref18">18</xref>], the labels from diverse sources were reclassified into 2 categories: genuine and fake.</p>
        </sec>
        <sec>
          <title>Governmental Bodies</title>
          <p>Government and public institution websites, such as the WHO and the Centers for Disease Control and Prevention, were also regarded as primary sources. These institutions have consistently distributed up-to-date information and guidelines throughout the pandemic, establishing them as widely acknowledged, reliable, and accurate data sources. Articles related to “long COVID” and “reinfection” were collected from the COVID-19 sections of these websites. Because the original content was often lengthy, ChatGPT [<xref ref-type="bibr" rid="ref8">8</xref>] was used to refine and reorganize the content into short claims. The structured claims ensured appropriate length and clarity.</p>
          <p>Consequently, each claim was labeled as “genuine” owing to its reputable source. The dataset used for model training encapsulated the latest information obtained through these procedural steps. <xref ref-type="table" rid="table1">Table 1</xref> presents the filtered sample counts from various data sources.</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>Sample size from different sources.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <thead>
                <tr valign="top">
                  <td>Source</td>
                  <td>Time until</td>
                  <td>Sample size, n</td>
                  <td>Fake label, n</td>
                  <td>Genuine label, n</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>CTF<sup>a</sup></td>
                  <td>~2021</td>
                  <td>1292</td>
                  <td>1130</td>
                  <td>162</td>
                </tr>
                <tr valign="top">
                  <td>Fighting an Infodemic</td>
                  <td>~2021</td>
                  <td>218</td>
                  <td>62</td>
                  <td>156</td>
                </tr>
                <tr valign="top">
                  <td>CoAID<sup>b</sup></td>
                  <td>~2020</td>
                  <td>70</td>
                  <td>0</td>
                  <td>70</td>
                </tr>
                <tr valign="top">
                  <td>FibVID<sup>c</sup></td>
                  <td>~2020</td>
                  <td>615</td>
                  <td>318</td>
                  <td>297</td>
                </tr>
                <tr valign="top">
                  <td>FaCOV<sup>d</sup></td>
                  <td>~2021</td>
                  <td>811</td>
                  <td>811</td>
                  <td>0</td>
                </tr>
                <tr valign="top">
                  <td>PolitiFact</td>
                  <td>~2023</td>
                  <td>87</td>
                  <td>42</td>
                  <td>45</td>
                </tr>
                <tr valign="top">
                  <td>Snopes</td>
                  <td>~2023</td>
                  <td>15</td>
                  <td>9</td>
                  <td>6</td>
                </tr>
                <tr valign="top">
                  <td>CDC<sup>e</sup>+WHO<sup>f</sup></td>
                  <td>~2023</td>
                  <td>58</td>
                  <td>0</td>
                  <td>58</td>
                </tr>
                <tr valign="top">
                  <td>Total</td>
                  <td>—<sup>g</sup></td>
                  <td>3166</td>
                  <td>2372</td>
                  <td>794</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table1fn1">
                <p><sup>a</sup>CTF: COVID-19 Twitter Fake News.</p>
              </fn>
              <fn id="table1fn2">
                <p><sup>b</sup>CoAID: COVID-19 Health Care Misinformation Data Set.</p>
              </fn>
              <fn id="table1fn3">
                <p><sup>c</sup>FibVID: Fake News Information-Broadcasting Data Set of COVID-19.</p>
              </fn>
              <fn id="table1fn4">
                <p><sup>d</sup>FaCOV: COVID-19 Viral News and Rumors Fact-Check Articles Data Set.</p>
              </fn>
              <fn id="table1fn5">
                <p><sup>e</sup>CDC: Centers for Disease Control and Prevention.</p>
              </fn>
              <fn id="table1fn6">
                <p><sup>f</sup>WHO: World Health Organization.</p>
              </fn>
              <fn id="table1fn7">
                <p><sup>g</sup>Not applicable.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>Data Preprocessing</title>
        <p>Ensuring the absence of duplicate entries in the merged open-source datasets was crucial. To achieve this, we cross-referenced the data with existing open-source datasets. Any identified duplicate samples were eliminated to reduce redundancy, thereby preventing potential impacts on subsequent model training and analysis performance.</p>
        <p>During the preprocessing phase, social media posts and articles, known to include emojis and external links (URLs) frequently, underwent processing to enhance their suitability for distinguishing between genuine and fake news. The <italic>tweet-preprocessor</italic> package was used to eliminate emojis and URLs from the texts because of their low distribution in classification.</p>
        <p>All the labels were encoded as “0” (stands for genuine) and “1” (stands for fake), respectively. The distribution of our dataset is illustrated in Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. As the public data sources exhibited a bias toward the “fake” category, an imbalanced label distribution was observed in the dataset. Therefore, we employed stratified sampling, allocating 10% of the data for testing and 90% for training. This sampling approach ensures consistent label proportions in both the test and training sets, thereby preventing the potential insufficiency of “genuine” samples that may arise from random sampling.</p>
        <p>The preprocessing pipeline followed the aforementioned sequence. First, using pandas, duplicate entries were identified and removed, retaining one instance per unique sample. Second, emojis and URLs were removed with <italic>tweet-preprocessor</italic> to enhance textual clarity and clean the text data. Third, we carried out label encoding. Labels were encoded as “0” for genuine and “1” for fake news. Fourth, we split the dataset. Given the imbalanced nature of our data, which had a higher number of “fake” samples, we employed stratified sampling (using <italic>scikit-learn</italic>) to ensure consistent label proportions. Specifically, 90% (2634/2927) of the data were allocated for training, and 10% (293/2927) were allocated for testing.</p>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <sec>
          <title>Overview</title>
          <p>The data analysis included keyword occurrences, sentiment analysis, and subjectivity values. The analysis aimed to gain deeper insights from the collected dataset and to identify different distributions between genuine and fake articles.</p>
        </sec>
        <sec>
          <title>Keyword Occurrences</title>
          <p>By analyzing the frequency of certain keywords in the data, we were able to gain a better understanding of the public’s interests. In Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, we can see that over 50% of the fake samples contain the term “immune,” while only a few genuine samples feature it. The term “recovery” is primarily used in genuine samples but is also present in a similar proportion of fake samples. Other keywords such as “variant,” “complication,” and “chronic” also appear more frequently in fake samples. This suggests that such terms are often used to spread fake news.</p>
        </sec>
        <sec>
          <title>Sentiment Analysis</title>
          <p>Sentiment analysis can explain an article’s tone, whether positive, negative, or neutral. Additionally, it can estimate the subjectivity of the text, distinguishing between genuine information and our opinions. To gain deeper insights into the textual data sourced from open-source databases and fact-checking websites, the <italic>TextBlob</italic> package [<xref ref-type="bibr" rid="ref19">19</xref>] was used for sentiment analysis. <italic>TextBlob</italic>’s sentiment analysis assigns a polarity value ranging from –1 to 1, indicating the sentiment from entirely negative to entirely positive. To classify sentiments based on polarity, we categorized them into the 5 groups, outlined in <xref ref-type="boxed-text" rid="box2">Textbox 2</xref>.</p>
          <p>The sentiment analysis revealed the distribution of sentiments within the collected textual data. In Figure S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, most of the content falls into the “neutral” category. Approximately 65% (1902/2927) of the content in fake texts and 53% (300/566) in genuine texts were classified as “neutral.” Genuine texts showed a higher proportion (204/566, 36%) in the “slightly positive” category compared to fake texts (703/2927, 24%), indicating a tendency for genuine content to include more positive language. Conversely, there were no significant disparities between the distributions of the “strongly negative,” “slightly negative,” and “strongly positive” sentiment categories across the 2 labels.</p>
          <p>In addition to sentiment analysis, <italic>TextBlob</italic> also provides a subjectivity value ranging from 0 to 1, indicating the degree of subjectivity within the text, ranging from entirely objective to entirely subjective. To simplify comprehension, we categorized the degree of subjectivity into the following 5 groups, as outlined in <xref ref-type="boxed-text" rid="box3">Textbox 3</xref>.</p>
          <boxed-text id="box2" position="float">
            <title>Sentiment classification based on polarity score ranges.</title>
            <list list-type="bullet">
              <list-item>
                <p>Strongly negative: polarity values between –1 and –0.5</p>
              </list-item>
              <list-item>
                <p>Slightly negative: polarity values between –0.5 and –0.1</p>
              </list-item>
              <list-item>
                <p>Neutral: polarity values between –0.1 and 0.1</p>
              </list-item>
              <list-item>
                <p>Slightly positive: polarity values between 0.1 and 0.5</p>
              </list-item>
              <list-item>
                <p>Strongly positive: polarity values between 0.5 and 1</p>
              </list-item>
            </list>
          </boxed-text>
          <boxed-text id="box3" position="float">
            <title>Subjectivity classification based on score ranges.</title>
            <list list-type="bullet">
              <list-item>
                <p>Low subjectivity: values between 0 and 0.2</p>
              </list-item>
              <list-item>
                <p>Medium-low subjectivity: values between 0.2 and 0.4</p>
              </list-item>
              <list-item>
                <p>Medium subjectivity: values between 0.4 and 0.6</p>
              </list-item>
              <list-item>
                <p>Medium-high subjectivity: values between 0.6 and 0.8</p>
              </list-item>
              <list-item>
                <p>High subjectivity: values between 0.8 and 1</p>
              </list-item>
            </list>
          </boxed-text>
          <p>Based on the analysis illustrated in Figure S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, the distribution of subjectivity levels between genuine and fake texts appeared similar, mainly concentrated in the “medium subjectivity” category. Genuine texts had 38% (215/566) of their content in this category, while fake texts contained 37% (1083/2927). There was a 5% higher prevalence of fake texts in the “low subjectivity” category than in genuine texts. Conversely, in the “medium-high subjectivity” category, genuine texts surpassed fake texts by a margin of 3%.</p>
          <p>While minor differences were observed in the distribution of sentiment and subjectivity between genuine and fake texts, these differences may not serve as definitive classification criteria. Moreover, sentiment analysis encounters challenges in natural language comprehension, such as accurately identifying sarcasm. Therefore, more precise approaches, such as machine learning algorithms and deep learning models, are required to differentiate between genuine and fake news concerning long COVID and reinfections.</p>
        </sec>
      </sec>
      <sec>
        <title>Overview of Models</title>
        <sec>
          <title>Comparative Evaluation</title>
          <p>In this study, a thorough comparison was executed using various classification methods. Traditional machine learning algorithms that used text content features were employed to establish a baseline. Deep learning models, from the hierarchical attention network (HAN) to the bidirectional encoder representations from transformers (BERT) series, were also used to take advantage of the advanced abilities to handle complex textual data information. Additionally, embedding models based on LLMs were used to compare the performance between fee-required models and open-source deep models. The experimental approach helped us evaluate different methods in distinguishing articles with genuine and fake information, especially regarding long-term COVID-19 and reinfections.</p>
        </sec>
        <sec>
          <title>Support Vector Machine</title>
          <p>To establish a baseline model, this study initially selected SVM [<xref ref-type="bibr" rid="ref20">20</xref>]. In the text classification, linear classifiers are commonly considered strong baseline models. By comparing the performance of the linear classifier with that of deep learning models, we can verify their effectiveness when fine-tuned and employed [<xref ref-type="bibr" rid="ref21">21</xref>]. To apply SVM for classification tasks, unigram TF-IDF features were generated from the training set data, and the SVM model was trained using these features for binary classification.</p>
        </sec>
        <sec>
          <title>Hierarchical Attention Network</title>
          <p>HAN [<xref ref-type="bibr" rid="ref22">22</xref>] integrates attention mechanisms at multiple levels, focusing on word and sentence levels, to capture diverse hierarchical structures within documents. We used recurrent neural networks combined with word-attention and sentence-attention layers for text classification, resulting in state-of-the-art performance across 6 datasets. After training this model on the training set, we compared it with the proposed method.</p>
        </sec>
        <sec>
          <title>Pretrained Language Models</title>
          <p>In addition to deep learning models like HAN, this study also fine-tuned PLMs to use their text-understanding capabilities for fake news detection. PLMs, including BERT [<xref ref-type="bibr" rid="ref23">23</xref>], RoBERTa [<xref ref-type="bibr" rid="ref24">24</xref>], decoding-enhanced BERT with disentangled attention (DeBERTa) [<xref ref-type="bibr" rid="ref25">25</xref>], and XLNet [<xref ref-type="bibr" rid="ref26">26</xref>], are state-of-the-art models widely used in NLP tasks. BERT, introduced by Google, uses masked language modeling and next-sentence prediction to generate contextualized word representations. RoBERTa, an enhancement of BERT by Meta, removes next-sentence prediction and incorporates optimization strategies for improved performance. DeBERTa introduces disentangled attention and enhanced mask decoder mechanisms to further refine self-attention, achieving superior results across NLP tasks. XLNet, developed by Google, employs permutation language modeling and the Transformer-XL architecture to effectively understand bidirectional contextual comprehension, especially in processing long texts, surpassing BERT and RoBERTa in various benchmarks.</p>
        </sec>
        <sec>
          <title>Large Language Models</title>
          <p>The success of LLMs in recent years has significantly advanced applications in the field of NLP. For the classification task in this study, we used OpenAI’s generative pretrained transformer (GPT) embedding model “text-embedding-ada-002” [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>] and Google’s Gemini embedding model [<xref ref-type="bibr" rid="ref29">29</xref>] to transform the training set data, combining it with machine learning methods such as SVM for training. Integrating knowledge from large language models and the training dataset helped improve the text classification predictions. Furthermore, we directly applied ChatGPT-4 [<xref ref-type="bibr" rid="ref30">30</xref>], the state-of-the-art LLM, to infer the texts in the test set. This allowed us to compare the performance of LLMs with our proposed method.</p>
        </sec>
      </sec>
      <sec>
        <title>Fuzzy Rank-Based Ensemble Technique</title>
        <p>Ensemble learning combines the strengths of individual models to yield predictions that outperform any contributing model. This study proposed an approach to enhance prediction performance by incorporating the Gompertz function into ensemble learning techniques.</p>
        <p>We employed a fuzzy rank-based ensemble technique [<xref ref-type="bibr" rid="ref31">31</xref>], where the confidence of each classifier in its predictions was given priority for each test case. This differed from traditional ensemble methods like the average or weighted average rules, which assign predefined fixed weights to classifiers. Moreover, the reparameterized Gompertz function was used to compute the fuzzy ranks of each pretrained model for detection. Incorporating state-of-the-art PLMs like RoBERTa, DeBERTa, and XLNet further improved our approach. These PLMs bring advanced language understanding abilities to the ensemble, contributing to its robust performance. Afterward, the predictions of the 3 models were fused. <xref rid="figure1" ref-type="fig">Figure 1</xref> outlines the process of the proposed method.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Diagram of the fuzzy ensemble process using multiple language models. DeBERTa: decoding-enhanced bidirectional encoder representations from transformers with disentangled attention; RoBERTa: Robustly optimized bidirectional encoder representations from transformers pretraining approach.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e73601_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Implementation</title>
        <p>The data were randomly split using the <italic>scikit-learn</italic> [<xref ref-type="bibr" rid="ref32">32</xref>] package. This study employed 5-fold cross-validation to train the SVM and determine the best-performing model for the final evaluation. The PLMs were fine-tuned using Hugging Face checkpoints, with the AdamW optimizer [<xref ref-type="bibr" rid="ref33">33</xref>] and cross-entropy loss. The learning rate was set to 2e-5, and the training was carried out for 20 epochs. HAN was trained for the same number of epochs with its default settings. The training procedure was repeated 5 times with different random initial weights, and checkpoints of models were selected for final evaluation based on the highest validation <italic>F</italic><sub>1</sub>-score [<xref ref-type="bibr" rid="ref34">34</xref>]. HAN was trained on the Tesla T4, while all other deep models were trained on the RTX A5000.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>No ethical review was required for this study because it did not involve human participants.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>The model performance was evaluated using well-known metrics: accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, and area under the curve (AUC). The results shown in <xref ref-type="table" rid="table2">Table 2</xref> compare the proposed fuzzy method with other approaches, including traditional machine learning algorithms, deep learning networks, pretrained networks, and state-of-the-art LLMs.</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Comparison of the performance of different models for test data using various evaluation metrics, including accuracy, precision, recall, F1-score, and AUC.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="200"/>
          <col width="160"/>
          <col width="160"/>
          <col width="160"/>
          <col width="160"/>
          <col width="160"/>
          <thead>
            <tr valign="top">
              <td>Model</td>
              <td>Accuracy (%)</td>
              <td>Precision (%)</td>
              <td>Recall (%)</td>
              <td><italic>F</italic><sub>1</sub>-score (%)</td>
              <td>AUC<sup>a</sup> (%)</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>TF-IDF<sup>b</sup> + SVM<sup>c</sup></td>
              <td>89.08</td>
              <td>91.46</td>
              <td>95.34</td>
              <td>93.36</td>
              <td>92.02</td>
            </tr>
            <tr valign="top">
              <td>HAN<sup>d</sup></td>
              <td>90.78</td>
              <td>94.09</td>
              <td>94.49</td>
              <td>94.29</td>
              <td>93.09</td>
            </tr>
            <tr valign="top">
              <td>BERT<sup>e</sup></td>
              <td>91.13</td>
              <td>93.75</td>
              <td>95.34</td>
              <td>94.54</td>
              <td>96.31</td>
            </tr>
            <tr valign="top">
              <td>RoBERTa<sup>f,g</sup></td>
              <td>91.81</td>
              <td>93.44</td>
              <td>96.61</td>
              <td>95.00</td>
              <td>96.59</td>
            </tr>
            <tr valign="top">
              <td>DeBERTa<sup>h,g</sup></td>
              <td>91.81</td>
              <td>94.17</td>
              <td>95.76</td>
              <td>94.96</td>
              <td>95.75</td>
            </tr>
            <tr valign="top">
              <td>XLNet</td>
              <td>92.83</td>
              <td>94.24</td>
              <td>97.03</td>
              <td>95.62</td>
              <td>96.12</td>
            </tr>
            <tr valign="top">
              <td>GPT<sup>i</sup> embedding + SVM</td>
              <td>92.83</td>
              <td>93.52</td>
              <td>97.88</td>
              <td>95.65</td>
              <td>94.88</td>
            </tr>
            <tr valign="top">
              <td>Gemini embedding + SVM</td>
              <td>91.47</td>
              <td>92.71</td>
              <td>97.03</td>
              <td>94.82</td>
              <td>93.27</td>
            </tr>
            <tr valign="top">
              <td>GPT-4</td>
              <td>82.25</td>
              <td>91.82</td>
              <td>85.59</td>
              <td>88.60</td>
              <td>N/A<sup>j</sup></td>
            </tr>
            <tr valign="top">
              <td>Soft voting</td>
              <td>93.17</td>
              <td>94.63</td>
              <td>97.03</td>
              <td>95.82</td>
              <td>97.14</td>
            </tr>
            <tr valign="top">
              <td>Fuzzy rank-based method</td>
              <td>93.52</td>
              <td>94.65</td>
              <td>97.46</td>
              <td>96.03</td>
              <td>97.15</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table2fn1">
            <p><sup>a</sup>AUC: area under the curve.</p>
          </fn>
          <fn id="table2fn2">
            <p><sup>b</sup>TF-IDF: term frequency–inverse document frequency.</p>
          </fn>
          <fn id="table2fn3">
            <p><sup>c</sup>SVM: support vector machine.</p>
          </fn>
          <fn id="table2fn4">
            <p><sup>d</sup>HAN: hierarchical attention network.</p>
          </fn>
          <fn id="table2fn5">
            <p><sup>e</sup>BERT: bidirectional encoder representations from transformers.</p>
          </fn>
          <fn id="table2fn6">
            <p><sup>f</sup>RoBERTa: robustly optimized BERT pretraining approach.</p>
          </fn>
          <fn id="table2fn7">
            <p><sup>g</sup>These models were used in the fuzzy and soft voting methods.</p>
          </fn>
          <fn id="table2fn8">
            <p><sup>h</sup>DeBERTa: decoding-enhanced BERT with disentangled attention.</p>
          </fn>
          <fn id="table2fn9">
            <p><sup>i</sup>GPT: generative pretrained transformer.</p>
          </fn>
          <fn id="table2fn10">
            <p><sup>j</sup>N/A: not applicable.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
      <p>TF-IDF with SVM achieved an accuracy of 89.08%, a precision of 91.46%, a recall of 95.34%, an <italic>F</italic><sub>1</sub>-score of 93.36%, and an AUC (area under the curve) of 92.02%. While these metrics indicate acceptable performance, they were still beaten by attention-based deep models like the HAN and BERT series in the experiments.</p>
      <p>When comparing HAN with TF-IDF and BERT, we observed that HAN outperformed TF-IDF in terms of accuracy (90.78% vs 89.08%), <italic>F</italic><sub>1</sub>-score (94.29% vs 93.36%), and AUC (93.09% vs 92.02%), indicating its superior performance in the fake news detection task. However, BERT achieved slightly higher accuracy and AUC than HAN.</p>
      <p>The fuzzy method achieved impressive results on the test set, with an accuracy of 93.52%, a precision of 94.65%, an <italic>F</italic><sub>1</sub>-score of 96.03%, and an AUC of 97.15%, representing the highest performance among all the evaluated methods. The conventional soft voting ensemble method on 3 PLMs also yielded a high AUC of 97.14%, while combining the GPT embedding model with SVM yielded the highest recall of 97.88%. This experiment demonstrates the effectiveness of the proposed approach across multiple evaluation metrics, showcasing its potential for robust classification tasks. <xref rid="figure2" ref-type="fig">Figure 2</xref> shows the confusion matrix with the top 4 <italic>F</italic><sub>1</sub>-scores in the experiment.</p>
      <p><xref ref-type="table" rid="table3">Table 3</xref> shows the actual case of using the fuzzy method to detect fake and genuine news that was not included in the test set. We used 4 isolated samples, 2 genuine and 2 fake, varying in length. The result shows that our method accurately detected the genuineness of the content regardless of its length, revealing its robustness across different text lengths.</p>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Confusion matrix of (A) soft voting method, (B) fuzzy rank-based method, (C) generative pretrained transformer (GPT) embedding method, and (D) XLNet on the held-out test dataset. SVM: support vector machine.</p>
        </caption>
        <graphic xlink:href="jmir_v27i1e73601_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Real case inference using fuzzy rank ensemble method.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="740"/>
          <col width="70"/>
          <col width="90"/>
          <col width="100"/>
          <thead>
            <tr valign="top">
              <td>Content/main claim</td>
              <td>Length</td>
              <td>Prediction</td>
              <td>Ground truth</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>A German study has revealed that long COVID is linked to the vaccine.</td>
              <td>15</td>
              <td>Fake</td>
              <td>Fake</td>
            </tr>
            <tr valign="top">
              <td>COVID-19 vaccination before infection is strongly linked to reduced risk of developing long COVID.</td>
              <td>15</td>
              <td>Genuine</td>
              <td>Genuine</td>
            </tr>
            <tr valign="top">
              <td>Long COVID's causes and risk factors remain a subject of ongoing research, with potential factors including reactivation of SARS-CoV-2 particles, overactive immune responses, and the development of autoantibodies attacking organs. Certain groups, such as those with severe COVID-19 history, underlying health conditions, or lacking vaccination, are at higher risk, alongside other factors like sex, age, initial immune response, and viral variants. Health inequities may also contribute, especially affecting racial or ethnic minority groups and individuals with disabilities.</td>
              <td>367</td>
              <td>Genuine</td>
              <td>Genuine</td>
            </tr>
            <tr valign="top">
              <td>While Omicron's subvariants find new ways to evade vaccines and destabilize immune systems, another pandemic has overwhelmed officials who are supposed to be in charge of public health. In any case, COVID-19, a novel virus that can wreak havoc with vital organs in the body, continues to evolve at a furious pace. In response, officials have largely abandoned any coherent response, including masking, testing, tracing, and even basic data collection. Yes, the people have been abandoned. So don't expect “normal” to return to your hospital, your airport, your nation, your community, or your life anytime soon.</td>
              <td>469</td>
              <td>Fake</td>
              <td>Fake</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This study presents a novel approach for detecting misinformation on the long-term effects of COVID-19 by combining state-of-the-art PLMs with a fuzzy rank-based ensemble method that incorporates the Gompertz function. The experimental results demonstrate that using language models, particularly XLNet, for fake news detection outperforms traditional TF-IDF features combined with SVM or deep models like HAN. Some insights can be observed in <xref ref-type="table" rid="table4">Table 4</xref>, which shows a trend where models with more parameters tend to achieve better classification accuracy. Models such as BERT, RoBERTa, DeBERTa, and XLNet, which possess significantly more parameters than HAN, demonstrated superior performance across various evaluation metrics. Although XLNet had fewer parameters than some models in the BERT series, it still outperformed them in the experiment. This result suggests that classification effectiveness depends not only on the number of parameters but also on the model architecture, training methods, and optimization techniques. XLNet’s success can be attributed to its permutation language modeling approach, which enhances bidirectional context understanding while reducing some limitations in BERT. The fuzzy rank-based ensemble method further enhanced performance by dynamically weighting individual model predictions based on their confidence levels for each test case. This adaptive fusion resulted in an accuracy of 93.52% and an <italic>F</italic><sub>1</sub>-score of 96.03%.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Parameter counts for each deep model used in the experiment.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>Model</td>
                <td>Parameter counts</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>HAN<sup>a</sup></td>
                <td>2,343,202</td>
              </tr>
              <tr valign="top">
                <td>BERT<sup>b</sup></td>
                <td>109,483,778</td>
              </tr>
              <tr valign="top">
                <td>RoBERTa<sup>c</sup></td>
                <td>124,647,170</td>
              </tr>
              <tr valign="top">
                <td>DeBERTa<sup>d</sup></td>
                <td>139,193,858</td>
              </tr>
              <tr valign="top">
                <td>XLNet</td>
                <td>117,310,466</td>
              </tr>
              <tr valign="top">
                <td>GPT<sup>e</sup> embedding</td>
                <td>Unknown</td>
              </tr>
              <tr valign="top">
                <td>Gemini-embedding</td>
                <td>Unknown</td>
              </tr>
              <tr valign="top">
                <td>GPT-4</td>
                <td>Unknown</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>HAN: hierarchical attention network.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>BERT: bidirectional encoder representations from transformers.</p>
            </fn>
            <fn id="table4fn3">
              <p><sup>c</sup>RoBERTa: robustly optimized BERT pretraining approach.</p>
            </fn>
            <fn id="table4fn4">
              <p><sup>d</sup>DeBERTa: decoding-enhanced BERT with disentangled attention.</p>
            </fn>
            <fn id="table4fn5">
              <p><sup>e</sup>GPT: generative pretrained transformer.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>Additionally, using LLMs’ embedding models led to superior performance than traditional TF-IDF features. In this study, the GPT embedding model performed slightly better than Gemini, possibly due to differences in the length of the embedding vectors. GPT defaulted to 1536, while Gemini defaulted to 768. Despite yielding acceptable outcomes, directly using GPT-4 fell short compared to training SVM on vector-transformed training data using embedding models. This result implies that LLMs still benefit from training data in fake news detection tasks. Although the LLMs’ embedding models showed remarkable performance in the experiment, accessing this kind of embedding model via an application programming interface incurs charges. However, the fuzzy method can be combined with open-source PLMs to achieve even better results. Moreover, compared to using a single language model or soft voting method with predefined weights, the fuzzy fusion-based technique allowed us to determine ensemble model weights for each test case, resulting in superior performance.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>One limitation of the study is the presence of data imbalance, which suggests a potential bias toward the prevalence of fake information on the internet. Addressing this issue would require gathering a more extensive and up-to-date dataset of genuine information to achieve a better balance and representativeness in the training data.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This study provides a comprehensive investigation into the detection of COVID-19–related misinformation by leveraging advanced deep learning techniques. An in-depth analysis of open-source datasets related to long COVID revealed distinct distribution patterns between genuine and fake articles, offering valuable insights into the nature and propagation of misinformation. A systematic comparison between traditional text classification methods and state-of-the-art PLMs highlighted the strengths and limitations of each approach when applied to misinformation detection. Furthermore, the development of a novel ensemble method that integrates a fuzzy rank approach with the Gompertz function significantly enhanced the accuracy and robustness of the text classification.</p>
        <p>By developing an ensemble method that integrates fuzzy ranking with the Gompertz function, this research introduces a novel perspective on enhancing classification stability and accuracy. The proposed approach moves beyond simple majority voting or static weight assignments by incorporating dynamic confidence-based fusion, offering a refined framework for complex classification tasks in NLP.</p>
        <p>Beyond its methodological contributions, this study holds practical relevance for addressing real-world challenges. The proposed detection system, which performs well using text-only inputs, demonstrates strong potential as a scalable tool for the real-time monitoring of internet misinformation. By helping to distinguish between credible and misleading information, the system may support public health efforts, reduce confusion among the public, and contribute to more transparent digital communication in health-related discourse.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Additional figures.</p>
        <media xlink:href="jmir_v27i1e73601_app1.docx" xlink:title="DOCX File , 243 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BERT</term>
          <def>
            <p>bidirectional encoder representations from transformers</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DeBERTa</term>
          <def>
            <p>decoding-enhanced BERT with disentangled attention</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">GPT</term>
          <def>
            <p>generative pretrained transformer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">HAN</term>
          <def>
            <p>hierarchical attention network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PLM</term>
          <def>
            <p>pretrained language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">RoBERTa</term>
          <def>
            <p>robustly optimized BERT pretraining approach</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">TF-IDF</term>
          <def>
            <p>term frequency–inverse document frequency</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">WHO</term>
          <def>
            <p>World Health Organization</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We used ChatGPT (OpenAI) to improve the grammar and clarity of English expressions in the manuscript. All content was reviewed and revised by the authors to ensure accuracy and appropriateness for academic publication. This study was financially supported by the National Science and Technology Council (NSTC) of Taiwan (NSTC 112-2314-B-A49-049-MY3, 113-2634-F-A49-003, and 113-2321-B-A49-011).</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The code and datasets generated or analyzed during this study are not publicly available due to an ongoing patent application but may be available from the corresponding author on reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>JAC conceptualized the study, with oversight from CLH and CYW. JAC curated the data and conducted the formal analysis, with support from CLH. CLH managed funding acquisition. CLH, CYW, and JAC led the investigation. JAC designed the methodology, which CLH, CYW, and WCC oversaw. CLH, CYW, and JAC administered the project. JAC provided the resources and developed the software. CLH and CYW supervised the work. CLH and WCC validated the findings. JAC created the visualizations. JAC and CLH wrote the original draft, while CLH, CYW, and WCC reviewed and edited the manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Infodemic</article-title>
          <source>World Health Organization</source>
          <access-date>2023-09-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/health-topics/infodemic">https://www.who.int/health-topics/infodemic</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>HE</given-names>
            </name>
            <name name-style="western">
              <surname>McCorkell</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Vogel</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>Long COVID: major findings, mechanisms and recommendations</article-title>
          <source>Nat Rev Microbiol</source>
          <year>2023</year>
          <month>03</month>
          <day>13</day>
          <volume>21</volume>
          <issue>3</issue>
          <fpage>133</fpage>
          <lpage>146</lpage>
          <pub-id pub-id-type="doi">10.1038/s41579-022-00846-2</pub-id>
          <pub-id pub-id-type="medline">36639608</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41579-022-00846-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC9839201</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bowe</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Aly</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Acute and postacute sequelae associated with SARS-CoV-2 reinfection</article-title>
          <source>Nat Med</source>
          <year>2022</year>
          <month>11</month>
          <day>10</day>
          <volume>28</volume>
          <issue>11</issue>
          <fpage>2398</fpage>
          <lpage>2405</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36357676"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41591-022-02051-3</pub-id>
          <pub-id pub-id-type="medline">36357676</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-022-02051-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC9671810</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patwa</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pykl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Guptha</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Kumari</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Akhtar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ekbal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chakraborty</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Fighting an Infodemic: COVID-19 Fake News Dataset</article-title>
          <year>2021</year>
          <conf-name>International Workshop on Combating Online Hostile Posts in Regional Languages during Emergency Situation</conf-name>
          <conf-date>February 8</conf-date>
          <conf-loc>Online</conf-loc>
          <publisher-name>Springer, Cham</publisher-name>
          <fpage>21</fpage>
          <lpage>29</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-73696-5_3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Das</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Basak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutta</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A Heuristic-driven Ensemble Framework for COVID-19 Fake News Detection</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on January 10, 2021</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2101.03545"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-030-73696-5_16</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Paka</surname>
              <given-names>WS</given-names>
            </name>
            <name name-style="western">
              <surname>Bansal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kaushik</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sengupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chakraborty</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Cross-SEAN: a cross-stitch semi-supervised neural attention model for COVID-19 fake news detection</article-title>
          <source>Appl Soft Comput</source>
          <year>2021</year>
          <month>08</month>
          <volume>107</volume>
          <fpage>107393</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36568256"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.asoc.2021.107393</pub-id>
          <pub-id pub-id-type="medline">36568256</pub-id>
          <pub-id pub-id-type="pii">S1568-4946(21)00316-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC9761197</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zafarani</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>CHECKED: Chinese COVID-19 fake news dataset</article-title>
          <source>Soc Netw Anal Min</source>
          <year>2021</year>
          <month>06</month>
          <day>22</day>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>58</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34178179"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13278-021-00766-8</pub-id>
          <pub-id pub-id-type="medline">34178179</pub-id>
          <pub-id pub-id-type="pii">766</pub-id>
          <pub-id pub-id-type="pmcid">PMC8217979</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <source>ChatGPT</source>
          <access-date>2023-10-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://chat.openai.com">https://chat.openai.com</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bangyal</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bashir</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ubakanma</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Polarity classification of Twitter data using machine learning approach</article-title>
          <year>2023</year>
          <conf-name>2023 International Conference on Human-Centered Cognitive Systems (HCCS 2023)</conf-name>
          <conf-date>December 16-17, 2023</conf-date>
          <conf-loc>Cardiff, United Kingdom</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/hccs59561.2023.10452557</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bangyal</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ashraf</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shakir</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ur Rehaman</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>A review on consumer behavior towards online shopping using machine learning</article-title>
          <source>Int J Emerg Multidiscip Comput Sci Artif Intell</source>
          <year>2022</year>
          <month>05</month>
          <day>30</day>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>105</fpage>
          <lpage>114</lpage>
          <pub-id pub-id-type="doi">10.54938/ijemdcsai.2022.01.1.84</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shafqat</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bangyal</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Almakhles</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Opinion mining of politics and inflation using Roman Urdu dataset</article-title>
          <source>Hum-Centered Cogn Syst HCCS</source>
          <year>2022</year>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/hccs55241.2022.10090276</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="web">
          <article-title>covid_fake_news/data at main · diptamath/covid_fake_news</article-title>
          <source>GitHub</source>
          <access-date>2023-10-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/diptamath/covid_fake_news/tree/main/data">https://github.com/diptamath/covid_fake_news/tree/main/data</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>CoAID: COVID-19 healthcare misinformation dataset</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on May 22, 2020</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2006.00885"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2006.00885</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Aum</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>FibVID: Comprehensive fake news diffusion dataset during the COVID-19 period</article-title>
          <source>Telemat Inform</source>
          <year>2021</year>
          <month>11</month>
          <volume>64</volume>
          <fpage>101688</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36567815"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.tele.2021.101688</pub-id>
          <pub-id pub-id-type="medline">36567815</pub-id>
          <pub-id pub-id-type="pii">S0736-5853(21)00127-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC9759652</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Datta</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>FaCov: COVID-19 viral news and rumors fact-check articles dataset</article-title>
          <source>Proc Int AAAI Conf Web Soc Media</source>
          <year>2022</year>
          <conf-name>6th International AAAI Conference on Web and Social Media</conf-name>
          <conf-date>June 6-9, 2022</conf-date>
          <conf-loc>Atlanta, GA</conf-loc>
          <pub-id pub-id-type="doi">10.1609/icwsm.v16i1.19383</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <article-title>COVID archives</article-title>
          <source>Snopes</source>
          <access-date>2023-10-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.snopes.com/tag/covid-19/">https://www.snopes.com/tag/covid-19/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <article-title>Fact-checks</article-title>
          <source>PolitiFact</source>
          <access-date>2023-10-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.politifact.com/factchecks/list/?category=coronavirus">https://www.politifact.com/factchecks/list/?category=coronavirus</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Khondaker</surname>
              <given-names>MTI</given-names>
            </name>
            <name name-style="western">
              <surname>Afroz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Uddin</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A benchmark study of machine learning models for online fake news detection</article-title>
          <source>Mach Learn Appl</source>
          <year>2021</year>
          <month>06</month>
          <volume>4</volume>
          <fpage>100032</fpage>
          <pub-id pub-id-type="doi">10.1016/j.mlwa.2021.100032</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="web">
          <article-title>TextBlob: simplified text processing</article-title>
          <source>TextBlob</source>
          <access-date>2023-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://textblob.readthedocs.io/en/dev/">https://textblob.readthedocs.io/en/dev/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cortes</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vapnik</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Support-vector networks</article-title>
          <source>Mach Learn</source>
          <year>1995</year>
          <month>9</month>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>273</fpage>
          <lpage>297</lpage>
          <pub-id pub-id-type="doi">10.1007/BF00994018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Linear classifier: an often-forgotten baseline for text classification</article-title>
          <year>2023</year>
          <conf-name>61st Annual Meeting of the Association for Computational Linguistics</conf-name>
          <conf-date>July 9-14, 2023</conf-date>
          <conf-loc>Toronto, ON</conf-loc>
          <pub-id pub-id-type="doi">10.18653/v1/2023.acl-short.160</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dyer</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Smola</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hovy</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Hierarchical attention networks for document classification</article-title>
          <year>2016</year>
          <conf-name>2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT 2016)</conf-name>
          <conf-date>June 12-17, 2016</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
          <pub-id pub-id-type="doi">10.18653/v1/n16-1174</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Devlin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>M-W</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Toutanova</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>BERT: Pre-training of deep bidirectional transformers for language understanding</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on May 24, 2019</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1810.04805"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1810.04805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ott</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Goyal</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zettlemoyer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Stoyanov</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>RoBERTa: A robustly optimized BERT pretraining approach</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on Jul 26, 2019</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1907.11692"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1907.11692</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>DeBERTa: decoding-enhanced BERT with disentangled attention</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on October 6, 2021</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2006.03654"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2006.03654</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Carbonell</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Salakhutdinov</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>XLNet: generalized autoregressive pretraining for language understanding</article-title>
          <year>2019</year>
          <conf-name>Annual Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 8-14, 2019</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.nips.cc/paper_files/paper/2019/hash/dc6a7e655d7e5840e66733e9ee67cc69-Abstract.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <source>OpenAI</source>
          <access-date>2023-12-04</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://platform.openai.com">https://platform.openai.com</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <article-title>New and improved embedding model</article-title>
          <source>OpenAI</source>
          <access-date>2024-03-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/blog/new-and-improved-embedding-model">https://openai.com/blog/new-and-improved-embedding-model</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>Gemini Team</collab>
            <name name-style="western">
              <surname>Anil</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Borgeaud</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Alayrac</surname>
              <given-names>J-b</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Soricut</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Schalkwyk</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>Am</given-names>
            </name>
            <name name-style="western">
              <surname>Hauth</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Millican</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Silver</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Petrov</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Antonoglou</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Schrittwieser</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Glaese</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pitler</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lillicrap</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lazaridou</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Firat</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Molloy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Isard</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Barham</surname>
              <given-names>Pr</given-names>
            </name>
            <name name-style="western">
              <surname>Hennigan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Viola</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Reynolds</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Doherty</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rutherford</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Moreira</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ayoub</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Goel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tucker</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Piqueras</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Krikun</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Barr</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Savinov</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Danihelka</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Roelofs</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Andreassen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>von Glehn</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yagati</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kazemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Khalman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sygnowski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Frechette</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Culp</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Proleev</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Luan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lottes</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schucher</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lebron</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Rrustemi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Clay</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Crone</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kocisky</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Perz</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Bloniarz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rae</surname>
              <given-names>Jw</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sifre</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Maggioni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alcober</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Garrette</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Barnes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Thakoor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Austin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Barth-Maron</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chaabouni</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fatiha</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ahuja</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cogan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Grimstad</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hartman</surname>
              <given-names>Aj</given-names>
            </name>
            <name name-style="western">
              <surname>Chadwick</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tomar</surname>
              <given-names>Gs</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Senter</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Taropa</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Pillai</surname>
              <given-names>Ts</given-names>
            </name>
            <name name-style="western">
              <surname>Devlin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Laskin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Casas</surname>
              <given-names>DdL</given-names>
            </name>
            <name name-style="western">
              <surname>Valter</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Blanco</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Badia</surname>
              <given-names>Ap</given-names>
            </name>
            <name name-style="western">
              <surname>Reitter</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rivera</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Brin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Surita</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Labanowski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Parisotto</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Olszewska</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Addanki</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Miech</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Louis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shafey</surname>
              <given-names>Le</given-names>
            </name>
            <name name-style="western">
              <surname>Teplyashin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Catt</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Attaluri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Balaguer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xiang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ashwood</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Briukhov</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Webson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ganapathy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sanghavi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kannan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>M-w</given-names>
            </name>
            <name name-style="western">
              <surname>Stjerngren</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Djolonga</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bapna</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Aitchison</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pejman</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Michalewski</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Love</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ahn</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bloxwich</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Humphreys</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sellam</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bradbury</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Godbole</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Samangooei</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Damoc</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kaskasoli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>SMR</given-names>
            </name>
            <name name-style="western">
              <surname>Vasudevan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Riesa</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lepikhin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tanburn</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Srinivasan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hodkinson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shyam</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ferret</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hand</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Garg</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Paine</surname>
              <given-names>Tl</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Giang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Neitz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abbas</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>York</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reid</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cole</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chowdhery</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Rogozińska</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nikolaev</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sprechmann</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nado</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zilka</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Prost</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Monteiro</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Welty</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Newlan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Allamanis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Ch</given-names>
            </name>
            <name name-style="western">
              <surname>de</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Gilmer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Saroufim</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rijhwani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shrivastava</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Baddepudi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Goldin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ozturel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cassirer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sohn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sachan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Amplayo</surname>
              <given-names>Rk</given-names>
            </name>
            <name name-style="western">
              <surname>Swanson</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Petrova</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Narayan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Guez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Brahma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Landon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Villela</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Rahtz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Giménez</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yeung</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Keeling</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Georgiev</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mincu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Haykal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Saputro</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Vodrahalli</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cankara</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fernando</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hawkins</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Neyshabur</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hutter</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Castro-Ros</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Driessche</surname>
              <given-names>Gvd</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Komarek</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>McIlroy</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lučić</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Farhan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Sharman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Natsev</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bansal</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shakeri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Butterfield</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rubenstein</surname>
              <given-names>Pk</given-names>
            </name>
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mensch</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Soparkar</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lenc</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pope</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Maggiore</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kay</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jhakra</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Maynez</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Phuong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tobin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tacchetti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Trebacz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Robinson</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Katariya</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Riedel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ghelani</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Aroyo</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Slone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Houlsby</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Xiong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gribovskaya</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Adler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wirth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kagohara</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pavagadhi</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bridgers</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bortsova</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ghemawat</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Powell</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bolina</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Iinuma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zablotskaia</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Besley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>D-w</given-names>
            </name>
            <name name-style="western">
              <surname>Dozat</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Comanescu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Si</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Greer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Polacek</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kaufman</surname>
              <given-names>Rl</given-names>
            </name>
            <name name-style="western">
              <surname>Tokumine</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Buchatskaya</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Miao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Elhawaty</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Siddhant</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tomasev</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Greer</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ashraf</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Roy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Filos</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Besta</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Blevins</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Klimenko</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yeh</surname>
              <given-names>C-k</given-names>
            </name>
            <name name-style="western">
              <surname>Changpinyo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Pajarskas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Muir</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Lan</surname>
              <given-names>Cl</given-names>
            </name>
            <name name-style="western">
              <surname>Haridasan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Marathe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Douglas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Samuel</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Austin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chiu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lorenzo</surname>
              <given-names>Ja</given-names>
            </name>
            <name name-style="western">
              <surname>Sjösund</surname>
              <given-names>Ll</given-names>
            </name>
            <name name-style="western">
              <surname>Cevey</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gleicher</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Avrahami</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Boral</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Srinivasan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Selo</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>May</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Aisopos</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hussenot</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Soares</surname>
              <given-names>Lb</given-names>
            </name>
            <name name-style="western">
              <surname>Baumli</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>Mb</given-names>
            </name>
            <name name-style="western">
              <surname>Recasens</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Caine</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Pritzel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pavetic</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pardo</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Gergely</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Frye</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ramasesh</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Horgan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Badola</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kassner</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Roy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dyer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Campos</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Tomala</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Badawy</surname>
              <given-names>De</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Mustafa</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Jindal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vikram</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Caelles</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hemsley</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Thornton</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Stokowiec</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Thacker</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ünlü</surname>
              <given-names>Ç</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Saleh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Svensson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bileschi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Patil</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Anand</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ring</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tsihlas</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Vezer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Selvi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Shevlane</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rodriguez</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kwiatkowski</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Daruki</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rong</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dafoe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>FitzGerald</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gu-Lemberg</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hendricks</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Pellat</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Feinberg</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Cobon-Kerr</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sainath</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rauh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hashemi</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Ives</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hasson</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Noland</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Byrd</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Sottiaux</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Paganini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lespiau</surname>
              <given-names>J-B</given-names>
            </name>
            <name name-style="western">
              <surname>Moufarek</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shivakumar</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>van</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Mandhane</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Goyal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Brock</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sheahan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rakićević</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dehghani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Mittal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Noury</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sezener</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Huot</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lamm</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Elsayed</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Mahdieh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tenney</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hua</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Petrychenko</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kane</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Scandinaro</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Uesato</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Datta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sadovsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bunyan</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Rabiej</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Vasudevan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Leurent</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Alnahlawi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Georgescu</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Rabinovitch</surname>
              <given-names>PG</given-names>
            </name>
            <name name-style="western">
              <surname>Stanczyk</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Steiner</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Naskar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Azzam</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Paszke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chiu</surname>
              <given-names>C-C</given-names>
            </name>
            <name name-style="western">
              <surname>Elias</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Mohiuddin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Muhammad</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Miao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vieillard</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Gemini: A Family of Highly Capable Multimodal Models</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on December 19, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2312.11805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>OpenAI</collab>
          </person-group>
          <article-title>GPT-4 technical report</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on March 15, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2303.08774"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kundu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Basak</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmadian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ferrara</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sarkar</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Fuzzy rank-based fusion of CNN models using Gompertz function for screening COVID-19 CT-scans</article-title>
          <source>Sci Rep</source>
          <year>2021</year>
          <month>07</month>
          <day>08</day>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>14133</fpage>
          <pub-id pub-id-type="doi">10.1038/s41598-021-93658-y</pub-id>
          <pub-id pub-id-type="medline">34238992</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-021-93658-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC8266871</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pedregosa</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Varoquaux</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gramfort</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Thirion</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Grisel</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Blondel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Prettenhofer</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Scikit-learn: machine learning in Python</article-title>
          <source>J Mach Learn Res</source>
          <year>2011</year>
          <volume>12</volume>
          <issue>85</issue>
          <fpage>2825</fpage>
          <lpage>2830</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmlr.org/papers/v12/pedregosa11a.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Loshchilov</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hutter</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Decoupled weight decay regularization</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on November 14, 2017</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1711.05101"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1711.05101</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chalkidis</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Jana</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hartung</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bommarito</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Androutsopoulos</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Katz</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Aletras</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>LexGLUE: A benchmark data set for legal language understanding in English</article-title>
          <year>2022</year>
          <conf-name>60th Annual Meeting of the Association for Computational Linguistics (ACL 2022)</conf-name>
          <conf-date>May 22-27, 2022</conf-date>
          <conf-loc>Dublin, Ireland</conf-loc>
          <pub-id pub-id-type="doi">10.18653/v1/2022.acl-long.297</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
