<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="review-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v26i1e51250</article-id>
      <article-id pub-id-type="pmid">38607660</article-id>
      <article-id pub-id-type="doi">10.2196/51250</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Application of AI in Multilevel Pain Assessment Using Facial Images: Systematic Review and Meta-Analysis</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Arab-Zozani</surname>
            <given-names>Morteza</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhang</surname>
            <given-names>Meina</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Huo</surname>
            <given-names>Jian</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0004-8968-7204</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Yu</surname>
            <given-names>Yan</given-names>
          </name>
          <degrees>MMS</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0005-8823-4219</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Lin</surname>
            <given-names>Wei</given-names>
          </name>
          <degrees>MMS</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0006-4366-6787</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Hu</surname>
            <given-names>Anmin</given-names>
          </name>
          <degrees>MMS</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6507-6423</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Wu</surname>
            <given-names>Chaoran</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Department of Anesthesia</institution>
            <institution>Shenzhen People's Hospital, The First Affiliated Hospital of Southern University of Science and Technology</institution>
            <institution>Shenzhen Key Medical Discipline</institution>
            <addr-line>No 1017, Dongmen North Road</addr-line>
            <addr-line>Shenzhen, 518020</addr-line>
            <country>China</country>
            <phone>86 18100282848</phone>
            <email>wu.chaoran@szhospital.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-5193-7813</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Boston Intelligent Medical Research Center</institution>
        <institution>Shenzhen United Scheme Technology Company Limited</institution>
        <addr-line>Boston, MA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Anesthesia</institution>
        <institution>Shenzhen People's Hospital, The First Affiliated Hospital of Southern University of Science and Technology</institution>
        <institution>Shenzhen Key Medical Discipline</institution>
        <addr-line>Shenzhen</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Shenzhen United Scheme Technology Company Limited</institution>
        <addr-line>Shenzhen</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>The Second Clinical Medical College</institution>
        <institution>Jinan University</institution>
        <addr-line>Shenzhen</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Chaoran Wu <email>wu.chaoran@szhospital.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>12</day>
        <month>4</month>
        <year>2024</year>
      </pub-date>
      <volume>26</volume>
      <elocation-id>e51250</elocation-id>
      <history>
        <date date-type="received">
          <day>26</day>
          <month>7</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>18</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>8</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>28</day>
          <month>2</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Jian Huo, Yan Yu, Wei Lin, Anmin Hu, Chaoran Wu. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 12.04.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2024/1/e51250" xlink:type="simple"/>
      <related-article related-article-type="correction-forward" xlink:title="This is a corrected version. See correction statement in:" xlink:href="https://www.jmir.org/2024/1/e59628" vol="26" page="e59628"> </related-article>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The continuous monitoring and recording of patients’ pain status is a major problem in current research on postoperative pain management. In the large number of original or review articles focusing on different approaches for pain assessment, many researchers have investigated how computer vision (CV) can help by capturing facial expressions. However, there is a lack of proper comparison of results between studies to identify current research gaps.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The purpose of this systematic review and meta-analysis was to investigate the diagnostic performance of artificial intelligence models for multilevel pain assessment from facial images.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The PubMed, Embase, IEEE, Web of Science, and Cochrane Library databases were searched for related publications before September 30, 2023. Studies that used facial images alone to estimate multiple pain values were included in the systematic review. A study quality assessment was conducted using the Quality Assessment of Diagnostic Accuracy Studies, 2nd edition tool. The performance of these studies was assessed by metrics including sensitivity, specificity, log diagnostic odds ratio (LDOR), and area under the curve (AUC). The intermodal variability was assessed and presented by forest plots.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 45 reports were included in the systematic review. The reported test accuracies ranged from 0.27 to 0.99, and the other metrics, including the mean squared error (MSE), mean absolute error (MAE), intraclass correlation coefficient (ICC), and Pearson correlation coefficient (PCC), ranged from 0.31 to 4.61, 0.24 to 2.8, 0.19 to 0.83, and 0.48 to 0.92, respectively. In total, 6 studies were included in the meta-analysis. Their combined sensitivity was 98% (95% CI 96%-99%), specificity was 98% (95% CI 97%-99%), LDOR was 7.99 (95% CI 6.73-9.31), and AUC was 0.99 (95% CI 0.99-1). The subgroup analysis showed that the diagnostic performance was acceptable, although imbalanced data were still emphasized as a major problem. All studies had at least one domain with a high risk of bias, and for 20% (9/45) of studies, there were no applicability concerns.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This review summarizes recent evidence in automatic multilevel pain estimation from facial expressions and compares the test accuracy of results in a meta-analysis. Promising performance for pain estimation from facial images was established by current CV algorithms. Weaknesses in current studies were also identified, suggesting that larger databases and metrics evaluating multiclass classification performance could improve future studies.</p>
        </sec>
        <sec sec-type="trial registration">
          <title>Trial Registration</title>
          <p>PROSPERO CRD42023418181; https://www.crd.york.ac.uk/prospero/display_record.php?RecordID=418181</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>computer vision</kwd>
        <kwd>facial image</kwd>
        <kwd>monitoring</kwd>
        <kwd>multilevel pain assessment</kwd>
        <kwd>pain</kwd>
        <kwd>postoperative</kwd>
        <kwd>status</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The definition of pain was revised to “an unpleasant sensory and emotional experience associated with, or resembling that associated with, actual or potential tissue damage” in 2020 [<xref ref-type="bibr" rid="ref1">1</xref>]. Acute postoperative pain management is important, as pain intensity and duration are critical influencing factors for the transition of acute pain to chronic postsurgical pain [<xref ref-type="bibr" rid="ref2">2</xref>]. To avoid the development of chronic pain, guidelines were promoted and discussed to ensure safe and adequate pain relief for patients, and clinicians were recommended to use a validated pain assessment tool to track patients’ responses [<xref ref-type="bibr" rid="ref3">3</xref>]. However, these tools, to some extent, depend on communication between physicians and patients, and continuous data cannot be provided [<xref ref-type="bibr" rid="ref4">4</xref>]. The continuous assessment and recording of patient pain intensity will not only reduce caregiver burden but also provide data for chronic pain research. Therefore, automatic and accurate pain measurements are necessary.</p>
      <p>Researchers have proposed different approaches to measuring pain intensity. Physiological signals, for example, electroencephalography and electromyography, have been used to estimate pain [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. However, it was reported that current pain assessment from physiological signals has difficulties isolating stress and pain with machine learning techniques, as they share conceptual and physiological similarities [<xref ref-type="bibr" rid="ref8">8</xref>]. Recent studies have also investigated pain assessment tools for certain patient subgroups. For example, people with deafness or an intellectual disability may not be able to communicate well with nurses, and an objective pain evaluation would be a better option [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Measuring pain intensity from patient behaviors, such as facial expressions, is also promising for most patients [<xref ref-type="bibr" rid="ref4">4</xref>]. As the most comfortable and convenient method, computer vision techniques require no attachments to patients and can monitor multiple participants using 1 device [<xref ref-type="bibr" rid="ref4">4</xref>]. However, pain intensity, which is important for pain research, is often not reported.</p>
      <p>With the growing trend of assessing pain intensity using artificial intelligence (AI), it is necessary to summarize current publications to determine the strengths and gaps of current studies. Existing research has reviewed machine learning applications for acute postoperative pain prediction, continuous pain detection, and pain intensity estimation [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. Input modalities, including facial recordings and physiological signals such as electroencephalography and electromyography, were also reviewed [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. There have also been studies focusing on deep learning approaches [<xref ref-type="bibr" rid="ref11">11</xref>]. AI was applied in children and infant pain evaluation as well [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. However, no study has focused on pain intensity measurement, and no comparison of test accuracy results has been made.</p>
      <p>Current AI applications in pain research can be categorized into 3 types: pain assessment, pain prediction and decision support, and pain self-management [<xref ref-type="bibr" rid="ref14">14</xref>]. We consider accurate and automatic pain assessment to be the most important area and the foundation of future pain research. In this study, we performed a systematic review and meta-analysis to assess the diagnostic performance of current publications for multilevel pain evaluation.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p>This study was registered with PROSPERO (International Prospective Register of Systematic Reviews; CRD42023418181) and carried out strictly following the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines [<xref ref-type="bibr" rid="ref17">17</xref>].</p>
      <sec>
        <title>Study Eligibility</title>
        <p>Studies that reported AI techniques for multiclass pain intensity classification were eligible. Records including nonhuman or infant participants or 2-class pain detection were excluded. Only studies using facial images of the test participants were accepted. Clinically used pain assessment tools, such as the visual analog scale (VAS) and numerical rating scale (NRS), and other pain intensity indicators, were rejected in the meta-analysis. <xref ref-type="boxed-text" rid="box1">Textbox 1</xref> presents the eligibility criteria.</p>
        <boxed-text id="box1" position="float">
          <title>Study eligibility criteria.</title>
          <p>
            <bold>Study characteristics and inclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Participants: children and adults aged 12 months or older</p>
            </list-item>
            <list-item>
              <p>Setting: no restrictions</p>
            </list-item>
            <list-item>
              <p>Index test: artificial intelligence models that measure pain intensity from facial images</p>
            </list-item>
            <list-item>
              <p>Reference standard: no restrictions for systematic review; Prkachin and Solomon pain intensity score for meta-analysis</p>
            </list-item>
            <list-item>
              <p>Study design: no need to specify</p>
            </list-item>
          </list>
          <p>
            <bold>Study characteristics and exclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Participants: infants aged 12 months or younger and animal subjects</p>
            </list-item>
            <list-item>
              <p>Setting: no need to specify</p>
            </list-item>
            <list-item>
              <p>Index test: studies that use other information such as physiological signals</p>
            </list-item>
            <list-item>
              <p>Reference standard: other pain evaluation tools, e.g., NRS, VAS, were excluded from meta-analysis</p>
            </list-item>
            <list-item>
              <p>Study design: reviews</p>
            </list-item>
          </list>
          <p>
            <bold>Report characteristics and inclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Year: published between January 1, 2012, and September 30, 2023</p>
            </list-item>
            <list-item>
              <p>Language: English only</p>
            </list-item>
            <list-item>
              <p>Publication status: published</p>
            </list-item>
            <list-item>
              <p>Test accuracy metrics: no restrictions for systematic reviews; studies that reported contingency tables were included for meta-analysis</p>
            </list-item>
          </list>
          <p>
            <bold>Report characteristics and exclusion criteria</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Year: no need to specify</p>
            </list-item>
            <list-item>
              <p>Language: no need to specify</p>
            </list-item>
            <list-item>
              <p>Publication status: preprints not accepted</p>
            </list-item>
            <list-item>
              <p>Test accuracy metrics: studies that reported insufficient metrics were excluded from meta-analysis</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Search Strategy</title>
        <p>In this systematic review, databases including PubMed, Embase, IEEE, Web of Science, and the Cochrane Library were searched until December 2022, and no restrictions were applied. Keywords were “artificial intelligence” AND “pain recognition.” <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> shows the detailed search strategy.</p>
      </sec>
      <sec>
        <title>Data Extraction</title>
        <p>A total of 2 reviewers screened titles and abstracts and selected eligible records independently to assess eligibility, and disagreements were resolved by discussion with a third collaborator. A consentient data extraction sheet was prespecified and used to summarize study characteristics independently. Table S5 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> shows the detailed items and explanations for data extraction. Diagnostic accuracy data were extracted into contingency tables, including true positives, false positives, false negatives, and true negatives. The data were used to calculate the pooled diagnostic performance of the different models. Some studies included multiple models, and these models were considered independent of each other.</p>
      </sec>
      <sec>
        <title>Study Quality Assessment</title>
        <p>All included studies were independently assessed by 2 reviewers using the Quality Assessment of Diagnostic Accuracy Studies 2 (QUADAS-2) tool [<xref ref-type="bibr" rid="ref18">18</xref>]. QUADAS-2 assesses bias risk across 4 domains, which are patient selection, index test, reference standard, and flow and timing. The first 3 domains are also assessed for applicability concerns. In the systematic review, a specific extension of QUADAS-2, namely, QUADAS-AI, was used to specify the signaling questions [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
      </sec>
      <sec>
        <title>Meta-Analysis</title>
        <p>Meta-analyses were conducted between different AI models. Models with different algorithms or training data were considered different. To evaluate the performance differences between models, the contingency tables during model validation were extracted. Studies that did not report enough diagnostic accuracy data were excluded from meta-analysis.</p>
        <p>Hierarchical summary receiver operating characteristic (SROC) curves were fitted to evaluate the diagnostic performance of AI models. These curves were plotted with 95% CIs and prediction regions around averaged sensitivity, specificity, and area under the curve estimates. Heterogeneity was assessed visually by forest plots. A funnel plot was constructed to evaluate the risk of bias.</p>
        <p>Subgroup meta-analyses were conducted to evaluate the performance differences at both the model level and task level, and subgroups were created based on different tasks and the proportion of positive and negative samples.</p>
        <p>All statistical analyses and plots were produced using RStudio (version 4.2.2; R Core Team) and the R package <italic>meta4diag</italic> (version 2.1.1; Guo J and Riebler A) [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Study Selection and Included Study Characteristics</title>
        <p>A flow diagram representing the study selection process is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. After removing 1039 duplicates, the titles and abstracts of a total of 5653 papers were screened, and the percentage agreement of title or abstract screening was 97%. After screening, 51 full-text reports were assessed for eligibility, among which 45 reports were included in the systematic review [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref65">65</xref>]. The percentage agreement of the full-text review was 87%. In 40 of the included studies, contingency tables could not be made. Meta-analyses were conducted based on 8 AI models extracted from 6 studies. Individual study characteristics included in the systematic review are provided in <xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref>. The facial feature extraction method can be categorized into 2 classes: geometrical features (GFs) and deep features (DFs). One typical method of extracting GFs is to calculate the distance between facial landmarks. DFs are usually extracted by convolution operations. A total of 20 studies included temporal information, but most of them (18) extracted temporal information through the 3D convolution of video sequences. Feature transformation was also commonly applied to reduce the time for training or fuse features extracted by different methods before inputting them into the classifier. For classifiers, support vector machines (SVMs) and convolutional neural networks (CNNs) were mostly used. <xref ref-type="table" rid="table1">Table 1</xref> presents the model designs of the included studies.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Preferred Reporting Items for Systematic Review and Meta-Analysis (PRISMA) flowchart of study selection.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e51250_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Model designs of studies included in the systematic review.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="300"/>
            <col width="180"/>
            <col width="170"/>
            <col width="180"/>
            <col width="170"/>
            <thead>
              <tr valign="top">
                <td>Author and year</td>
                <td>Facial feature descriptor</td>
                <td>Temporal features<sup>a</sup></td>
                <td>Feature transformation</td>
                <td>Classification method</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Hammal and Cohn (2012) [<xref ref-type="bibr" rid="ref21">21</xref>]</td>
                <td>CAPP</td>
                <td>–</td>
                <td>Log-normal filters</td>
                <td>SVM<sup>b</sup></td>
              </tr>
              <tr valign="top">
                <td>Adibuzzaman et al (2015) [<xref ref-type="bibr" rid="ref22">22</xref>]</td>
                <td>PCA</td>
                <td>–</td>
                <td>None</td>
                <td>Euclidean distance; angular distance; SVM</td>
              </tr>
              <tr valign="top">
                <td>Majumder et al (2015) [<xref ref-type="bibr" rid="ref23">23</xref>]</td>
                <td>GF<sup>c</sup>; DDF</td>
                <td>+</td>
                <td>None</td>
                <td>GMM<sup>d</sup>; SVM</td>
              </tr>
              <tr valign="top">
                <td>Rathee and Ganotra (2015) [<xref ref-type="bibr" rid="ref24">24</xref>]</td>
                <td>TPS<sup>e</sup></td>
                <td>+</td>
                <td>DML<sup>f</sup></td>
                <td>SVM</td>
              </tr>
              <tr valign="top">
                <td>Sikka et al (2015) [<xref ref-type="bibr" rid="ref25">25</xref>]</td>
                <td>CERT</td>
                <td>++</td>
                <td>None</td>
                <td>Linear regression</td>
              </tr>
              <tr valign="top">
                <td>Rathee and Ganotra (2016) [<xref ref-type="bibr" rid="ref26">26</xref>]</td>
                <td>Gabor; LBP; HOG</td>
                <td>–</td>
                <td>MDML<sup>g</sup></td>
                <td>SVM</td>
              </tr>
              <tr valign="top">
                <td>Zhou et al (2016) [<xref ref-type="bibr" rid="ref27">27</xref>]</td>
                <td>AAM<sup>h</sup></td>
                <td>++</td>
                <td>Flattening</td>
                <td>RCNN</td>
              </tr>
              <tr valign="top">
                <td>Egede et al (2017) [<xref ref-type="bibr" rid="ref28">28</xref>]</td>
                <td>GF; HOG; CNN</td>
                <td>++</td>
                <td>RVR<sup>i</sup></td>
                <td>RVR</td>
              </tr>
              <tr valign="top">
                <td>Martinez et al (2017) [<xref ref-type="bibr" rid="ref29">29</xref>]</td>
                <td>PSPI<sup>j</sup>; I-FES<sup>k</sup></td>
                <td>++</td>
                <td>None</td>
                <td>LSTM<sup>l</sup>-RNN; RNN-HCRF<sup>m</sup></td>
              </tr>
              <tr valign="top">
                <td>Bourou et al (2018) [<xref ref-type="bibr" rid="ref30">30</xref>]</td>
                <td>GF; Color</td>
                <td>–</td>
                <td>Statistical metrics</td>
                <td>GLMM<sup>n</sup></td>
              </tr>
              <tr valign="top">
                <td>Haque et al (2018) [<xref ref-type="bibr" rid="ref31">31</xref>]</td>
                <td>CNN-RGB</td>
                <td>++</td>
                <td>Fine-tuned VGGFace</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Semwal et al (2018) [<xref ref-type="bibr" rid="ref32">32</xref>]</td>
                <td>2D-Conv</td>
                <td>–</td>
                <td>Maxpooling</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2018) [<xref ref-type="bibr" rid="ref33">33</xref>]</td>
                <td>Pretrained CASIA</td>
                <td>++</td>
                <td>VLAD<sup>o</sup></td>
                <td>BE</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2018) [<xref ref-type="bibr" rid="ref34">34</xref>]</td>
                <td>3D-convolution</td>
                <td>++</td>
                <td>Average pooling</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Wang and Sun (2018) [<xref ref-type="bibr" rid="ref35">35</xref>]</td>
                <td>3D-convolution; HOG; DFGS</td>
                <td>++</td>
                <td>SVR<sup>p</sup></td>
                <td>SVR</td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2019) [<xref ref-type="bibr" rid="ref36">36</xref>]</td>
                <td>Fine-tuned VGGFace</td>
                <td>–</td>
                <td>None</td>
                <td>RNN</td>
              </tr>
              <tr valign="top">
                <td>Casti et al (2019) [<xref ref-type="bibr" rid="ref37">37</xref>]</td>
                <td>LBP</td>
                <td>–</td>
                <td>MDS<sup>q</sup></td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Lee and Wang (2019) [<xref ref-type="bibr" rid="ref38">38</xref>]</td>
                <td>CNN-RGB</td>
                <td>–</td>
                <td>None</td>
                <td>ELM<sup>r</sup></td>
              </tr>
              <tr valign="top">
                <td>Saha et al (2019) [<xref ref-type="bibr" rid="ref39">39</xref>]</td>
                <td>PCA</td>
                <td>–</td>
                <td>None</td>
                <td>NR</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2019) [<xref ref-type="bibr" rid="ref40">40</xref>]</td>
                <td>3D-convolution</td>
                <td>++</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2020) [<xref ref-type="bibr" rid="ref41">41</xref>]</td>
                <td>VGGFace</td>
                <td>–</td>
                <td>PCA; DNN</td>
                <td>EDLM<sup>s</sup></td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2020) [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td>VGGFace</td>
                <td>–</td>
                <td>PCA</td>
                <td>EDLM<sup>s</sup></td>
              </tr>
              <tr valign="top">
                <td>Dragomir et al (2020) [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td>ResNet</td>
                <td>–</td>
                <td>None</td>
                <td>ResNet</td>
              </tr>
              <tr valign="top">
                <td>Huang et al (2020) [<xref ref-type="bibr" rid="ref44">44</xref>]</td>
                <td>AAM</td>
                <td>++</td>
                <td>RNN-GRU</td>
                <td>SVM</td>
              </tr>
              <tr valign="top">
                <td>Mallol-Ragolta et al (2020) [<xref ref-type="bibr" rid="ref45">45</xref>]</td>
                <td>GF; HOG; HOG; OpenFace; VGGFace; ResNet-50</td>
                <td>–</td>
                <td>None</td>
                <td>LSTM-RNN</td>
              </tr>
              <tr valign="top">
                <td>Peng et al (2020) [<xref ref-type="bibr" rid="ref46">46</xref>]</td>
                <td>DCNN<sup>t</sup></td>
                <td>–</td>
                <td>Probabilistic combination</td>
                <td>Multiscale deep fusion network</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian et al (2020) [<xref ref-type="bibr" rid="ref47">47</xref>]</td>
                <td>GSM<sup>u</sup></td>
                <td>++</td>
                <td>Aggregation</td>
                <td>SNN</td>
              </tr>
              <tr valign="top">
                <td>Xu and de Sa (2020) [<xref ref-type="bibr" rid="ref48">48</xref>]</td>
                <td>Handcrafted</td>
                <td>++</td>
                <td>None</td>
                <td>NN</td>
              </tr>
              <tr valign="top">
                <td>Pikulkaew et al (2021) [<xref ref-type="bibr" rid="ref49">49</xref>]</td>
                <td>None</td>
                <td>–</td>
                <td>None</td>
                <td>DCNN</td>
              </tr>
              <tr valign="top">
                <td>Rezaei et al (2021) [<xref ref-type="bibr" rid="ref50">50</xref>]</td>
                <td>CNN</td>
                <td>++</td>
                <td>Flattening</td>
                <td>NN</td>
              </tr>
              <tr valign="top">
                <td>Semwal and Londhe (2021) [<xref ref-type="bibr" rid="ref51">51</xref>]</td>
                <td>CNN</td>
                <td>–</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Semwal and Londhe (2021) [<xref ref-type="bibr" rid="ref52">52</xref>]</td>
                <td>VGGNet; MobileNet; GoogLeNet</td>
                <td>–</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Szczapa et al (2021) [<xref ref-type="bibr" rid="ref53">53</xref>]</td>
                <td>Landmark trajectory</td>
                <td>++</td>
                <td>None</td>
                <td>SVR</td>
              </tr>
              <tr valign="top">
                <td>Ting et al (2021) [<xref ref-type="bibr" rid="ref54">54</xref>]</td>
                <td>None</td>
                <td>++</td>
                <td>DOML<sup>v</sup></td>
                <td>NN</td>
              </tr>
              <tr valign="top">
                <td>Xin et al (2021) [<xref ref-type="bibr" rid="ref55">55</xref>]</td>
                <td>CNN</td>
                <td>++</td>
                <td>None</td>
                <td>LIAN<sup>w</sup></td>
              </tr>
              <tr valign="top">
                <td>Alghamdi and Alaghband (2022) [<xref ref-type="bibr" rid="ref56">56</xref>]</td>
                <td>OpenCV</td>
                <td>–</td>
                <td>Flattening</td>
                <td>Shallow CNN</td>
              </tr>
              <tr valign="top">
                <td>Barua et al (2022) [<xref ref-type="bibr" rid="ref57">57</xref>]</td>
                <td>P-DarkNet19</td>
                <td>–</td>
                <td>INCA</td>
                <td>k-NN</td>
              </tr>
              <tr valign="top">
                <td>Fontaine et al (2022) [<xref ref-type="bibr" rid="ref58">58</xref>]</td>
                <td>OpenCV</td>
                <td>–</td>
                <td>None</td>
                <td>CNN; SVM</td>
              </tr>
              <tr valign="top">
                <td>Hosseini et al (2022) [<xref ref-type="bibr" rid="ref59">59</xref>]</td>
                <td>Convolution</td>
                <td>–</td>
                <td>None</td>
                <td>DCNN</td>
              </tr>
              <tr valign="top">
                <td>Huang et al (2022) [<xref ref-type="bibr" rid="ref60">60</xref>]</td>
                <td>3D-CNN (S3D-G)</td>
                <td>++</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Islamadina et al (2022) [<xref ref-type="bibr" rid="ref61">61</xref>]</td>
                <td>CNN</td>
                <td>–</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Swetha et al (2022) [<xref ref-type="bibr" rid="ref62">62</xref>]</td>
                <td>None</td>
                <td>–</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Wu et al (2022) [<xref ref-type="bibr" rid="ref63">63</xref>]</td>
                <td>CNN</td>
                <td>+</td>
                <td>Siamese network; BiLSTM<sup>x</sup></td>
                <td>NN</td>
              </tr>
              <tr valign="top">
                <td>Ismail and Waseem (2023) [<xref ref-type="bibr" rid="ref64">64</xref>]</td>
                <td>CNN</td>
                <td>–</td>
                <td>None</td>
                <td>CNN</td>
              </tr>
              <tr valign="top">
                <td>Vu and Beurton-Aimar (2023) [<xref ref-type="bibr" rid="ref65">65</xref>]</td>
                <td>CNN</td>
                <td>–</td>
                <td>Average pooling</td>
                <td>LSTM network</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>No temporal features are indicated by the – symbol, temporal information extracted from 2 images at different times by +, and deep temporal features extracted through the convolution of video sequences by ++.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>GF: geometric feature.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>GMM: gaussian mixture model.</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>TPS: thin plate spline.</p>
            </fn>
            <fn id="table1fn6">
              <p><sup>f</sup>DML: distance metric learning.</p>
            </fn>
            <fn id="table1fn7">
              <p><sup>g</sup>MDML: multiview distance metric learning.</p>
            </fn>
            <fn id="table1fn8">
              <p><sup>h</sup>AAM: active appearance model.</p>
            </fn>
            <fn id="table1fn9">
              <p><sup>i</sup>RVR: relevance vector regressor.</p>
            </fn>
            <fn id="table1fn10">
              <p><sup>j</sup>PSPI: Prkachin and Solomon pain intensity.</p>
            </fn>
            <fn id="table1fn11">
              <p><sup>k</sup>I-FES: individual facial expressiveness score.</p>
            </fn>
            <fn id="table1fn12">
              <p><sup>l</sup>LSTM: long short-term memory.</p>
            </fn>
            <fn id="table1fn13">
              <p><sup>m</sup>HCRF: hidden conditional random field.</p>
            </fn>
            <fn id="table1fn14">
              <p><sup>n</sup>GLMM: generalized linear mixed model.</p>
            </fn>
            <fn id="table1fn15">
              <p><sup>o</sup>VLAD: vector of locally aggregated descriptor.</p>
            </fn>
            <fn id="table1fn16">
              <p><sup>p</sup>SVR: support vector regression.</p>
            </fn>
            <fn id="table1fn17">
              <p><sup>q</sup>MDS: multidimensional scaling.</p>
            </fn>
            <fn id="table1fn18">
              <p><sup>r</sup>ELM: extreme learning machine.</p>
            </fn>
            <fn id="table1fn19">
              <p><sup>s</sup>Labeled to distinguish different architectures of ensembled deep learning models.</p>
            </fn>
            <fn id="table1fn20">
              <p><sup>t</sup>DCNN: deep convolutional neural network.</p>
            </fn>
            <fn id="table1fn21">
              <p><sup>u</sup>GSM: gaussian scale mixture.</p>
            </fn>
            <fn id="table1fn22">
              <p><sup>v</sup>DOML: distance ordering metric learning.</p>
            </fn>
            <fn id="table1fn23">
              <p><sup>w</sup>LIAN: locality and identity aware network.</p>
            </fn>
            <fn id="table1fn24">
              <p><sup>x</sup>BiLSTM: bidirectional long short-term memory.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Characteristics of model training and validation.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="250"/>
            <col width="120"/>
            <col width="120"/>
            <col width="80"/>
            <col width="120"/>
            <col width="120"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td>Author and year</td>
                <td>Database</td>
                <td>Objects</td>
                <td>Output levels</td>
                <td>Validation method</td>
                <td>External validation</td>
                <td>Evaluation metrics</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Hammal and Cohn (2012) [<xref ref-type="bibr" rid="ref21">21</xref>]</td>
                <td>UNBC<sup>a</sup></td>
                <td>Frame</td>
                <td>4</td>
                <td>5-fold; LOSO<sup>b</sup></td>
                <td>No</td>
                <td>ICC<sup>c</sup> 0.85, 0.55; <italic>F</italic><sub>1</sub>-score 0.96, 0.67</td>
              </tr>
              <tr valign="top">
                <td>Adibuzzaman et al (2015) [<xref ref-type="bibr" rid="ref22">22</xref>]</td>
                <td>Self-prepared</td>
                <td>Image</td>
                <td>3</td>
                <td>10-fold</td>
                <td>Yes</td>
                <td>Sensitivity 0.53; Specificity 0.7</td>
              </tr>
              <tr valign="top">
                <td>Majumder et al (2015) [<xref ref-type="bibr" rid="ref23">23</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>5-fold</td>
                <td>No</td>
                <td>Accuracy 87.43</td>
              </tr>
              <tr valign="top">
                <td>Rathee and Ganotra (2015) [<xref ref-type="bibr" rid="ref24">24</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOO; 10-fold</td>
                <td>No</td>
                <td>Accuracy 0.96; CT<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>Sikka et al (2015) [<xref ref-type="bibr" rid="ref25">25</xref>]</td>
                <td>Self-prepared</td>
                <td>Sequence</td>
                <td>11</td>
                <td>LOSO</td>
                <td>No</td>
                <td>AUC<sup>e</sup> 0.94; Cohen κ 0.61</td>
              </tr>
              <tr valign="top">
                <td>Rathee and Ganotra (2016) [<xref ref-type="bibr" rid="ref26">26</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>5-fold</td>
                <td>No</td>
                <td>Accuracy 0.75</td>
              </tr>
              <tr valign="top">
                <td>Zhou et al (2016) [<xref ref-type="bibr" rid="ref27">27</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MSE<sup>f</sup> 1.54; PCC<sup>g</sup> 0.65</td>
              </tr>
              <tr valign="top">
                <td>Egede et al (2017) [<xref ref-type="bibr" rid="ref28">28</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>RMSE<sup>h</sup>&lt;1; PCC 0.67</td>
              </tr>
              <tr valign="top">
                <td>Martinez et al (2017) [<xref ref-type="bibr" rid="ref29">29</xref>]</td>
                <td>UNBC</td>
                <td>Sequence</td>
                <td>11</td>
                <td>Split</td>
                <td>No</td>
                <td>MAE<sup>i</sup> 2.8; ICC<sup>j</sup> 0.19</td>
              </tr>
              <tr valign="top">
                <td>Bourou et al (2018) [<xref ref-type="bibr" rid="ref30">30</xref>]</td>
                <td>BioVid</td>
                <td>Frame</td>
                <td>5</td>
                <td>10-fold</td>
                <td>No</td>
                <td>Accuracy 0.27; RCI 0.03</td>
              </tr>
              <tr valign="top">
                <td>Haque et al (2018) [<xref ref-type="bibr" rid="ref31">31</xref>]</td>
                <td>MIntPain</td>
                <td>Frame</td>
                <td>5</td>
                <td>5-fold</td>
                <td>No</td>
                <td>CT</td>
              </tr>
              <tr valign="top">
                <td>Semwal and Londhe (2018) [<xref ref-type="bibr" rid="ref32">32</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>3</td>
                <td>Split</td>
                <td>No</td>
                <td>CT; Accuracy 0.93</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2018) [<xref ref-type="bibr" rid="ref33">33</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MSE 0.69; PCC 0.81</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2018) [<xref ref-type="bibr" rid="ref34">34</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MSE 0.53; ICC 0.75; PCC 0.84</td>
              </tr>
              <tr valign="top">
                <td>Wang and Sun (2018) [<xref ref-type="bibr" rid="ref35">35</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>RMSE 0.94; PCC 0.68</td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2019) [<xref ref-type="bibr" rid="ref36">36</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>LOSO</td>
                <td>No</td>
                <td>Accuracy 0.75; AUC 0.83; MSE 0.95</td>
              </tr>
              <tr valign="top">
                <td>Casti et al (2019) [<xref ref-type="bibr" rid="ref37">37</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>11</td>
                <td>Split</td>
                <td>No</td>
                <td>Recall 0.92; Precision 0.82</td>
              </tr>
              <tr valign="top">
                <td>Lee and Wang (2019) [<xref ref-type="bibr" rid="ref38">38</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>5-fold</td>
                <td>No</td>
                <td>MSE 1.22; PCC 0.5</td>
              </tr>
              <tr valign="top">
                <td>Saha et al (2019) [<xref ref-type="bibr" rid="ref39">39</xref>]</td>
                <td>Self-prepared</td>
                <td>Image</td>
                <td>3</td>
                <td>10-fold</td>
                <td>No</td>
                <td>Accuracy 0.71; CT</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian and Hadid (2019) [<xref ref-type="bibr" rid="ref40">40</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>5; 16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MSE 0.32; PCC 0.92; AUC 0.86</td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2020) [<xref ref-type="bibr" rid="ref41">41</xref>]</td>
                <td>MIntPain; UNBC</td>
                <td>Frame</td>
                <td>5</td>
                <td>10-fold</td>
                <td>No</td>
                <td>Accuracy 0.89; AUC 0.93</td>
              </tr>
              <tr valign="top">
                <td>Bargshady et al (2020) [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>10-fold</td>
                <td>No</td>
                <td>Accuracy 0.91; AUC 0.98</td>
              </tr>
              <tr valign="top">
                <td>Dragomir et al (2020) [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td>BioVid</td>
                <td>Frame</td>
                <td>5</td>
                <td>CV</td>
                <td>No</td>
                <td>Accuracy 36.6</td>
              </tr>
              <tr valign="top">
                <td>Huang et al (2020) [<xref ref-type="bibr" rid="ref44">44</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>6</td>
                <td>Split</td>
                <td>No</td>
                <td>PCC 0.89; ICC 0.72; MSE 0.21; MAE 0.24</td>
              </tr>
              <tr valign="top">
                <td>Mallol-Ragolta et al (2020) [<xref ref-type="bibr" rid="ref45">45</xref>]</td>
                <td>EmoPain</td>
                <td>Frame</td>
                <td>11</td>
                <td>Split</td>
                <td>No</td>
                <td>CCC<sup>k</sup> 0.174</td>
              </tr>
              <tr valign="top">
                <td>Peng et al (2020) [<xref ref-type="bibr" rid="ref46">46</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>5</td>
                <td>NR</td>
                <td>No</td>
                <td>Accuracy 0.80; PCC 0.6; MAE 0.57; MSE 0.82</td>
              </tr>
              <tr valign="top">
                <td>Tavakolian et al (2020) [<xref ref-type="bibr" rid="ref47">47</xref>]</td>
                <td>BioVid; UNBC</td>
                <td>Frame</td>
                <td>5; 16</td>
                <td>LOSO</td>
                <td>Yes</td>
                <td>MSE 1.03, 0.92; AUC 0.69, 0.71</td>
              </tr>
              <tr valign="top">
                <td>Xu and de Sa (2020) [<xref ref-type="bibr" rid="ref48">48</xref>]</td>
                <td>UNBC</td>
                <td>Sequence</td>
                <td>6; 11; 16; 16</td>
                <td>5-fold</td>
                <td>No</td>
                <td>MSE 4.61; MAE 1.73; ICC 0.61; PCC 0.67</td>
              </tr>
              <tr valign="top">
                <td>Pikulkaew et al (2021) [<xref ref-type="bibr" rid="ref49">49</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>3</td>
                <td>NR</td>
                <td>No</td>
                <td>Accuracy 0.93</td>
              </tr>
              <tr valign="top">
                <td>Rezaei et al (2021) [<xref ref-type="bibr" rid="ref50">50</xref>]</td>
                <td>UofR; UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>5-fold</td>
                <td>Yes</td>
                <td>PCC 0.48-0.7; ICC 0.31-0.59<sup>l</sup></td>
              </tr>
              <tr valign="top">
                <td>Semwal and Londhe (2021) [<xref ref-type="bibr" rid="ref51">51</xref>]</td>
                <td>Self-prepared</td>
                <td>Frame</td>
                <td>4</td>
                <td>5-fold</td>
                <td>No</td>
                <td>CT; Accuracy 0.97</td>
              </tr>
              <tr valign="top">
                <td>Semwal and Londhe (2021) [<xref ref-type="bibr" rid="ref52">52</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>5</td>
                <td>10-fold</td>
                <td>No</td>
                <td>CT; <italic>F</italic><sub>1</sub>-score 0.91</td>
              </tr>
              <tr valign="top">
                <td>Szczapa et al (2021) [<xref ref-type="bibr" rid="ref53">53</xref>]</td>
                <td>UNBC</td>
                <td>Sequence</td>
                <td>11</td>
                <td>5-fold; LOO; LOSO</td>
                <td>No</td>
                <td>MAE 2.44; RMSE 3.15</td>
              </tr>
              <tr valign="top">
                <td>Ting et al (2021) [<xref ref-type="bibr" rid="ref54">54</xref>]</td>
                <td>UNBC</td>
                <td>Sequence</td>
                <td>11</td>
                <td>5-fold; LOSO</td>
                <td>No</td>
                <td>MAE 1.62; MSE 4.39; ICC 0.66</td>
              </tr>
              <tr valign="top">
                <td>Xin et al (2021) [<xref ref-type="bibr" rid="ref55">55</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>LOSO</td>
                <td>No</td>
                <td>Accuracy 0.89; ICC 0.61; PCC 0.81; MAE 0.45; MSE 0.66</td>
              </tr>
              <tr valign="top">
                <td>Alghamdi and Alaghband (2022) [<xref ref-type="bibr" rid="ref56">56</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>Split</td>
                <td>No</td>
                <td>Accuracy 0.99</td>
              </tr>
              <tr valign="top">
                <td>Barua et al (2022) [<xref ref-type="bibr" rid="ref57">57</xref>]</td>
                <td>DISFA; UNBC</td>
                <td>Frame</td>
                <td>4</td>
                <td>10-fold</td>
                <td>No</td>
                <td>CT; Accuracy 0.95</td>
              </tr>
              <tr valign="top">
                <td>Fontaine et al (2022) [<xref ref-type="bibr" rid="ref58">58</xref>]</td>
                <td>Self-prepared</td>
                <td>Frame</td>
                <td>4</td>
                <td>Split</td>
                <td>No</td>
                <td>Sensitivity 0.90</td>
              </tr>
              <tr valign="top">
                <td>Hosseini et al (2022) [<xref ref-type="bibr" rid="ref59">59</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>7</td>
                <td>NR</td>
                <td>No</td>
                <td>Accuracy 0.85; AUC 0.88; PCC 0.83</td>
              </tr>
              <tr valign="top">
                <td>Huang et al (2022) [<xref ref-type="bibr" rid="ref60">60</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MAE 0.4; MSE 0.76; PCC 0.82</td>
              </tr>
              <tr valign="top">
                <td>Islamadina et al (2022) [<xref ref-type="bibr" rid="ref61">61</xref>]</td>
                <td>MIntPain</td>
                <td>Frame</td>
                <td>5</td>
                <td>CV</td>
                <td>No</td>
                <td>CT; Accuracy 1.0</td>
              </tr>
              <tr valign="top">
                <td>Swetha et al (2022) [<xref ref-type="bibr" rid="ref62">62</xref>]</td>
                <td>Self-prepared</td>
                <td>Frame</td>
                <td>4</td>
                <td>NR</td>
                <td>No</td>
                <td>Accuracy 0.75</td>
              </tr>
              <tr valign="top">
                <td>Wu et al (2022) [<xref ref-type="bibr" rid="ref63">63</xref>]</td>
                <td>Self-prepared</td>
                <td>Frame; sequence</td>
                <td>3</td>
                <td>Split</td>
                <td>No</td>
                <td>Accuracy 0.81</td>
              </tr>
              <tr valign="top">
                <td>Ismail and Waseem (2023) [<xref ref-type="bibr" rid="ref64">64</xref>]</td>
                <td>UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>5-fold</td>
                <td>No</td>
                <td>MAE 0.36; MSE 1.73; Accuracy 0.82</td>
              </tr>
              <tr valign="top">
                <td>Vu and Beurton-Aimar (2023) [<xref ref-type="bibr" rid="ref65">65</xref>]</td>
                <td>DISFA; UNBC</td>
                <td>Frame</td>
                <td>16</td>
                <td>LOSO</td>
                <td>No</td>
                <td>MSE 0.57; MAE 0.35; ICC 0.83; PCC 0.81</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>UNBC: University of Northern British Columbia-McMaster shoulder pain expression archive database.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>LOSO: leave one subject out cross-validation.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>ICC: intraclass correlation coefficient.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>CT: contingency table.</p>
            </fn>
            <fn id="table2fn5">
              <p><sup>e</sup>AUC: area under the curve.</p>
            </fn>
            <fn id="table2fn6">
              <p><sup>f</sup>MSE: mean standard error.</p>
            </fn>
            <fn id="table2fn7">
              <p><sup>g</sup>PCC: Pearson correlation coefficient.</p>
            </fn>
            <fn id="table2fn8">
              <p><sup>h</sup>RMSE: root mean standard error.</p>
            </fn>
            <fn id="table2fn9">
              <p><sup>i</sup>MAE: mean absolute error.</p>
            </fn>
            <fn id="table2fn10">
              <p><sup>j</sup>ICC: intraclass correlation coefficient.</p>
            </fn>
            <fn id="table2fn11">
              <p><sup>k</sup>CCC: concordance correlation coefficient.</p>
            </fn>
            <fn id="table2fn12">
              <p><sup>l</sup>Reported both external and internal validation results and summarized as intervals.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p><xref ref-type="table" rid="table2">Table 2</xref> summarizes the characteristics of model training and validation. Most studies used publicly available databases, for example, the University of Northern British Columbia-McMaster shoulder pain expression archive database [<xref ref-type="bibr" rid="ref57">57</xref>]. Table S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> summarizes the public databases. A total of 7 studies used self-prepared databases. Frames from video sequences were the most used test objects, as 37 studies output frame-level pain intensity, while few measure pain intensity from video sequences or photos. It was common that a study redefined pain levels to have fewer classes than ground-truth labels. For model validation, cross-validation and leave-one-subject-out validation were commonly used. Only 3 studies performed external validation. For reporting test accuracies, different evaluation metrics were used, including sensitivity, specificity, mean absolute error (MAE), mean standard error (MSE), Pearson correlation coefficient (PCC), and intraclass coefficient (ICC).</p>
      </sec>
      <sec>
        <title>Methodological Quality of Included Studies</title>
        <p>Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> presents the study quality summary, as assessed by QUADAS-2. There was a risk of bias in all studies, specifically in terms of patient selection, caused by 2 issues. First, the training data are highly imbalanced, and any method to adjust the data distribution may introduce bias. Next, the QUADAS-AI correspondence letter [<xref ref-type="bibr" rid="ref19">19</xref>] specifies that preprocessing of images that changes the image size or resolution may introduce bias. However, the applicability concern is low, as the images properly represent the feeling of pain. Studies that used cross-fold validation or leave-one-out cross-validation were considered to have a low risk of bias. Although the Prkachin and Solomon pain intensity (PSPI) score was used by most of the studies, its ability to represent individual pain levels was not clinically validated; as such, the risk of bias and applicability concerns were considered high when the PSPI score was used as the index test. As an advantage of computer vision techniques, the time interval between the index tests was short and was assessed as having a low risk of bias. Risk proportions are shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. For all 315 entries, 39% (124) were assessed as high-risk. In total, 5 studies had the lowest risk of bias, with 6 domains assessed as low risk [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref59">59</xref>].</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Risk of bias and applicability proportions. QUADAS-2: Quality Assessment of Diagnostic Accuracy Studies 2.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e51250_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Pooled Performance of Included Models</title>
        <p>In 6 studies included in the meta-analysis, there were 8 different models. The characteristics of these models are summarized in Table S1 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Classification of PSPI scores greater than 0, 2, 3, 6, and 9 was selected and considered as different tasks to create contingency tables. The test performance is shown in <xref rid="figure3" ref-type="fig">Figure 3</xref> as hierarchical SROC curves; 27 contingency tables were extracted from 8 models. The sensitivity, specificity, and LDOR were calculated, and the combined sensitivity was 98% (95% CI 96%-99%), the specificity was 98% (95% CI 97%-99%), the LDOR was 7.99 (95% CI 6.73-9.31) and the AUC was 0.99 (95% CI 0.99-1).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Summary receiver operating characteristic (SROC) curve plots of the summarized results.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e51250_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Subgroup Analysis</title>
        <p>In this study, subgroup analysis was conducted to investigate the performance differences within models. A total of 8 models were separated and summarized as a forest plot in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. For model 1, the pooled sensitivity, specificity, and LDOR were 95% (95% CI 86%-99%), 99% (95% CI 98%-100%), and 8.38 (95% CI 6.09-11.19), respectively. For model 2, the pooled sensitivity, specificity, and LDOR were 94% (95% CI 84%-99%), 95% (95% CI 88%-99%), and 6.23 (95% CI 3.52-9.04), respectively. For model 3, the pooled sensitivity, specificity, and LDOR were 100% (95% CI 99%-100%), 100% (95% CI 99%-100%), and 11.55 (95% CI 8.82-14.43), respectively. For model 4, the pooled sensitivity, specificity, and LDOR were 83% (95% CI 43%-99%), 94% (95% CI 79%-99%), and 5.14 (95% CI 0.93-9.31), respectively. For model 5, the pooled sensitivity, specificity, and LDOR were 92% (95% CI 68%-99%), 94% (95% CI 78%-99%), and 6.12 (95% CI 1.82-10.16), respectively. For model 6, the pooled sensitivity, specificity, and LDOR were 94% (95% CI 74%-100%), 94% (95% CI 78%-99%), and 6.59 (95% CI 2.21-11.13), respectively. For model 7, the pooled sensitivity, specificity, and LDOR were 98% (95% CI 90%-100%), 97% (95% CI 87%-100%), and 8.31 (95% CI 4.3-12.29), respectively. For model 8, the pooled sensitivity, specificity, and LDOR were 98% (95% CI 93%-100%), 97% (95% CI 88%-100%), and 8.65 (95% CI 4.84-12.67), respectively.</p>
      </sec>
      <sec>
        <title>Heterogeneity Analysis</title>
        <p>The meta-analysis results indicated that AI models are applicable for estimating pain intensity from facial images. However, extreme heterogeneity existed within the models except for models 3 and 5, which were proposed by Rathee and Ganotra [<xref ref-type="bibr" rid="ref24">24</xref>] and Semwal and Londhe [<xref ref-type="bibr" rid="ref32">32</xref>]. A funnel plot is presented in <xref rid="figure4" ref-type="fig">Figure 4</xref>. A high risk of bias was observed.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>In the funnel plot of the test results, significant heterogeneity was observed. DOR: diagnostic odds ratio; LDOR: log diagnostic odds ratio.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e51250_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>Pain management has long been a critical problem in clinical practice, and the use of AI may be a solution. For acute pain management, automatic measurement of pain can reduce the burden on caregivers and provide timely warnings. For chronic pain management, as specified by Glare et al [<xref ref-type="bibr" rid="ref2">2</xref>], further research is needed, and measurements of pain presence, intensity, and quality are one of the issues to be solved for chronic pain studies. Computer vision could improve pain monitoring through real-time detection for clinical use and data recording for prospective pain studies. To our knowledge, this is the first meta-analysis dedicated to AI performance in multilevel pain level classification.</p>
      <p>In this study, one model’s performance at specific pain levels was described by stacking multiple classes into one to make each task a binary classification problem. After careful selection in both the medical and engineering databases, we observed promising results of AI in evaluating multilevel pain intensity through facial images, with high sensitivity (98%), specificity (98%), LDOR (7.99), and AUC (0.99). It is reasonable to believe that AI can accurately evaluate pain intensity from facial images. Moreover, the study quality and risk of bias were evaluated using an adapted QUADAS-2 assessment tool, which is a strength of this study.</p>
      <p>To investigate the source of heterogeneity, it was assumed that a well-designed model should have similar effect sizes across different levels, and a subgroup meta-analysis was conducted. The funnel and forest plots exhibited extreme heterogeneity. The model’s performance at specific pain levels was described and summarized by a forest plot. Within-model heterogeneity was observed in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref57">57</xref>] except for 2 models. Models 3 and 5 were different in many aspects, including their algorithms and validation methods, but were both trained with a relatively small data set, and the proportion of positive and negative classes was relatively close to 1. Training with imbalanced data is a critical problem in computer vision studies [<xref ref-type="bibr" rid="ref66">66</xref>]; for example, in the University of Northern British Columbia-McMaster pain data set, fewer than 10 frames out of 48,398 had a PSPI score greater than 13. Here, we emphasized that imbalanced data sets are one major cause of heterogeneity, resulting in the poorer performance of AI algorithms.</p>
      <p>We tentatively propose a method to minimize the effect of training with imbalanced data by stacking multiple classes into one class, which is already presented in studies included in the systematic review [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Common methods to minimize bias include resampling and data augmentation [<xref ref-type="bibr" rid="ref66">66</xref>]. This proposed method is used in the meta-analysis to compare the test results of different studies as well. The stacking method is available when classes are only different in intensity. A disadvantage of combined classes is that the model would be insufficient in clinical practice when the number of classes is low. Commonly used pain evaluation tools, such as VAS, have 10 discrete levels. It is recommended that future studies set the number of pain levels to be at least 10 for model training.</p>
      <p>This study is limited for several reasons. First, insufficient data were included because different performance metrics (mean standard error and mean absolute error) were used in most studies, which could not be summarized into a contingency table. To create a contingency table that can be included in a meta-analysis, the study should report the following: the number of objects used in each pain class for model validation, and the accuracy, sensitivity, specificity, and <italic>F</italic><sub>1</sub>-score for each pain class. This table cannot be created if a study reports the MAE, PCC, and other commonly used metrics in AI development. Second, a small study effect was observed in the funnel plot, and the heterogeneity could not be minimized. Another limitation is that the PSPI score is not clinically validated and is not the only tool that assesses pain from facial expressions. There are other clinically validated pain intensity assessment methods, such as the Faces Pain Scale-revised, Wong-Baker Faces Pain Rating Scale, and Oucher Scale [<xref ref-type="bibr" rid="ref3">3</xref>]. More databases could be created based on the above-mentioned tools. Finally, AI-assisted pain assessments are expected to cover larger populations, including patients who are unable to communicate, for example, patients with dementia or patients with masked faces. However, only 1 study considered patients with dementia, which was also caused by limited databases [<xref ref-type="bibr" rid="ref50">50</xref>].</p>
      <p>AI is a promising tool that can help in pain research in the future. In this systematic review and meta-analysis, one approach using computer vision was investigated to measure pain intensity from facial images. Despite some risk of bias and applicability concerns, CV models can achieve excellent test accuracy. Finally, more CV studies in pain estimation, reporting accuracy in contingency tables, and more pain databases are encouraged for future studies. Specifically, the creation of a balanced public database that contains not only healthy but also nonhealthy participants should be prioritized. The recording process would ideally take place in a clinical environment. Furthermore, it is recommended that researchers report the validation results in terms of accuracy, sensitivity, specificity, or contingency tables, as well as the number of objects for each pain class, for inclusion in a meta-analysis.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>PRISMA checklist, risk of bias summary, search strategy, database summary and reported items and explanations.</p>
        <media xlink:href="jmir_v26i1e51250_app1.doc" xlink:title="DOC File , 154 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Study performance summary.</p>
        <media xlink:href="jmir_v26i1e51250_app2.xls" xlink:title="XLS File  (Microsoft Excel File), 35 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Forest plot presenting pooled performance of subgroups in meta-analysis.</p>
        <media xlink:href="jmir_v26i1e51250_app3.png" xlink:title="PNG File , 257 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CV</term>
          <def>
            <p>computer vision</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DF</term>
          <def>
            <p>deep feature</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">GF</term>
          <def>
            <p>geometrical feature</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ICC</term>
          <def>
            <p>intraclass correlation coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">LDOR</term>
          <def>
            <p>log diagnostic odds ratio</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MAE</term>
          <def>
            <p>mean absolute error</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">NRS</term>
          <def>
            <p>numerical rating scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">PCC</term>
          <def>
            <p>Pearson correlation coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">PRISMA</term>
          <def>
            <p>Preferred Reporting Items for Systematic Review and Meta-Analysis</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">PROSPERO</term>
          <def>
            <p>International Prospective Register of Systematic Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">PSPI</term>
          <def>
            <p>Prkachin and Solomon pain intensity</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">QUADAS-2</term>
          <def>
            <p>Quality Assessment of Diagnostic Accuracy Studies 2</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">SROC</term>
          <def>
            <p>summary receiver operating characteristic</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb17">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb18">VAS</term>
          <def>
            <p>visual analog scale</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>WL, AH, and CW contributed to the literature search and data extraction. JH and YY wrote the first draft of the manuscript. All authors contributed to the conception and design of the study, the risk of bias evaluation, data analysis and interpretation, and contributed to and approved the final version of the manuscript.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The data sets generated during and analyzed during this study are available in the Figshare repository [<xref ref-type="bibr" rid="ref67">67</xref>].</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Raja</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Carr</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Finnerup</surname>
              <given-names>NB</given-names>
            </name>
            <name name-style="western">
              <surname>Flor</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gibson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Keefe</surname>
              <given-names>FJ</given-names>
            </name>
            <name name-style="western">
              <surname>Mogil</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Ringkamp</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sluka</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Stevens</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sullivan</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Tutelman</surname>
              <given-names>PR</given-names>
            </name>
            <name name-style="western">
              <surname>Ushida</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vader</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The revised International Association for the Study of Pain definition of pain: concepts, challenges, and compromises</article-title>
          <source>Pain</source>
          <year>2020</year>
          <volume>161</volume>
          <issue>9</issue>
          <fpage>1976</fpage>
          <lpage>1982</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32694387"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/j.pain.0000000000001939</pub-id>
          <pub-id pub-id-type="medline">32694387</pub-id>
          <pub-id pub-id-type="pii">00006396-202009000-00006</pub-id>
          <pub-id pub-id-type="pmcid">PMC7680716</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glare</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Aubrey</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Myles</surname>
              <given-names>PS</given-names>
            </name>
          </person-group>
          <article-title>Transition from acute to chronic pain after surgery</article-title>
          <source>Lancet</source>
          <year>2019</year>
          <volume>393</volume>
          <issue>10180</issue>
          <fpage>1537</fpage>
          <lpage>1546</lpage>
          <pub-id pub-id-type="doi">10.1016/S0140-6736(19)30352-6</pub-id>
          <pub-id pub-id-type="medline">30983589</pub-id>
          <pub-id pub-id-type="pii">S0140-6736(19)30352-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gordon</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>de Leon-Casasola</surname>
              <given-names>OA</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenberg</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Bickler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cassidy</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Chittenden</surname>
              <given-names>EH</given-names>
            </name>
            <name name-style="western">
              <surname>Degenhardt</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Griffith</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Manworren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>McCarberg</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Montgomery</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Perkal</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Suresh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sluka</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Strassels</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Thirlby</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Viscusi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Walco</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Warner</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Weisman</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CL</given-names>
            </name>
          </person-group>
          <article-title>Management of postoperative pain: a clinical practice guideline from the American Pain Society, the American Society of Regional Anesthesia and Pain Medicine, and the American Society of Anesthesiologists' Committee on Regional Anesthesia, Executive Committee, and Administrative Council</article-title>
          <source>J Pain</source>
          <year>2016</year>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>131</fpage>
          <lpage>157</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1526-5900(15)00995-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jpain.2015.12.008</pub-id>
          <pub-id pub-id-type="medline">26827847</pub-id>
          <pub-id pub-id-type="pii">S1526-5900(15)00995-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Seus</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wollenberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Weitz</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kunz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lautenbacher</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Garbas</surname>
              <given-names>JU</given-names>
            </name>
            <name name-style="western">
              <surname>Schmid</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <article-title>Automatic detection of pain from facial expressions: a survey</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2021</year>
          <volume>43</volume>
          <issue>6</issue>
          <fpage>1815</fpage>
          <lpage>1831</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2019.2958341</pub-id>
          <pub-id pub-id-type="medline">31825861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mussigmann</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bardel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lefaucheur</surname>
              <given-names>JP</given-names>
            </name>
          </person-group>
          <article-title>Resting-State Electroencephalography (EEG) biomarkers of chronic neuropathic pain. A systematic review</article-title>
          <source>Neuroimage</source>
          <year>2022</year>
          <volume>258</volume>
          <fpage>119351</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1053-8119(22)00470-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.neuroimage.2022.119351</pub-id>
          <pub-id pub-id-type="medline">35659993</pub-id>
          <pub-id pub-id-type="pii">S1053-8119(22)00470-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moscato</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cortelli</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chiari</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Physiological responses to pain in cancer patients: a systematic review</article-title>
          <source>Comput Methods Programs Biomed</source>
          <year>2022</year>
          <volume>217</volume>
          <fpage>106682</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0169-2607(22)00067-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cmpb.2022.106682</pub-id>
          <pub-id pub-id-type="medline">35172252</pub-id>
          <pub-id pub-id-type="pii">S0169-2607(22)00067-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thiam</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hihn</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Kestler</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Schwenker</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Multi-modal pain intensity assessment based on physiological signals: a deep learning perspective</article-title>
          <source>Front Physiol</source>
          <year>2021</year>
          <volume>12</volume>
          <fpage>720464</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34539444"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fphys.2021.720464</pub-id>
          <pub-id pub-id-type="medline">34539444</pub-id>
          <pub-id pub-id-type="pmcid">PMC8440852</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rojas</surname>
              <given-names>RF</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Waddington</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Goecke</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A systematic review of neurophysiological sensing for the assessment of acute pain</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>76</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00810-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00810-1</pub-id>
          <pub-id pub-id-type="medline">37100924</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00810-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC10133304</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mansutti</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Tomé-Pires</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chiappinotto</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Palese</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Facilitating pain assessment and communication in people with deafness: a systematic review</article-title>
          <source>BMC Public Health</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>1594</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcpublichealth.biomedcentral.com/articles/10.1186/s12889-023-16535-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12889-023-16535-5</pub-id>
          <pub-id pub-id-type="medline">37608263</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12889-023-16535-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC10464447</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>El-Tallawy</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Nagiub</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Pain management in the most vulnerable intellectual disability: a review</article-title>
          <source>Pain Ther</source>
          <year>2023</year>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>939</fpage>
          <lpage>961</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37284926"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s40122-023-00526-w</pub-id>
          <pub-id pub-id-type="medline">37284926</pub-id>
          <pub-id pub-id-type="pii">10.1007/s40122-023-00526-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC10290021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gkikas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tsiknakis</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic assessment of pain based on deep learning methods: a systematic review</article-title>
          <source>Comput Methods Programs Biomed</source>
          <year>2023</year>
          <volume>231</volume>
          <fpage>107365</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0169-2607(23)00032-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cmpb.2023.107365</pub-id>
          <pub-id pub-id-type="medline">36764062</pub-id>
          <pub-id pub-id-type="pii">S0169-2607(23)00032-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Borna</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Haider</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Maita</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Torres</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Avila</surname>
              <given-names>FR</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>De Sario Velasquez</surname>
              <given-names>GD</given-names>
            </name>
            <name name-style="western">
              <surname>McLeod</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bruce</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>RE</given-names>
            </name>
            <name name-style="western">
              <surname>Forte</surname>
              <given-names>AJ</given-names>
            </name>
          </person-group>
          <article-title>A review of voice-based pain detection in adults using artificial intelligence</article-title>
          <source>Bioengineering (Basel)</source>
          <year>2023</year>
          <volume>10</volume>
          <issue>4</issue>
          <fpage>500</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bioengineering10040500"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bioengineering10040500</pub-id>
          <pub-id pub-id-type="medline">37106687</pub-id>
          <pub-id pub-id-type="pii">bioengineering10040500</pub-id>
          <pub-id pub-id-type="pmcid">PMC10135816</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Sario</surname>
              <given-names>GD</given-names>
            </name>
            <name name-style="western">
              <surname>Haider</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Maita</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Torres-Guzman</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Emam</surname>
              <given-names>OS</given-names>
            </name>
            <name name-style="western">
              <surname>Avila</surname>
              <given-names>FR</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Borna</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McLeod</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bruce</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>RE</given-names>
            </name>
            <name name-style="western">
              <surname>Forte</surname>
              <given-names>AJ</given-names>
            </name>
          </person-group>
          <article-title>Using AI to detect pain through facial expressions: a review</article-title>
          <source>Bioengineering (Basel)</source>
          <year>2023</year>
          <volume>10</volume>
          <issue>5</issue>
          <fpage>548</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bioengineering10050548"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bioengineering10050548</pub-id>
          <pub-id pub-id-type="medline">37237618</pub-id>
          <pub-id pub-id-type="pii">bioengineering10050548</pub-id>
          <pub-id pub-id-type="pmcid">PMC10215219</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Herr</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Demir</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez</surname>
              <given-names>KD</given-names>
            </name>
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>NC</given-names>
            </name>
          </person-group>
          <article-title>Using artificial intelligence to improve pain assessment and pain management: a scoping review</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2023</year>
          <volume>30</volume>
          <issue>3</issue>
          <fpage>570</fpage>
          <lpage>587</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36458955"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocac231</pub-id>
          <pub-id pub-id-type="medline">36458955</pub-id>
          <pub-id pub-id-type="pii">6865111</pub-id>
          <pub-id pub-id-type="pmcid">PMC9933069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hughes</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Chivers</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hoti</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The clinical suitability of an artificial intelligence-enabled pain assessment tool for use in infants: feasibility and usability evaluation study</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e41992</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e41992/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/41992</pub-id>
          <pub-id pub-id-type="medline">36780223</pub-id>
          <pub-id pub-id-type="pii">v25i1e41992</pub-id>
          <pub-id pub-id-type="pmcid">PMC9972204</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-guided postoperative pain assessment in children</article-title>
          <source>Pain</source>
          <year>2023</year>
          <volume>164</volume>
          <issue>9</issue>
          <fpage>2029</fpage>
          <lpage>2035</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37146182"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/j.pain.0000000000002900</pub-id>
          <pub-id pub-id-type="medline">37146182</pub-id>
          <pub-id pub-id-type="pii">00006396-990000000-00299</pub-id>
          <pub-id pub-id-type="pmcid">PMC10436358</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Page</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>McKenzie</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Boutron</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffmann</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Mulrow</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Shamseer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tetzlaff</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Chou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Glanville</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Grimshaw</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Hróbjartsson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lalu</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Loder</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Mayo-Wilson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>McDonald</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McGuinness</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Welch</surname>
              <given-names>VA</given-names>
            </name>
            <name name-style="western">
              <surname>Whiting</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title>
          <source>BMJ</source>
          <year>2021</year>
          <volume>372</volume>
          <fpage>n71</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.bmj.com/lookup/pmidlookup?view=long&amp;pmid=33782057"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id>
          <pub-id pub-id-type="medline">33782057</pub-id>
          <pub-id pub-id-type="pmcid">PMC8005924</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Whiting</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Rutjes</surname>
              <given-names>AWS</given-names>
            </name>
            <name name-style="western">
              <surname>Westwood</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Mallett</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Deeks</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Reitsma</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Leeflang</surname>
              <given-names>MMG</given-names>
            </name>
            <name name-style="western">
              <surname>Sterne</surname>
              <given-names>JAC</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PMM</given-names>
            </name>
          </person-group>
          <article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title>
          <source>Ann Intern Med</source>
          <year>2011</year>
          <volume>155</volume>
          <issue>8</issue>
          <fpage>529</fpage>
          <lpage>536</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/abs/10.7326/0003-4819-155-8-201110180-00009?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id>
          <pub-id pub-id-type="medline">22007046</pub-id>
          <pub-id pub-id-type="pii">155/8/529</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sounderajah</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ashrafian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>NH</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Golub</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kahn</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Esteva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Karthikesalingam</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mateen</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Milea</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Treanor</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cushnan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>McPherson</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Glocker</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Greaves</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Harling</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ordish</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Deeks</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Leeflang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Diamond</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McInnes</surname>
              <given-names>MDF</given-names>
            </name>
            <name name-style="western">
              <surname>McCradden</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abràmoff</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Normahani</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Markar</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Mallett</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shetty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Denniston</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Whiting</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Darzi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A quality assessment tool for artificial intelligence-centered diagnostic test accuracy studies: QUADAS-AI</article-title>
          <source>Nat Med</source>
          <year>2021</year>
          <volume>27</volume>
          <issue>10</issue>
          <fpage>1663</fpage>
          <lpage>1665</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.nature.com/articles/s41591-021-01517-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41591-021-01517-0</pub-id>
          <pub-id pub-id-type="medline">34635854</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-021-01517-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Riebler</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>meta4diag: Bayesian bivariate meta-analysis of diagnostic test studies for routine practice</article-title>
          <source>J Stat Soft</source>
          <year>2018</year>
          <volume>83</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.18637/jss.v083.i01</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hammal</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cohn</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>Automatic detection of pain intensity</article-title>
          <source>Proc ACM Int Conf Multimodal Interact</source>
          <year>2012</year>
          <volume>2012</volume>
          <fpage>47</fpage>
          <lpage>52</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32724903"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/2388676.2388688</pub-id>
          <pub-id pub-id-type="medline">32724903</pub-id>
          <pub-id pub-id-type="pmcid">PMC7385931</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adibuzzaman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ostberg</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ahamed</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Povinelli</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sindhu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Love</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kawsar</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ahsan</surname>
              <given-names>GMT</given-names>
            </name>
          </person-group>
          <article-title>Assessment of pain using facial pictures taken with a smartphone</article-title>
          <year>2015</year>
          <conf-name>2015 IEEE 39th Annual Computer Software and Applications Conference</conf-name>
          <conf-date>July 01-05, 2015</conf-date>
          <conf-loc>Taichung, Taiwan</conf-loc>
          <fpage>726</fpage>
          <lpage>731</lpage>
          <pub-id pub-id-type="doi">10.1109/compsac.2015.150</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Majumder</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Behera</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Subramanian</surname>
              <given-names>VK</given-names>
            </name>
          </person-group>
          <article-title>Shoulder pain intensity recognition using Gaussian mixture models</article-title>
          <year>2015</year>
          <conf-name>2015 IEEE International WIE Conference on Electrical and Computer Engineering (WIECON-ECE)</conf-name>
          <conf-date>December 19-20, 2015</conf-date>
          <conf-loc>Dhaka, Bangladesh</conf-loc>
          <fpage>130</fpage>
          <lpage>134</lpage>
          <pub-id pub-id-type="doi">10.1109/wiecon-ece.2015.7444016</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rathee</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ganotra</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A novel approach for pain intensity detection based on facial feature deformations</article-title>
          <source>J Vis Commun Image Represent</source>
          <year>2015</year>
          <volume>33</volume>
          <fpage>247</fpage>
          <lpage>254</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvcir.2015.09.007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sikka</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Diaz</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Goodwin</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Craig</surname>
              <given-names>KD</given-names>
            </name>
            <name name-style="western">
              <surname>Bartlett</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>JS</given-names>
            </name>
          </person-group>
          <article-title>Automated assessment of children's postoperative pain using computer vision</article-title>
          <source>Pediatrics</source>
          <year>2015</year>
          <volume>136</volume>
          <issue>1</issue>
          <fpage>e124</fpage>
          <lpage>e131</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/26034245"/>
          </comment>
          <pub-id pub-id-type="doi">10.1542/peds.2015-0029</pub-id>
          <pub-id pub-id-type="medline">26034245</pub-id>
          <pub-id pub-id-type="pii">peds.2015-0029</pub-id>
          <pub-id pub-id-type="pmcid">PMC4485009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rathee</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ganotra</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Multiview distance metric learning on facial feature descriptors for automatic pain intensity detection</article-title>
          <source>Comput Vis Image Underst</source>
          <year>2016</year>
          <volume>147</volume>
          <fpage>77</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cviu.2015.12.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Recurrent convolutional neural network regression for continuous pain intensity estimation in video</article-title>
          <year>2016</year>
          <conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</conf-name>
          <conf-date>June 26-July 01, 2016</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvprw.2016.191</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Egede</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Valstar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Martinez</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Fusing deep learned and hand-crafted features of appearance, shape, and dynamics for automatic pain estimation</article-title>
          <year>2017</year>
          <conf-name>2017 12th IEEE International Conference on Automatic Face &amp; Gesture Recognition (FG 2017)</conf-name>
          <conf-date>May 30-June 03, 2017</conf-date>
          <conf-loc>Washington, DC</conf-loc>
          <fpage>689</fpage>
          <lpage>696</lpage>
          <pub-id pub-id-type="doi">10.1109/fg.2017.87</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Martinez</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Rudovic</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Personalized automatic estimation of self-reported pain intensity from facial expressions</article-title>
          <year>2017</year>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</conf-name>
          <conf-date>July 21-26, 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>2318</fpage>
          <lpage>2327</lpage>
          <pub-id pub-id-type="doi">10.1109/cvprw.2017.286</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bourou</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pampouchidou</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tsiknakis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Marias</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Simos</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Video-based pain level assessment: feature selection and inter-subject variability modeling</article-title>
          <year>2018</year>
          <conf-name>2018 41st International Conference on Telecommunications and Signal Processing (TSP)</conf-name>
          <conf-date>July 04-06, 2018</conf-date>
          <conf-loc>Athens, Greece</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/tsp.2018.8441252</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haque</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Bautista</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Noroozi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kulkarni</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Laursen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Irani</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Deep multimodal pain recognition: a database and comparison of spatio-temporal visual modalities</article-title>
          <year>2018</year>
          <conf-name>2018 13th IEEE International Conference on Automatic Face &amp; Gesture Recognition (FG 2018)</conf-name>
          <conf-date>May 15-19, 2018</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <fpage>250</fpage>
          <lpage>257</lpage>
          <pub-id pub-id-type="doi">10.1109/fg.2018.00044</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Semwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Londhe</surname>
              <given-names>ND</given-names>
            </name>
          </person-group>
          <article-title>Automated pain severity detection using convolutional neural network</article-title>
          <year>2018</year>
          <conf-name>2018 International Conference on Computational Techniques, Electronics and Mechanical Systems (CTEMS)</conf-name>
          <conf-date>December 21-22, 2018</conf-date>
          <conf-loc>Belgaum, India</conf-loc>
          <fpage>66</fpage>
          <lpage>70</lpage>
          <pub-id pub-id-type="doi">10.1109/ctems.2018.8769123</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tavakolian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hadid</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep binary representation of facial expressions: a novel framework for automatic pain intensity recognition</article-title>
          <year>2018</year>
          <conf-name>2018 25th IEEE International Conference on Image Processing (ICIP)</conf-name>
          <conf-date>October 07-10, 2018</conf-date>
          <conf-loc>Athens, Greece</conf-loc>
          <fpage>1952</fpage>
          <lpage>1956</lpage>
          <pub-id pub-id-type="doi">10.1109/icip.2018.8451681</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tavakolian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hadid</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep spatiotemporal representation of the face for automatic pain intensity estimation</article-title>
          <year>2018</year>
          <conf-name>2018 24th International Conference on Pattern Recognition (ICPR)</conf-name>
          <conf-date>August 20-24, 2018</conf-date>
          <conf-loc>Beijing, China</conf-loc>
          <fpage>350</fpage>
          <lpage>354</lpage>
          <pub-id pub-id-type="doi">10.1109/icpr.2018.8545324</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Pain intensity estimation using deep spatiotemporal and handcrafted features</article-title>
          <source>IEICE Trans Inf &amp; Syst</source>
          <year>2018</year>
          <volume>E101.D</volume>
          <issue>6</issue>
          <fpage>1572</fpage>
          <lpage>1580</lpage>
          <pub-id pub-id-type="doi">10.1587/transinf.2017edp7318</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bargshady</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Soar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Deo</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Whittaker</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A joint deep neural network model for pain recognition from face</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE 4th International Conference on Computer and Communication Systems (ICCCS)</conf-name>
          <conf-date>February 23-25, 2019</conf-date>
          <conf-loc>Singapore</conf-loc>
          <fpage>52</fpage>
          <lpage>56</lpage>
          <pub-id pub-id-type="doi">10.1109/ccoms.2019.8821779</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casti</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mencattini</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Comes</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Callari</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Di Giuseppe</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Natoli</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dauri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Daprati</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Martinelli</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Calibration of vision-based measurement of pain intensity with multiple expert observers</article-title>
          <source>IEEE Trans Instrum Meas</source>
          <year>2019</year>
          <volume>68</volume>
          <issue>7</issue>
          <fpage>2442</fpage>
          <lpage>2450</lpage>
          <pub-id pub-id-type="doi">10.1109/tim.2019.2909603</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>CW</given-names>
            </name>
          </person-group>
          <article-title>Facial pain intensity estimation for ICU patient with partial occlusion coming from treatment</article-title>
          <year>2019</year>
          <conf-name>BIBE 2019; The Third International Conference on Biological Information and Biomedical Engineering</conf-name>
          <conf-date>June 20-22, 2019</conf-date>
          <conf-loc>Hangzhou, China</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saha</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Ahsan</surname>
              <given-names>GMT</given-names>
            </name>
            <name name-style="western">
              <surname>Gani</surname>
              <given-names>MO</given-names>
            </name>
            <name name-style="western">
              <surname>Ahamed</surname>
              <given-names>SI</given-names>
            </name>
          </person-group>
          <article-title>Personalized pain study platform using evidence-based continuous learning tool</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)</conf-name>
          <conf-date>July 15-19, 2019</conf-date>
          <conf-loc>Milwaukee, WI</conf-loc>
          <fpage>490</fpage>
          <lpage>495</lpage>
          <pub-id pub-id-type="doi">10.1109/compsac.2019.10254</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tavakolian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hadid</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A spatiotemporal convolutional neural network for automatic pain intensity estimation from facial dynamics</article-title>
          <source>Int J Comput Vis</source>
          <year>2019</year>
          <volume>127</volume>
          <issue>10</issue>
          <fpage>1413</fpage>
          <lpage>1425</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1007/s11263-019-01191-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11263-019-01191-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bargshady</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Deo</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Soar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Whittaker</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Ensemble neural network approach detecting pain intensity from facial expressions</article-title>
          <source>Artif Intell Med</source>
          <year>2020</year>
          <volume>109</volume>
          <fpage>101954</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2020.101954</pub-id>
          <pub-id pub-id-type="medline">34756219</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(20)31219-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bargshady</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Deo</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Soar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Whittaker</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Enhanced deep learning algorithm development to detect pain intensity from facial expression images</article-title>
          <source>Expert Syst Appl</source>
          <year>2020</year>
          <volume>149</volume>
          <fpage>113305</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2020.113305</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dragomir</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Florea</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Pupezescu</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Automatic subject independent pain intensity estimation using a deep learning approach</article-title>
          <year>2020</year>
          <conf-name>2020 International Conference on e-Health and Bioengineering (EHB)</conf-name>
          <conf-date>October 29-30, 2020</conf-date>
          <conf-loc>Iasi, Romania</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1109/ehb50910.2020.9280190</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Mwesigye</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Pain-attentive network: a deep spatio-temporal attention model for pain estimation</article-title>
          <source>Multimed Tools Appl</source>
          <year>2020</year>
          <volume>79</volume>
          <issue>37-38</issue>
          <fpage>28329</fpage>
          <lpage>28354</lpage>
          <pub-id pub-id-type="doi">10.1007/s11042-020-09397-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mallol-Ragolta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A curriculum learning approach for pain intensity recognition from facial expressions</article-title>
          <year>2020</year>
          <conf-name>2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)</conf-name>
          <conf-date>November 16-20, 2020</conf-date>
          <conf-loc>Buenos Aires, Argentina</conf-loc>
          <fpage>829</fpage>
          <lpage>833</lpage>
          <pub-id pub-id-type="doi">10.1109/fg47880.2020.00083</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Pain intensity recognition via multi-scale deep network</article-title>
          <source>IET Image Process</source>
          <year>2020</year>
          <volume>14</volume>
          <issue>8</issue>
          <fpage>1645</fpage>
          <lpage>1652</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ietresearch.onlinelibrary.wiley.com/doi/10.1049/iet-ipr.2019.1448"/>
          </comment>
          <pub-id pub-id-type="doi">10.1049/iet-ipr.2019.1448</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tavakolian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Self-supervised pain intensity estimation from facial videos via statistical spatiotemporal distillation</article-title>
          <source>Pattern Recognit Lett</source>
          <year>2020</year>
          <volume>140</volume>
          <fpage>26</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1016/j.patrec.2020.09.012</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>de Sa</surname>
              <given-names>VR</given-names>
            </name>
          </person-group>
          <article-title>Exploring multidimensional measurements for pain evaluation using facial action units</article-title>
          <year>2020</year>
          <conf-name>2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)</conf-name>
          <conf-date>November 16-20, 2020</conf-date>
          <conf-loc>Buenos Aires, Argentina</conf-loc>
          <fpage>786</fpage>
          <lpage>792</lpage>
          <pub-id pub-id-type="doi">10.1109/fg47880.2020.00087</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pikulkaew</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Boonchieng</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Boonchieng</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chouvatut</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>2D facial expression and movement of motion for pain identification with deep learning methods</article-title>
          <source>IEEE Access</source>
          <year>2021</year>
          <volume>9</volume>
          <fpage>109903</fpage>
          <lpage>109914</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2021.3101396</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rezaei</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moturu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Prkachin</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjistavropoulos</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Taati</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Unobtrusive pain monitoring in older adults with dementia using pairwise and contrastive training</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2021</year>
          <volume>25</volume>
          <issue>5</issue>
          <fpage>1450</fpage>
          <lpage>1462</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2020.3045743</pub-id>
          <pub-id pub-id-type="medline">33338024</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Semwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Londhe</surname>
              <given-names>ND</given-names>
            </name>
          </person-group>
          <article-title>S-PANET: a shallow convolutional neural network for pain severity assessment in uncontrolled environment</article-title>
          <year>2021</year>
          <conf-name>2021 IEEE 11th Annual Computing and Communication Workshop and Conference (CCWC)</conf-name>
          <conf-date>January 27-30, 2021</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <fpage>0800</fpage>
          <lpage>0806</lpage>
          <pub-id pub-id-type="doi">10.1109/ccwc51732.2021.9376052</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Semwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Londhe</surname>
              <given-names>ND</given-names>
            </name>
          </person-group>
          <article-title>ECCNet: an ensemble of compact convolution neural network for pain severity assessment from face images</article-title>
          <year>2021</year>
          <conf-name>2021 11th International Conference on Cloud Computing, Data Science &amp; Engineering (Confluence)</conf-name>
          <conf-date>January 28-29, 2021</conf-date>
          <conf-loc>Noida, India</conf-loc>
          <fpage>761</fpage>
          <lpage>766</lpage>
          <pub-id pub-id-type="doi">10.1109/confluence51648.2021.9377197</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szczapa</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Daoudi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Berretti</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pala</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Del Bimbo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hammal</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Automatic estimation of self-reported pain by interpretable representations of motion dynamics</article-title>
          <year>2021</year>
          <conf-name>2020 25th International Conference on Pattern Recognition (ICPR)</conf-name>
          <conf-date>January 10-15, 2021</conf-date>
          <conf-loc>Milan, Italy</conf-loc>
          <fpage>2544</fpage>
          <lpage>2550</lpage>
          <pub-id pub-id-type="doi">10.1109/icpr48806.2021.9412292</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>YC</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Tsai</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>CH</given-names>
            </name>
          </person-group>
          <article-title>Distance ordering: a deep supervised metric learning for pain intensity estimation</article-title>
          <year>2021</year>
          <conf-name>2021 20th IEEE International Conference on Machine Learning and Applications (ICMLA)</conf-name>
          <conf-date>December 13-16, 2021</conf-date>
          <conf-loc>Pasadena, CA</conf-loc>
          <fpage>1083</fpage>
          <lpage>1088</lpage>
          <pub-id pub-id-type="doi">10.1109/icmla52953.2021.00177</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Pain expression assessment based on a locality and identity aware network</article-title>
          <source>IET Image Process</source>
          <year>2021</year>
          <volume>15</volume>
          <issue>12</issue>
          <fpage>2948</fpage>
          <lpage>2958</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ietresearch.onlinelibrary.wiley.com/doi/10.1049/ipr2.12282"/>
          </comment>
          <pub-id pub-id-type="doi">10.1049/ipr2.12282</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alghamdi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Alaghband</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Facial expressions based automatic pain assessment system</article-title>
          <source>Appl Sci</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>13</issue>
          <fpage>6423</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/2076-3417/12/13/6423"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/app12136423</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barua</surname>
              <given-names>PD</given-names>
            </name>
            <name name-style="western">
              <surname>Baygin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dogan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Baygin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Arunkumar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Fujita</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tuncer</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Palmer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Azizan</surname>
              <given-names>MMB</given-names>
            </name>
            <name name-style="western">
              <surname>Kadri</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Acharya</surname>
              <given-names>UR</given-names>
            </name>
          </person-group>
          <article-title>Automated detection of pain levels using deep feature extraction from shutter blinds-based dynamic-sized horizontal patches with facial images</article-title>
          <source>Sci Rep</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>17297</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-022-21380-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-022-21380-4</pub-id>
          <pub-id pub-id-type="medline">36241674</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-022-21380-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC9568538</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fontaine</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vielzeuf</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Genestier</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Limeux</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Santucci-Sivilotto</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mory</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Darmon</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lanteri-Minet</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mokhtar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Laine</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vistoli</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence to evaluate postoperative pain based on facial expression recognition</article-title>
          <source>Eur J Pain</source>
          <year>2022</year>
          <volume>26</volume>
          <issue>6</issue>
          <fpage>1282</fpage>
          <lpage>1291</lpage>
          <pub-id pub-id-type="doi">10.1002/ejp.1948</pub-id>
          <pub-id pub-id-type="medline">35352426</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosseini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chuah</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Orooji</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rafatirad</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Homayoun</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Convolution neural network for pain intensity assessment from facial expression</article-title>
          <year>2022</year>
          <conf-name>2022 44th Annual International Conference of the IEEE Engineering in Medicine &amp; Biology Society (EMBC)</conf-name>
          <conf-date>July 11-15, 2022</conf-date>
          <conf-loc>Glasgow, Scotland</conf-loc>
          <fpage>2697</fpage>
          <lpage>2702</lpage>
          <pub-id pub-id-type="doi">10.1109/embc48229.2022.9871770</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qing</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>HybNet: a hybrid network structure for pain intensity estimation</article-title>
          <source>Vis Comput</source>
          <year>2021</year>
          <volume>38</volume>
          <issue>3</issue>
          <fpage>871</fpage>
          <lpage>882</lpage>
          <pub-id pub-id-type="doi">10.1007/s00371-021-02056-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Islamadina</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Saddami</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Oktiana</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abidin</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Muharar</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Arnia</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Performance of deep learning benchmark models on thermal imagery of pain through facial expressions</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE International Conference on Communication, Networks and Satellite (COMNETSAT)</conf-name>
          <conf-date>November 03-05, 2022</conf-date>
          <conf-loc>Solo, Indonesia</conf-loc>
          <fpage>374</fpage>
          <lpage>379</lpage>
          <pub-id pub-id-type="doi">10.1109/comnetsat56033.2022.9994546</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Swetha</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Praiscia</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Juliet</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Pain assessment model using facial recognition</article-title>
          <year>2022</year>
          <conf-name>2022 6th International Conference on Intelligent Computing and Control Systems (ICICCS)</conf-name>
          <conf-date>May 25-27, 2022</conf-date>
          <conf-loc>Madurai, India</conf-loc>
          <fpage>1</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1109/iciccs53718.2022.9788265</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>SF</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>TL</given-names>
            </name>
            <name name-style="western">
              <surname>Shih</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>CH</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>SFY</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>YS</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Chao</surname>
              <given-names>WC</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based pain classifier based on the facial expression in critically ill patients</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2022</year>
          <volume>9</volume>
          <fpage>851690</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35372435"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2022.851690</pub-id>
          <pub-id pub-id-type="medline">35372435</pub-id>
          <pub-id pub-id-type="pmcid">PMC8968070</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ismail</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Waseem</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Towards a deep learning pain-level detection deployment at UAE for patient-centric-pain management and diagnosis support: framework and performance evaluation</article-title>
          <source>Procedia Comput Sci</source>
          <year>2023</year>
          <volume>220</volume>
          <fpage>339</fpage>
          <lpage>347</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1877-0509(23)00579-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.procs.2023.03.044</pub-id>
          <pub-id pub-id-type="medline">37089761</pub-id>
          <pub-id pub-id-type="pii">S1877-0509(23)00579-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC10110340</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vu</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Beurton-Aimar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Learning to focus on region-of-interests for pain intensity estimation</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)</conf-name>
          <conf-date>January 05-08, 2023</conf-date>
          <conf-loc>Waikoloa Beach, HI</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/fg57933.2023.10042583</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaur</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Pannu</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Malhi</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>A systematic review on imbalanced data challenges in machine learning: applications and solutions</article-title>
          <source>ACM Comput Surv</source>
          <year>2019</year>
          <volume>52</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>36</lpage>
          <pub-id pub-id-type="doi">10.1145/3343440</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="web">
          <article-title>Data for meta-analysis of pain assessment from facial images</article-title>
          <source>Figshare</source>
          <year>2023</year>
          <access-date>2024-03-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://figshare.com/articles/dataset/Data_for_Meta-Analysis_of_Pain_Assessment_from_Facial_Images/24531466/1">https://figshare.com/articles/dataset/Data_for_Meta-Analysis_of_Pain_Assessment_from_Facial_Images/24531466/1</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
