<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<?covid-19-tdm?>
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v23i4e24191</article-id>
      <article-id pub-id-type="pmid">33739930</article-id>
      <article-id pub-id-type="doi">10.2196/24191</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Measuring Stress in Health Professionals Over the Phone Using Automatic Speech Analysis During the COVID-19 Pandemic: Observational Pilot Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Khaleghi</surname>
            <given-names>Ali</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>D'Arcy</surname>
            <given-names>Shona</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>van der Velde</surname>
            <given-names>Enno</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>König</surname>
            <given-names>Alexandra</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Stars Team</institution>
            <institution>Institut national de recherche en informatique et en automatique</institution>
            <addr-line>2004 Route des Lucioles, 06902</addr-line>
            <addr-line>Sophia Antipolis</addr-line>
            <addr-line>Valbonne, 06200</addr-line>
            <country>France</country>
            <phone>+33 652021156</phone>
            <email>alexandra.konig@inria.fr</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9960-9657</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Riviere</surname>
            <given-names>Kevin</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8757-942X</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Linz</surname>
            <given-names>Nicklas</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5178-3234</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lindsay</surname>
            <given-names>Hali</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0646-3749</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Elbaum</surname>
            <given-names>Julia</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7940-4053</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Fabre</surname>
            <given-names>Roxane</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5670-1322</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Derreumaux</surname>
            <given-names>Alexandre</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2818-0122</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Robert</surname>
            <given-names>Philippe</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0963-1728</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Stars Team</institution>
        <institution>Institut national de recherche en informatique et en automatique</institution>
        <addr-line>Valbonne</addr-line>
        <country>France</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Département de Santé Publique</institution>
        <institution>Centre Hospitalier Universitaire de Nice</institution>
        <institution>Université Côte d’Azur</institution>
        <addr-line>Nice</addr-line>
        <country>France</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>ki elements</institution>
        <addr-line>Saarbrücken</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>German Research Center for Artificial Intelligence (DFKI)</institution>
        <addr-line>Saarbrücken</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>CoBteK (Cognition-Behaviour-Technology) Lab</institution>
        <institution>La Fédération de Recherche Interventions en Santé</institution>
        <institution>Université Côte d’Azur</institution>
        <addr-line>Nice</addr-line>
        <country>France</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Alexandra König <email>alexandra.konig@inria.fr</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>4</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>19</day>
        <month>4</month>
        <year>2021</year>
      </pub-date>
      <volume>23</volume>
      <issue>4</issue>
      <elocation-id>e24191</elocation-id>
      <history>
        <date date-type="received">
          <day>28</day>
          <month>9</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>24</day>
          <month>10</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>13</day>
          <month>11</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>17</day>
          <month>3</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Alexandra König, Kevin Riviere, Nicklas Linz, Hali Lindsay, Julia Elbaum, Roxane Fabre, Alexandre Derreumaux, Philippe Robert. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 19.04.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on http://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2021/4/e24191" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>During the COVID-19 pandemic, health professionals have been directly confronted with the suffering of patients and their families. By making them main actors in the management of this health crisis, they have been exposed to various psychosocial risks (stress, trauma, fatigue, etc). Paradoxically, stress-related symptoms are often underreported in this vulnerable population but are potentially detectable through passive monitoring of changes in speech behavior.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to investigate the use of rapid and remote measures of stress levels in health professionals working during the COVID-19 outbreak. This was done through the analysis of participants’ speech behavior during a short phone call conversation and, in particular, via positive, negative, and neutral storytelling tasks.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Speech samples from 89 health care professionals were collected over the phone during positive, negative, and neutral storytelling tasks; various voice features were extracted and compared with classical stress measures via standard questionnaires. Additionally, a regression analysis was performed.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Certain speech characteristics correlated with stress levels in both genders; mainly, spectral (ie, formant) features, such as the mel-frequency cepstral coefficient, and prosodic characteristics, such as the fundamental frequency, appeared to be sensitive to stress. Overall, for both male and female participants, using vocal features from the positive tasks for regression yielded the most accurate prediction results of stress scores (mean absolute error 5.31).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Automatic speech analysis could help with early detection of subtle signs of stress in vulnerable populations over the phone. By combining the use of this technology with timely intervention strategies, it could contribute to the prevention of burnout and the development of comorbidities, such as depression or anxiety.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>stress detection</kwd>
        <kwd>speech</kwd>
        <kwd>voice analysis</kwd>
        <kwd>COVID-19</kwd>
        <kwd>phone monitoring</kwd>
        <kwd>computer linguistics</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>In December 2019 in the Chinese city of Wuhan, a new coronavirus pneumonia, COVID-19, emerged. The pathogen involved is SARS-CoV-2. Here, we will refer to the pathology as COVID-19. COVID-19 has spread very rapidly in China but also in many other countries [<xref ref-type="bibr" rid="ref1">1</xref>]. On March 11, 2020, the World Health Organization declared that the COVID-19 outbreak had become a pandemic [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
      <p>According to previous studies on SARS or Ebola epidemics, the onset of a sudden and immediately fatal disease could put extraordinary pressure on health care professionals [<xref ref-type="bibr" rid="ref3">3</xref>]. Increased workloads, physical exhaustion, inadequate personal equipment, nosocomial transmission, and the need to make ethically difficult decisions about rationing care can have dramatic effects on their physical and mental well-being. Their resilience may be further compromised by isolation and loss of social support, risk or loss of friends and relatives, and radical, often worrying changes in working methods. Health care workers are, therefore, particularly vulnerable to mental health problems, including fear, anxiety, depression, and insomnia [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Initial results estimate that 23% and 22% of health care workers experienced depression and anxiety, respectively, during the COVID-19 pandemic [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
      <p>Paradoxically, health care workers do not tend to seek professional help, and stress-related symptoms are often not immediately reported: “burnout, stress, and anxiety will have to wait.” Most of the time there will not even be a demand for care. Early implicit stress detection is of great importance in this population and would allow for timely intervention strategies in order to prevent escalation and complete occupational burnout.</p>
      <p>To measure stress in clinical practice, various scales and questionnaires are available, such as the Perceived Stress Scale (PSS) [<xref ref-type="bibr" rid="ref7">7</xref>], the Stressful Life Event Questionnaire [<xref ref-type="bibr" rid="ref8">8</xref>], the Stress Overload Scale [<xref ref-type="bibr" rid="ref9">9</xref>], and the Trier Inventory for Chronic Stress [<xref ref-type="bibr" rid="ref10">10</xref>]. However, the present health crisis pushed research teams to investigate the use of new technological tools in this specific population. One possible avenue is the use of automatic speech analysis allowing extraction of voice features during standard consultation or over a simple phone call.</p>
      <p>Psychological stress induces multiple effects on the body, including increased muscle tension, increased breathing rate, and changes in salivation rate, which may, in turn, affect vocal production [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Under psychological stress, voice pitch (ie, the acoustic correlate of fundamental frequency [F0]) usually increases, as it is inversely related to the rate of vocal fold vibration, which stretches under stress and becomes tenser together with an increase in subglottal pressure and vocal intensity [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Indeed, an increase in voice pitch is the most commonly reported finding in studies examining speech under stress. However, stress can also affect other voice parameters, such as an increase in speech prosody [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. In depression, the analysis of speech characteristics has recently attracted considerable research attention [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref17">17</xref>]. Studies revealed that patients show flattened affect, reduced speech variability, monotonicity in pitch and loudness, increased pause duration, and reduced speech rate [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. A recent study investigated the use of speech parameters extracted from audio recordings to differentiate patients suffering from posttraumatic stress disorder from healthy controls [<xref ref-type="bibr" rid="ref21">21</xref>].</p>
      <p>Thus, the detection of subtle events in the voice may offer a window into assessing the impact of stress in situations where circumstances make it difficult to monitor stress directly but need to be addressed urgently [<xref ref-type="bibr" rid="ref22">22</xref>].</p>
      <p>In this work, we aim to investigate the use of a rapid and remote measure of stress levels in health professionals working during the COVID-19 outbreak, utilizing the automatic analysis of their speech behavior during a short phone call conversation.</p>
      <p>Firstly, speech samples of health care professionals were collected over the phone during the COVID-19 pandemic, and various voice features were extracted and compared with classical stress measures. Secondly, based on the extracted features, scores from the completed stress scale that were obtained by participants were predicted. The purpose of this pilot study was to assess whether this technological method could be of interest to support early screening of subtle signs of stress.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Participants</title>
        <p>Health care professionals were recruited through outreach telephone calls. They worked during the COVID-19 outbreak in the local university hospital center of Nice, France, in either private practices or as independent workers in the Provence-Alpes-Côte d’Azur region. They could occupy any function in these structures. The only criterion for noninclusion was the subjects’ refusal to participate in the study. Inclusion of participants was carried out from May 5 to June 7, 2020.</p>
        <p>The study was approved by the Ethical Board for noninterventional studies of the University Côte d’Azur, France (approval 2020-58). Participants were given all the information about the study prior to the call so they could give informed consent. For those interested, the option for a follow-up call with a clinician was provided.</p>
      </sec>
      <sec>
        <title>Procedure</title>
        <p>The telephone calls were made by psychiatrists (n=3) or psychologists (n=1) belonging to the Cognition Behavior Technology research team and the memory clinic of the University Côte d’Azur. Calls lasted about 15 minutes and were composed of the following:</p>
        <list list-type="order">
          <list-item>
            <p>An informative part explaining the reasons for the call and its structure and how the study is conducted. The participant’s consent was requested to continue and to proceed with a recording of his or her voice.</p>
          </list-item>
          <list-item>
            <p>The Motivation Stress Affect (MSA) questionnaire. The MSA questionnaire is a self-administered questionnaire composed of 11 questions that must be answered by “yes” or “no.” The first five questions assess motivation [<xref ref-type="bibr" rid="ref23">23</xref>], the next two questions assess depression, and the last four questions assess stress [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
          </list-item>
          <list-item>
            <p>Three open standardized questions: neutral, positive, and negative storytelling. In order to capture natural speech, but within a limited time frame, the participant was asked to talk about something emotionally neutral (ie, describe where he or she is), to talk about a negative event in his or her life, and, finally, to talk about a positive event in his or her life. Each answer should have lasted about 1 minute and was recorded in a secure and encrypted way. It was not specified whether the event had to be experienced during COVID-19; thus, it was open to the participant to recall whatever event first came to mind. These free-speech tasks were used in previous studies [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref25">25</xref>] and allowed for a greater range of induced emotional effects, potentially sensitive to signs of stress and depression. The comparison of speech features between neutral and emotionally loaded questions may give insight into the affective state of participants.</p>
          </list-item>
          <list-item>
            <p>The PSS. This scale [<xref ref-type="bibr" rid="ref7">7</xref>] is a hetero-questionnaire composed of 10 questions to be answered by “never,” “almost never,” “sometimes,” “quite often,” or “often.”</p>
          </list-item>
          <list-item>
            <p>An open listening part aimed at exploring certain points in greater depth in order to refine the clinical needs.</p>
          </list-item>
          <list-item>
            <p>Decision and advice. Following the above steps, the psychiatrist or psychologist offered or did not offer psychological follow-up depending on whether he or she considered that the patient was at risk of developing or had a mood or anxiety disorder. He or she may also have offered advice on intervention strategies (eg, relaxation, yoga, physical activity, and national call platform for psychological support for caregivers).</p>
          </list-item>
        </list>
      </sec>
      <sec>
        <title>Materials</title>
        <p>To perform the phone calls for this study, the phone version of the DELTA application [<xref ref-type="bibr" rid="ref26">26</xref>] was used. The DELTA solution allows for the use of a dedicated interface in the form of an iOS app to make phone calls and locally record these calls on the internal memory of an iPad. The phone calls were made directly with the iPad and through its internal microphone.</p>
        <p>These recordings were then automatically transmitted—the iPad had to be connected to the internet—to the DELTA application programming interface (API) for analysis of acoustic and semantic parameters. Once the analysis was complete, the results were displayed directly on the DELTA interface. The recordings were made locally on the phone, the connection between the interface and the DELTA API was secure and encrypted, and the recordings were destroyed from the DELTA servers once the analysis was complete and the results sent to the experimenter.</p>
      </sec>
      <sec>
        <title>Analysis</title>
        <p>Audio features were extracted directly and automatically from the recorded audio signals of the three open standardized questions (see item #3 in the Procedure section). Characteristics were extracted from four main areas:</p>
        <list list-type="order">
          <list-item>
            <p>Prosodic characteristics, concerning long-term variations in perceived stress and speech rhythm. Prosodic features also measure alterations in personal speech style (eg, perceived pitch and speech intonation).</p>
          </list-item>
          <list-item>
            <p>Formant characteristics represent the dominant components of the speech spectrum and convey information about the acoustic resonance of the vocal tract and its use. These markers are often indicative of articulatory coordination problems in motor speech control disorders.</p>
          </list-item>
          <list-item>
            <p>Source characteristics that are related to the source of voice production, the airflow through the glottal speech production system. These features operationalize irregularities in the movement of the vocal folds (eg, voice quality measurements).</p>
          </list-item>
          <list-item>
            <p>Temporal characteristics include measures of the proportion of speech (eg, duration of pauses and duration of speech segments), speech segment connectivity, and overall speech rate.</p>
          </list-item>
        </list>
        <p>Features were extracted using Python 3.7 (Python Software Foundation) [<xref ref-type="bibr" rid="ref27">27</xref>] and free and publicly available packages. For the temporal features, the My-Voice Analysis [<xref ref-type="bibr" rid="ref28">28</xref>] package was used. This package was built off of the speech analysis research tool Praat [<xref ref-type="bibr" rid="ref29">29</xref>]. Temporal features were operationalized as the speech rate, syllable count, rate of articulation, speaking duration, total duration, and ratio of speaking to nonspeaking. This package was also used to extract prosodic features, namely the F0 values: mean, standard deviation, minimum, maximum, and upper and lower quartiles. The F0 value is the representation of what is known as the pitch.</p>
        <p>Formant features were calculated using the Python Speech Features library [<xref ref-type="bibr" rid="ref30">30</xref>]. To characterize this aspect of speech, the original sound recording was refit according to a series of transformations commonly used for speech recognition that yield a better representation of the sound called the mel-frequency cepstrum (MFC). From this new representation of the sound form, the first 14 coefficients of the MFC were extracted. The MFC values were extracted given that they describe the spectral shape of the audio file, generally with diminishing returns in terms of how informative they are, which is why we only considered the first 14 coefficients. If we were to select a greater number of MFC values, it would result in a potentially needlessly more complex machine learning model using less informative features.</p>
        <p>From each of these waves, the mean, variance, skewness, and kurtosis were calculated for the energy (static coefficient), velocity (first differential), and acceleration (second differential).</p>
        <p>The Librosa package [<xref ref-type="bibr" rid="ref31">31</xref>] was used to calculate the mean, maximum, minimum, and standard deviation of the root mean square value, centroid, bandwidth, flatness, zero-crossing rate, loudness, and flux of the spectrogram, or the visualization of the recording.</p>
        <p>The source characteristics were extracted using the Signal_Analysis package, version 0.1.26, to extract the micromovements of the sound wave: harmonics-to-noise ratio (HNR), jitter, shimmer, and glottal pulses. Jitter and shimmer are two features of vocal signals that describe the frequency variation from cycle to cycle of the sound wave and the waveform amplitude, respectively [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. While jitter rises with the growing lack of control of vocal cord vibration, higher shimmer is coupled with increased breathiness. HNR is the ratio between periodic components and nonperiodic components that constitute a voiced speech segment [<xref ref-type="bibr" rid="ref34">34</xref>]. These components correspond to the vibration from vocal cords and glottal noise, respectively.</p>
        <p>Speech features vary naturally between males and females due to differences in the length of the vocal tract. These differences have been leveraged in gender classification through speech analysis based on pitch and formant frequencies [<xref ref-type="bibr" rid="ref35">35</xref>], HNR [<xref ref-type="bibr" rid="ref36">36</xref>], linear predictive components, and mel-frequency cepstral coefficients (MFCCs) [<xref ref-type="bibr" rid="ref37">37</xref>]. Previous work found differences in speech depending on gender in the effects of depression and the effectiveness of classifiers for its detection [<xref ref-type="bibr" rid="ref38">38</xref>]. This is why this study considers males and females separately.</p>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>The data collected were described using mean and standard deviation for quantitative variables, and frequency and percentage for qualitative variables. Demographic characteristics, such as age and gender, were compared between different groups of caregivers using a chi-square test for qualitative variables (eg, gender) and an analysis of variance performed for quantitative variables (eg, age). Similarly, the data measured for voice and scores were compared between different groups of caregivers. The normality of the collected data was tested using a Shapiro test. In order to test the relationship between the different voice measures and the measured scores, Spearman correlations were used. In addition, to test the link between the voice measures and the therapist’s decision, Student <italic>t</italic> tests or Wilcoxon-Mann-Whitney tests were performed. A <italic>P</italic> value of less than .05 was considered significant. The analyses were performed using the free statistical software RStudio 4.0.0 [<xref ref-type="bibr" rid="ref39">39</xref>]. Further, regression analyses were performed with the extracted vocal features to determine the error rate for predicting the participants’ stress scores.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Participants</title>
        <p>In total, 89 French-speaking health professionals, aged between 20 and 74 years, accepted the outreach phone calls and their speech samples were recorded and analyzed. Their demographic characteristics are presented in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <p>The mean age of the participants was 40.53 years (SD 14.19). The mean stress score on the PSS was 22.43 (SD 7.16) and on the MSA questionnaire was 2.92 (SD 2.09). The majority of the participants scored below 26 on the PSS but above 0 on the MSA questionnaire. Results on the PSS and on the MSA stress scale were proportional. We found that 27% (24/89) of the recorded health professionals experienced intense stress, and 28% (25/89) experienced occasional stress. Only 16% (14/89) of the participants requested a follow-up. The stress level was gender dependent, with females reporting higher stress levels. For males, stress levels tended to drop with age. <xref rid="figure1" ref-type="fig">Figure 1</xref> shows a distribution of the total stress scores across genders. The total stress scores in the female group are more dispersed than in the male group and are generally higher. A total of 14 out of 89 (16%) participants (11/58, 19% of all females; 3/31, 10% of all males) asked for a follow-up call. Their mean PSS score (mean 31.78, SD 7.40) and mean MSA scale score (mean 5.57, SD 1.34) were significantly higher than for those who did not ask for a follow-up, whose mean PSS score was 20.60 (SD 5.63) and mean MSA scale score was 2.38 (SD 1.8).</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Descriptive statistics for participant characteristics (N=89).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="0"/>
            <col width="150"/>
            <col width="0"/>
            <col width="120"/>
            <col width="0"/>
            <col width="150"/>
            <col width="0"/>
            <col width="0"/>
            <col width="80"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Characteristic</td>
                <td colspan="7">Participants, n (%)</td>
                <td><italic>P</italic> value<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">Total (N=89)</td>
                <td colspan="2">Male (n=31)</td>
                <td colspan="2">Female (n=58)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">Gender</td>
                <td colspan="2">89 (100)</td>
                <td colspan="2">31 (35)</td>
                <td colspan="2">58 (65)</td>
                <td colspan="2">N/A<sup>b</sup></td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Education (years) (n=81)</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.03</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#60;12</td>
                <td colspan="2">19 (23)</td>
                <td colspan="2">1/28 (4)</td>
                <td colspan="2">18/53 (34)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥12</td>
                <td colspan="2">62 (77)</td>
                <td colspan="2">27/28 (96)</td>
                <td colspan="2">35/53 (66)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Timing of call</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.03</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>During lockdown</td>
                <td colspan="2">34 (38)</td>
                <td colspan="2">7 (23)</td>
                <td colspan="2">27 (47)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>After lockdown</td>
                <td colspan="2">55 (62)</td>
                <td colspan="2">24 (77)</td>
                <td colspan="2">31 (53)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Perceived Stress Scale score</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.47</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Knows how to manage stress (&#60;21)</td>
                <td colspan="2">40 (45)</td>
                <td colspan="2">16 (52)</td>
                <td colspan="2">24 (41)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Generally knows how to cope with stress (21-26)</td>
                <td colspan="2">25 (28)</td>
                <td colspan="2">9 (29)</td>
                <td colspan="2">16 (28)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Life is a constant threat (&#62;26)</td>
                <td colspan="2">24 (27)</td>
                <td colspan="2">6 (19)</td>
                <td colspan="2">18 (31)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Motivation Stress Affect (MSA) scale score</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.99</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>0</td>
                <td colspan="2">23 (26)</td>
                <td colspan="2">8 (26)</td>
                <td colspan="2">15 (26)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;0</td>
                <td colspan="2">66 (74)</td>
                <td colspan="2">23 (74)</td>
                <td colspan="2">43 (74)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>MSA motivation scale score</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.47</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>0</td>
                <td colspan="2">30 (34)</td>
                <td colspan="2">12 (39)</td>
                <td colspan="2">18 (31)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;0</td>
                <td colspan="2">59 (66)</td>
                <td colspan="2">19 (61)</td>
                <td colspan="2">40 (69)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>MSA depression scale score</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.32</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>0</td>
                <td colspan="2">57 (64)</td>
                <td colspan="2">22 (71)</td>
                <td colspan="2">35 (60)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;0</td>
                <td colspan="2">32 (36)</td>
                <td colspan="2">9 (29)</td>
                <td colspan="2">23 (40)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Follow-up request (n=88)</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">.36</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>No</td>
                <td colspan="2">74 (84)</td>
                <td colspan="2">28 (90)</td>
                <td colspan="2">46/57 (81)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yes</td>
                <td colspan="2">14 (16)</td>
                <td colspan="2">3 (10)</td>
                <td colspan="2">11/57 (19)</td>
                <td colspan="3">
                  <break/>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Chi-square test or Fisher exact test.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>N/A: not applicable; the <italic>P</italic> value was not calculated for gender.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Stress score distribution across genders.</p>
          </caption>
          <graphic xlink:href="jmir_v23i4e24191_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Correlations</title>
        <p>First, vocal and nonvocal features were analyzed in relation to the stress level. The data set was quite small and, therefore, rather than training a classifier, we performed correlation analysis between the features computed for each speech task and the reported stress level. Further, only extracted speech features were considered; a priori nonmeaningful features, like ID, were removed.</p>
        <p>We performed a selection of the top k features based on their descriptive power for the target variable <italic>total stress score</italic>. Vocal features might be gender dependent. Therefore, we performed a selection of top features for male and female data sets separately. We used Spearman correlation, since we had both ordinal and continuous features: the target <italic>total stress score</italic> is ordinal. Since Spearman correlation uses only the ranks of the variables and not their raw values, we could omit the normalization step. We considered absolute values of the correlation coefficient for feature scoring. Results are presented in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
        <p>The main speech parameters correlating with stress levels in both genders were spectral (ie, formant) features, namely the MFCCs. These features characterize the spectrum of speech, which is the frequency distribution of the speech signal at a specific time. MFCCs were derived by computing a spectrum of the log-magnitude mel-spectrum of the audio segment. The lower coefficients represent the vocal tract filter and the higher coefficients represent periodic vocal fold sources [<xref ref-type="bibr" rid="ref18">18</xref>]. Moreover, prosodic characteristics were also associated with stress levels: in males, the F0, and in females, the pitch range during the positive storytelling task.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Correlation between stress levels and speech features.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Top 10 features for each data set</td>
                <td>Task</td>
                <td>Spearman correlation</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Female data set</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC<sup>a</sup>3 acceleration skewness</td>
                <td>Positive story</td>
                <td>0.49</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC2 mean</td>
                <td>Neutral story</td>
                <td>0.44</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Pitch range</td>
                <td>Positive story</td>
                <td>0.44</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC3 acceleration skewness</td>
                <td>Negative story</td>
                <td>0.43</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC2 mean</td>
                <td>Positive story</td>
                <td>0.44</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC5 acceleration kurtosis</td>
                <td>Negative story</td>
                <td>–0.42</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC2 mean</td>
                <td>Negative story</td>
                <td>0.43</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC5 velocity kurtosis</td>
                <td>Negative story</td>
                <td>–0.40</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC3 acceleration skewness</td>
                <td>Neutral story</td>
                <td>0.39</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC5 velocity kurtosis</td>
                <td>Negative story</td>
                <td>0.39</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Male data set</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Upper quartile F0<sup>b</sup></td>
                <td>Neutral story</td>
                <td>–0.54</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Pronunciation posteriori probability score percentage</td>
                <td>Positive story</td>
                <td>–0.50</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Energy acceleration mean</td>
                <td>Positive story</td>
                <td>0.52</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mean F0</td>
                <td>Neutral story</td>
                <td>–0.51</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC9 kurtosis</td>
                <td>Positive story</td>
                <td>0.41</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC9 variance</td>
                <td>Positive story</td>
                <td>–0.44</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Upper quartile F0</td>
                <td>Negative story</td>
                <td>–0.47</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC4 acceleration mean</td>
                <td>Positive story</td>
                <td>–0.40</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Upper quartile F0</td>
                <td>Positive story</td>
                <td>–0.47</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MFCC12 acceleration skewness</td>
                <td>Neutral story</td>
                <td>–0.42</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>MFCC: mel-frequency cepstral coefficient; the numbers following MFCC are part of the feature names presenting their location on a spectrum.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>F0: fundamental frequency.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>For female participants, correlation analyses between negative, positive, and neutral features and the target feature <italic>total stress score</italic> were performed. Among the top 5 features, we have MFCC acceleration skewness, which correlates with the stress level by 0.45 and 0.37 in the positive and neutral tasks, respectively. The other features among top 5 features are task specific. Thus, for each task there is a different set of features associated with stress level.</p>
        <p>For male participants, the selection was performed analogously. The top features are task specific as well, and they differ from the features for the female data set. In this sample, we obtained more negatively correlating features than for the female data set; this meant that features, for instance, related to F0 of low value (mean F0 in the neutral story with –0.51, upper quartile F0 in the negative and positive story with –0.47, and upper quartile F0 in the neutral story with –0.54) are associated with high stress scores. In general, low values represent a smaller pitch range.</p>
      </sec>
      <sec>
        <title>Regression</title>
        <p>Stress scores were regressed against measurements for positive, neutral, and negative tasks. Similarly, the regression for tasks of different sentiments was performed for groups of female and male participants to allow for possible impacts of gender on stress levels. For the regressors, we used linear, support vector machine (SVM), and random forest regressors to predict the stress scores.</p>
        <p>The first regression approximated the stress score by estimating coefficients for each feature in the training data, where greater coefficients indicate a greater influence over the predicted value. Linear regression models are fast, highly interpretable, and commonly used for prediction of stress scores from audio features and speech analysis, according to previous studies [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]. The random forest regressor created a number of decision trees that were constructed based on random sampling from the training data; each tree then attempted to determine the best way to predict the scores given the data it received. Each decision tree outputted a predicted value and the mode value was selected. Decision tree methods have shown high accuracy with good interpretability in similar studies where vocal and linguistic features were employed for detection of emotions, social signals, and mental health problems [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref45">45</xref>]. The SVM regressor took each set of features and projected them as a vector onto a space and attempted to find the optimal way to separate the data. The stress score was then based on the distance from that separator. Stress modeling with inputs from physiological sensors or audio sources using SVM has also been previously reported to give high model performance [<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref48">48</xref>]. In recent studies, both SVM and random forest provided notably high prediction and classification strength for stress detection using various speech features [<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>].</p>
        <p>The caret package from R, version 3.4.2 (The R Foundation), was used for data training and validation. A 10-fold cross-validation was performed and performance was evaluated using the mean absolute error (MAE): the average of the absolute difference between the predicted and actual values from our models for all participants. The score ranges from 0 to infinity, where a score closer to 0 indicates a better-fitting model.</p>
        <p>The prediction of total stress scores using all or a subset of tasks among male or female subjects was carried out using various baseline regression models, whose performances were evaluated by the plots in <xref rid="figure2" ref-type="fig">Figure 2</xref>, where the MAE values are presented on the y-axis. Overall, the prediction strength in males was better than in females for all sentiments, as shown by a trend of lower errors (lowest MAE for males was 3.84; lowest MAE for females was 5.56). It is notable that stress score regression models based on negative tasks in males and neutral tasks in females performed relatively poorly compared to other tasks. For both male and female participants, using positive tasks for regression yielded equivalent or better results than using all tasks, suggesting that a subset of tasks could be employed for accurate and less time-consuming prediction of stress scores. An overview of the lowest scores for each testing scenario is presented in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
        <p>All regression models outperformed their respective baseline MAE values (4.46 and 6.35 in males and females, respectively). Linear models and the SVM regressor were the most precise for the prediction of total stress scores in general.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Performances of different computerized regression models in predicting stress levels based on vocal features. Boosted: boosted linear model; ElasticNet: mix of L1 and L2 regularized linear regression; MAE: mean absolute error; Poly: support vector machine with polynomial basis function kernel; Quantile: quantile regression forest; Radial: support vector machine with radial basis function kernel; SVM: support vector machine.</p>
          </caption>
          <graphic xlink:href="jmir_v23i4e24191_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>The lowest scores for each testing scenario.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="106"/>
            <col width="127"/>
            <col width="194"/>
            <col width="108"/>
            <col width="184"/>
            <col width="0"/>
            <col width="106"/>
            <col width="175"/>
            <col width="0"/>
            <thead>
              <tr valign="top">
                <td>Participant group</td>
                <td colspan="2">Positive tasks</td>
                <td colspan="3">Neutral tasks</td>
                <td colspan="2">Negative tasks</td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>MAE<sup>a</sup> (SD)</td>
                <td>Model</td>
                <td>MAE (SD)</td>
                <td>Model</td>
                <td colspan="2">MAE (SD)</td>
                <td colspan="2">Model</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>All</td>
                <td>5.31 (0.25)</td>
                <td>ElasticNet<sup>b</sup></td>
                <td>5.25 (0.28)</td>
                <td>QuantileRF<sup>c</sup></td>
                <td colspan="2">5.34 (0.35)</td>
                <td colspan="2">PolySVM<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>Male</td>
                <td>3.84 (0.43)</td>
                <td>QuantileRF</td>
                <td>4.40 (0.37)</td>
                <td>BoostedLM<sup>e</sup></td>
                <td colspan="2">4.37 (0.43)</td>
                <td colspan="2">PolySVM</td>
              </tr>
              <tr valign="top">
                <td>Female</td>
                <td>5.56 (0.41)</td>
                <td>ElasticNet</td>
                <td>5.84 (0.42)</td>
                <td>RadialSVM<sup>f</sup></td>
                <td colspan="2">5.68 (0.45)</td>
                <td colspan="2">PolySVM</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>MAE: mean absolute error.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>ElasticNet: mix of L1 and L2 regularized linear regression.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>QuantileRF: quantile regression forest.</p>
            </fn>
            <fn id="table3fn4">
              <p><sup>d</sup>PolySVM: support vector machine with polynomial basis function kernel.</p>
            </fn>
            <fn id="table3fn5">
              <p><sup>e</sup>BoostedLM: boosted linear model.</p>
            </fn>
            <fn id="table3fn6">
              <p><sup>f</sup>RadialSVM: support vector machine with radial basis function kernel.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The purpose of this study was to investigate the potential of using automatic speech analysis for the detection of stress in health care professionals during the current COVID-19 pandemic. This would potentially lead to earlier and timely prevention among this high-risk population. Firstly, speech samples were collected over the phone, and various voice features were extracted and compared with classical stress measures. Secondly, based on the extracted features, scores obtained by participants on the completed stress scale were predicted.</p>
        <p>The main outcome of this study was the demonstration of this approach’s feasibility under the given context, as all participants were cooperative and appreciated the initiative of rapidly applying this existing technology to this specific use case. Moreover, from phone call recordings, a number of vocal correlates of stress have been identified, namely in the area of spectral features (ie, MFCC) as well as prosodic features such as F0, which seem to be the most commonly reported features in well-controlled trials [<xref ref-type="bibr" rid="ref11">11</xref>]. Stress scores could be predicted based on speech features with relatively small errors.</p>
        <p>Spectral features characterize the speech spectrum; the frequency distribution of the speech signal at a specific time indicates information in some high-dimensional representation [<xref ref-type="bibr" rid="ref18">18</xref>]. The features capture information regarding changes in muscle tension and control and have consistently been observed to change with a speaker’s mental state. A few depression studies reported a relative shift in energy with increasing depression severity [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>].</p>
        <p>Another result we obtained was that most identified vocal features were task dependent as well as gender dependent. Interestingly, in the female group, MFCC features seemed to be associated with stress levels during all tasks, meaning that it did not matter what participants were talking about; as long as sufficient speech was captured, meaningful information could be extracted and subtle signs of stress level could be detected. On the other hand, in the male data set, the upper quartile F0 appeared as a task-independent feature sensitive to stress levels. Overall, in the male data set, we observed more features with a negative correlation than we did for the female data set.</p>
        <p>Voice production can be divided into three processes: breathing, phonation, and resonance [<xref ref-type="bibr" rid="ref54">54</xref>]. For the second process, phonation, the vocal folds must close and open again to create vibration. The frequency rate of these pulses determines the F0 of the vocal source contributing to the perceived pitch of the sound.</p>
        <p>Previous research showed that increased muscle tension tends to be caused by stress [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>], resulting in a tensing of the vocal folds, which, in turn, most likely causes a raising of F0. A recent review on voice analysis in stress [<xref ref-type="bibr" rid="ref22">22</xref>] stated that the parameter F0 has been considered as a “universal stress indicator,” whereas increased levels of F0 might be linked with acute bottom-up processes of sympathetic arousal. Similar studies of analysis of phone call recordings during situational stress revealed an increase in F0 and intensity with presumed levels of stress [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref58">58</xref>]. Our findings seem consistent with the majority of acoustic studies, pointing to F0 as one important marker of stress levels.</p>
        <p>However, most correlations we found were with resonance (ie, formant) parameters, which are involved in the quality of sound shaping and vowel and consonant pronunciation and are produced by the muscle activity involved in the shaping of the resonant cavities of the vocal tract system [<xref ref-type="bibr" rid="ref59">59</xref>]. These parameters are less documented in regard to stress. The MFCC, in particular, can be indicative of breathiness in the voice [<xref ref-type="bibr" rid="ref60">60</xref>]. Interestingly, one study found a circadian pattern in MFCCs due to sleep deprivation. For this, voice perturbations were compared with classical sleep measures [<xref ref-type="bibr" rid="ref61">61</xref>] and correlations were found between fatigue scores and MFCCs. This might eventually explain our results, as most participants also reported signs of fatigue during the interviews.</p>
        <p>Another study examined speech in students under exam stress and a few days later; in this case, heart rate was measured to control for the actual stress levels. Under stress, students’ heart rates increased, F0 and F0 SD increased, first formant (F1) and second formant (F2) frequencies increased, and MFCCs decreased in relation to baseline levels [<xref ref-type="bibr" rid="ref62">62</xref>].</p>
        <p>It can be hypothesized that given our recorded population who reported relatively mild to moderate levels of stress, rather subtle changes in voice parameters were found and, therefore, weaker correlations were observed. However, it is important to underline that changes in features that we found to be sensitive to stress levels were gender dependent but not necessarily task dependent. They were most likely too small to be detectable by the human ear but were captured by the automatic speech analysis. We assume that by applying this technology to regular check-up calls with people experiencing high stress levels, such as health care professionals, very early signs of stress can be detected in their voices, allowing for timely preventive strategies.</p>
        <p>Regression models using vocal features performed relatively well in predicting stress scores, namely in the positive story task for both genders (MAE of 5.31). It shows that the technology could capture indicative patterns from even a short amount of time, possibly even from one task, to recognize tendencies of stress levels in a fragile but healthy population; this represents a promising rapid tool for prediction of stress scores.</p>
      </sec>
      <sec>
        <title>Strengths of This Study</title>
        <p>This study is a first step into the early identification of stress in an at-risk population, such as caregivers, who do not directly express their psychological suffering. We can imagine extending this technique to other fragile populations for early screening of stress, such as teenagers who are victims of school harassment or women who are victims of abuse, where timely management could potentially prevent the development of comorbidities, such as depression and anxiety. Moreover, patient populations who have difficulty expressing their problems, such as those with autism spectrum disorder or dementia, could benefit from this technology.</p>
        <p>Generally, remote psychological counseling is controversial. Nevertheless, it is becoming necessary due to current economic, social, and health constraints, but has been received by professionals and patients with mixed feelings. Indeed, the nonverbal part of communication is lost and the dynamics of interaction are not the same. However, contrary to these preconceived ideas, we have noticed during this work that it is easier for certain participants to open up and speak about personal issues during these interviews in a liberating manner, similar to a confessional. Not being in the physical presence of the listener may facilitate personal expression, with less fear of being judged. This aspect is very interesting during a screening because it considerably accelerates the process of detection and diagnosing of psychological symptoms.</p>
      </sec>
      <sec>
        <title>Weaknesses of the Study</title>
        <p>This project has been rapidly implemented, initially with an approach of qualitative and quantitative data analysis, which should contribute to the early and timely assistance of health professionals during the COVID-19 pandemic. The staff members available to participate in the study were limited. Patient selection was done on a voluntary basis. It is conceivable that the population studied was more concerned about their state of psychological suffering, therefore potentially introducing a selection bias.</p>
        <p>Although the voice recordings were made in the middle of the interview without this time being precisely stated, it is possible that some patients may have suspected this, which could have been anxiety provoking and skewed our results. Recording throughout the interview for parameters not affected by the tasks would provide more data and more robust results.</p>
        <p>Finally, the obtained correlations can be considered as rather moderate, which makes it difficult to draw any strong conclusions. A larger data set, ideally of a longitudinal nature, with more precise characterization of the speakers is needed in order to verify whether the correlating features represent real markers of stress.</p>
      </sec>
      <sec>
        <title>Future Perspective</title>
        <p>For future work, we propose to perform this analysis on a larger data set and to build a prediction model. In case of an insufficient number of observations per stress level, the number of stress levels can be reduced by binning. Binning can also be carried out on characteristic values.</p>
        <p>Further studies with acoustic measurements and stress questionnaires at regular time intervals would allow for the analysis of the kinetics of the markers and a better perception of their sensitivity and specificity. In addition, adding clinical measurements of psychiatric symptoms, such as the Diagnostic and Statistical Manual of Mental Disorders, Fifth Edition [<xref ref-type="bibr" rid="ref63">63</xref>], would make it possible to perceive whether one of the markers is predictive of an anxiety or depression disorder. The use of the tool could be combined with the delivery of preventive strategies, such as physical exercises, adaptation of diet, psychotherapy, meditation, or the use of symptomatic treatments, and it could be employed at the same time for the evaluation of the obtained effects. However, in order to produce a real-world application of this technology, larger validation studies have to be performed to demonstrate clinical meaningfulness by comparing its performance to standardized measurement tools.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">API</term>
          <def>
            <p>application programming interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">F0</term>
          <def>
            <p>fundamental frequency</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">F1</term>
          <def>
            <p>first formant</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">F2</term>
          <def>
            <p>second formant</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">GSF</term>
          <def>
            <p>Groupe Services France</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">HNR</term>
          <def>
            <p>harmonics-to-noise ratio</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">MAE</term>
          <def>
            <p>mean absolute error</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">MFC</term>
          <def>
            <p>mel-frequency cepstrum</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MFCC</term>
          <def>
            <p>mel-frequency cepstral coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">MSA</term>
          <def>
            <p>Motivation Stress Affect</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">PSS</term>
          <def>
            <p>Perceived Stress Scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was supported by a grant from the Fondation GSF (Groupe Services France) Jean Louis Noisiez and the Association Innovation Alzheimer. Thanks to all of the professional health care workers who agreed to participate in this study.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>AK, KR, JE, and PR designed and conducted the study. AD contributed as technical support to this study. NL, HL, and RF analyzed the data. KR, AK, NL, HL, and PR drafted the manuscript. All authors have read and agreed to the published version of the manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>NL is an employee and shareholder of ki elements UG.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Pneumonia of unknown cause – China</article-title>
          <source>World Health Organization</source>
          <year>2020</year>
          <month>01</month>
          <day>05</day>
          <access-date>2020-01-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/csr/don/05-january-2020-pneumonia-of-unkown-cause-china/en/">https://www.who.int/csr/don/05-january-2020-pneumonia-of-unkown-cause-china/en/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>WHO announces COVID-19 outbreak a pandemic</article-title>
          <source>World Health Organization</source>
          <year>2020</year>
          <month>03</month>
          <day>12</day>
          <access-date>2020-03-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.euro.who.int/en/health-topics/health-emergencies/coronavirus-covid-19/news/news/2020/3/who-announces-covid-19-outbreak-a-pandemic">https://www.euro.who.int/en/health-topics/health-emergencies/coronavirus-covid-19/news/news/2020/3/who-announces-covid-19-outbreak-a-pandemic</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Kakade</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fuller</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Depression after exposure to stressful events: Lessons learned from the severe acute respiratory syndrome epidemic</article-title>
          <source>Compr Psychiatry</source>
          <year>2012</year>
          <month>01</month>
          <volume>53</volume>
          <issue>1</issue>
          <fpage>15</fpage>
          <lpage>23</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21489421"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.comppsych.2011.02.003</pub-id>
          <pub-id pub-id-type="medline">21489421</pub-id>
          <pub-id pub-id-type="pii">S0010-440X(11)00021-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC3176950</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lung</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shu</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Mental symptoms in different health professionals during the SARS attack: A follow-up study</article-title>
          <source>Psychiatr Q</source>
          <year>2009</year>
          <month>06</month>
          <volume>80</volume>
          <issue>2</issue>
          <fpage>107</fpage>
          <lpage>116</lpage>
          <pub-id pub-id-type="doi">10.1007/s11126-009-9095-5</pub-id>
          <pub-id pub-id-type="medline">19247834</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Fuller</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Susser</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hoven</surname>
              <given-names>CW</given-names>
            </name>
          </person-group>
          <article-title>The psychological impact of the SARS epidemic on hospital employees in China: Exposure, risk perception, and altruistic acceptance of risk</article-title>
          <source>Can J Psychiatry</source>
          <year>2009</year>
          <month>05</month>
          <volume>54</volume>
          <issue>5</issue>
          <fpage>302</fpage>
          <lpage>311</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/19497162"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/070674370905400504</pub-id>
          <pub-id pub-id-type="medline">19497162</pub-id>
          <pub-id pub-id-type="pmcid">PMC3780353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pappa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ntella</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Giannakas</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Giannakoulis</surname>
              <given-names>VG</given-names>
            </name>
            <name name-style="western">
              <surname>Papoutsi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Katsaounou</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Prevalence of depression, anxiety, and insomnia among healthcare workers during the COVID-19 pandemic: A systematic review and meta-analysis</article-title>
          <source>Brain Behav Immun</source>
          <year>2020</year>
          <month>08</month>
          <volume>88</volume>
          <fpage>901</fpage>
          <lpage>907</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/32437915"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.bbi.2020.05.026</pub-id>
          <pub-id pub-id-type="medline">32437915</pub-id>
          <pub-id pub-id-type="pii">S0889-1591(20)30845-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC7206431</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kamarck</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mermelstein</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A global measure of perceived stress</article-title>
          <source>J Health Soc Behav</source>
          <year>1983</year>
          <month>12</month>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>385</fpage>
          <lpage>396</lpage>
          <pub-id pub-id-type="medline">6668417</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roohafza</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ramezani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sadeghi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Shahnam</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zolfagari</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sarafzadegan</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Development and validation of the stressful life event questionnaire</article-title>
          <source>Int J Public Health</source>
          <year>2011</year>
          <month>08</month>
          <volume>56</volume>
          <issue>4</issue>
          <fpage>441</fpage>
          <lpage>448</lpage>
          <pub-id pub-id-type="doi">10.1007/s00038-011-0232-1</pub-id>
          <pub-id pub-id-type="medline">21327856</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amirkhan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Stress overload: A new approach to the assessment of stress</article-title>
          <source>Am J Community Psychol</source>
          <year>2012</year>
          <month>03</month>
          <volume>49</volume>
          <issue>1-2</issue>
          <fpage>55</fpage>
          <lpage>71</lpage>
          <pub-id pub-id-type="doi">10.1007/s10464-011-9438-x</pub-id>
          <pub-id pub-id-type="medline">21538152</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Petrowski</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Paul</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Albani</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Brähler</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Factor structure and psychometric properties of the trier inventory for chronic stress (TICS) in a representative German sample</article-title>
          <source>BMC Med Res Methodol</source>
          <year>2012</year>
          <month>04</month>
          <day>01</day>
          <volume>12</volume>
          <fpage>42</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-12-42"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/1471-2288-12-42</pub-id>
          <pub-id pub-id-type="medline">22463771</pub-id>
          <pub-id pub-id-type="pii">1471-2288-12-42</pub-id>
          <pub-id pub-id-type="pmcid">PMC3350460</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giddens</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Barron</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Byrd-Craven</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>KF</given-names>
            </name>
            <name name-style="western">
              <surname>Winter</surname>
              <given-names>AS</given-names>
            </name>
          </person-group>
          <article-title>Vocal indices of stress: A review</article-title>
          <source>J Voice</source>
          <year>2013</year>
          <month>05</month>
          <volume>27</volume>
          <issue>3</issue>
          <fpage>390.e21</fpage>
          <lpage>399</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2012.12.010</pub-id>
          <pub-id pub-id-type="medline">23462686</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(12)00235-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pisanski</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nowak</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sorokowski</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Individual differences in cortisol stress response predict increases in voice pitch during exam stress</article-title>
          <source>Physiol Behav</source>
          <year>2016</year>
          <month>09</month>
          <day>01</day>
          <volume>163</volume>
          <fpage>234</fpage>
          <lpage>238</lpage>
          <pub-id pub-id-type="doi">10.1016/j.physbeh.2016.05.018</pub-id>
          <pub-id pub-id-type="medline">27188981</pub-id>
          <pub-id pub-id-type="pii">S0031-9384(16)30250-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kirchhübel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Stedmon</surname>
              <given-names>AW</given-names>
            </name>
          </person-group>
          <article-title>Acoustic correlates of speech when under stress: Research, methods and future directions</article-title>
          <source>Int J Speech Lang Law</source>
          <year>2011</year>
          <month>09</month>
          <day>13</day>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>75</fpage>
          <lpage>98</lpage>
          <pub-id pub-id-type="doi">10.1558/ijsll.v18i1.75</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hollien</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Vocal fold dynamics for frequency change</article-title>
          <source>J Voice</source>
          <year>2014</year>
          <month>07</month>
          <volume>28</volume>
          <issue>4</issue>
          <fpage>395</fpage>
          <lpage>405</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2013.12.005</pub-id>
          <pub-id pub-id-type="medline">24726331</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(13)00252-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sobin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Sackeim</surname>
              <given-names>HA</given-names>
            </name>
          </person-group>
          <article-title>Psychomotor symptoms of depression</article-title>
          <source>Am J Psychiatry</source>
          <year>1997</year>
          <month>01</month>
          <volume>154</volume>
          <issue>1</issue>
          <fpage>4</fpage>
          <lpage>17</lpage>
          <pub-id pub-id-type="doi">10.1176/ajp.154.1.4</pub-id>
          <pub-id pub-id-type="medline">8988952</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schrijvers</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hulstijn</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Sabbe</surname>
              <given-names>BG</given-names>
            </name>
          </person-group>
          <article-title>Psychomotor symptoms in depression: A diagnostic, pathophysiological and therapeutic tool</article-title>
          <source>J Affect Disord</source>
          <year>2008</year>
          <month>07</month>
          <volume>109</volume>
          <issue>1-2</issue>
          <fpage>1</fpage>
          <lpage>20</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2007.10.019</pub-id>
          <pub-id pub-id-type="medline">18082896</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(07)00377-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bylsma</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>BH</given-names>
            </name>
            <name name-style="western">
              <surname>Rottenberg</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A meta-analysis of emotional reactivity in major depressive disorder</article-title>
          <source>Clin Psychol Rev</source>
          <year>2008</year>
          <month>04</month>
          <volume>28</volume>
          <issue>4</issue>
          <fpage>676</fpage>
          <lpage>691</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cpr.2007.10.001</pub-id>
          <pub-id pub-id-type="medline">18006196</pub-id>
          <pub-id pub-id-type="pii">S0272-7358(07)00162-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krajewski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schnieder</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Quatieri</surname>
              <given-names>TF</given-names>
            </name>
          </person-group>
          <article-title>A review of depression and suicide risk assessment using speech analysis</article-title>
          <source>Speech Commun</source>
          <year>2015</year>
          <month>07</month>
          <volume>71</volume>
          <fpage>10</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1016/j.specom.2015.03.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nilsonne</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Speech characteristics as indicators of depressive illness</article-title>
          <source>Acta Psychiatr Scand</source>
          <year>1988</year>
          <month>03</month>
          <volume>77</volume>
          <issue>3</issue>
          <fpage>253</fpage>
          <lpage>263</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1600-0447.1988.tb05118.x</pub-id>
          <pub-id pub-id-type="medline">3394527</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leff</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Abberton</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Voice pitch measurements in schizophrenia and depression</article-title>
          <source>Psychol Med</source>
          <year>1981</year>
          <month>11</month>
          <volume>11</volume>
          <issue>4</issue>
          <fpage>849</fpage>
          <lpage>852</lpage>
          <pub-id pub-id-type="doi">10.1017/s0033291700041349</pub-id>
          <pub-id pub-id-type="medline">7323240</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marmar</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Laska</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Siegel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Amara</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tsiartas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Richey</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Knoth</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Vergyri</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Speech-based markers for posttraumatic stress disorder in US veterans</article-title>
          <source>Depress Anxiety</source>
          <year>2019</year>
          <month>07</month>
          <volume>36</volume>
          <issue>7</issue>
          <fpage>607</fpage>
          <lpage>616</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31006959"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/da.22890</pub-id>
          <pub-id pub-id-type="medline">31006959</pub-id>
          <pub-id pub-id-type="pmcid">PMC6602854</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Van Puyvelde</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Neyt</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>McGlone</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pattyn</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Voice stress analysis: A new framework for voice and effort in human performance</article-title>
          <source>Front Psychol</source>
          <year>2018</year>
          <volume>9</volume>
          <fpage>1994</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2018.01994"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2018.01994</pub-id>
          <pub-id pub-id-type="medline">30515113</pub-id>
          <pub-id pub-id-type="pmcid">PMC6255927</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Robert</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lanctôt</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Agüera-Ortiz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aalten</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bremond</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Defrancesco</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hanon</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>David</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dubois</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dujardin</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Husain</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>König</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mantua</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Meulien</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moebius</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rasmussen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Robert</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ruthirakuhan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Stella</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Yesavage</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zeghari</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Manera</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Is it time to revise the diagnostic criteria for apathy in brain disorders? The 2018 international consensus group</article-title>
          <source>Eur Psychiatry</source>
          <year>2018</year>
          <month>10</month>
          <volume>54</volume>
          <fpage>71</fpage>
          <lpage>76</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0924-9338(18)30143-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.eurpsy.2018.07.008</pub-id>
          <pub-id pub-id-type="medline">30125783</pub-id>
          <pub-id pub-id-type="pii">S0924-9338(18)30143-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yesavage</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Brink</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>TL</given-names>
            </name>
            <name name-style="western">
              <surname>Lum</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Adey</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leirer</surname>
              <given-names>VO</given-names>
            </name>
          </person-group>
          <article-title>Development and validation of a geriatric depression screening scale: A preliminary report</article-title>
          <source>J Psychiatr Res</source>
          <year>1982</year>
          <month>01</month>
          <volume>17</volume>
          <issue>1</issue>
          <fpage>37</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1016/0022-3956(82)90033-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>König</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Linz</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zeghari</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Klinge</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tröger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Alexandersson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Robert</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Detecting apathy in older adults with cognitive disorders using automatic speech analysis</article-title>
          <source>J Alzheimers Dis</source>
          <year>2019</year>
          <month>06</month>
          <day>18</day>
          <volume>69</volume>
          <issue>4</issue>
          <fpage>1183</fpage>
          <lpage>1193</lpage>
          <pub-id pub-id-type="doi">10.3233/jad-181033</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="web">
          <source>ki:elements</source>
          <access-date>2020-11-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ki-elements.de">https://ki-elements.de</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Van Rossum</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Drake</surname>
              <given-names>FL</given-names>
            </name>
          </person-group>
          <source>Python 3 Reference Manual</source>
          <year>2009</year>
          <publisher-loc>Scotts Valley, CA</publisher-loc>
          <publisher-name>CreateSpace</publisher-name>
          <fpage>1</fpage>
          <lpage>242</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shahab</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>My-Voice Analysis</article-title>
          <source>GitHub</source>
          <year>2020</year>
          <access-date>2020-11-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/Shahabks/my-voice-analysis">https://github.com/Shahabks/my-voice-analysis</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boersma</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>PRAAT, a system for doing phonetics by computer</article-title>
          <source>Glot International. Vol 5, No. 9/10</source>
          <year>2001</year>
          <access-date>2021-04-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/profile/Paul-Boersma-2/publication/208032992_PRAAT_a_system_for_doing_phonetics_by_computer/links/53d77c1c0cf29f55cfb4d26b/PRAAT-a-system-for-doing-phonetics-by-computer.pdf">https://www.researchgate.net/profile/Paul-Boersma-2/publication/208032992_PRAAT_a_system_for_doing_phonetics_by_computer/links/53d77c1c0cf29f55cfb4d26b/PRAAT-a-system-for-doing-phonetics-by-computer.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lyons</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>python_speech_features</article-title>
          <source>GitHub</source>
          <year>2020</year>
          <month>01</month>
          <day>14</day>
          <access-date>2020-11-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/jameslyons/python_speech_features">https://github.com/jameslyons/python_speech_features</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <article-title>librosa 0.8.0</article-title>
          <source>Python Package Index</source>
          <year>2020</year>
          <month>07</month>
          <day>21</day>
          <access-date>2020-11-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pypi.org/project/librosa/">https://pypi.org/project/librosa/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kreiman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gerratt</surname>
              <given-names>BR</given-names>
            </name>
          </person-group>
          <article-title>Perception of aperiodicity in pathological voice</article-title>
          <source>J Acoust Soc Am</source>
          <year>2005</year>
          <month>04</month>
          <volume>117</volume>
          <issue>4 Pt 1</issue>
          <fpage>2201</fpage>
          <lpage>2211</lpage>
          <pub-id pub-id-type="doi">10.1121/1.1858351</pub-id>
          <pub-id pub-id-type="medline">15898661</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Michaelis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fröhlich</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Strube</surname>
              <given-names>HW</given-names>
            </name>
            <name name-style="western">
              <surname>Kruse</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Story</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Titze</surname>
              <given-names>IR</given-names>
            </name>
          </person-group>
          <article-title>Some simulations concerning jitter and shimmer measurement</article-title>
          <source>Proceedings of the 3rd International Workshop on Advances in Quantitative Laryngoscopy, Voice and Speech Research</source>
          <year>1998</year>
          <month>06</month>
          <conf-name>3rd International Workshop on Advances in Quantitative Laryngoscopy, Voice and Speech Research</conf-name>
          <conf-date>1998</conf-date>
          <conf-loc>Aachen, Germany</conf-loc>
          <fpage>744</fpage>
          <lpage>754</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Akande</surname>
              <given-names>OO</given-names>
            </name>
          </person-group>
          <article-title>Cepstrum-based estimation of the harmonics-to-noise ratio for synthesized and human voice signals</article-title>
          <source>Proceedings of the International Conference on Nonlinear Analyses and Algorithms for Speech Processing</source>
          <year>2005</year>
          <month>04</month>
          <conf-name>International Conference on Nonlinear Analyses and Algorithms for Speech Processing</conf-name>
          <conf-date>April 19-22, 2005</conf-date>
          <conf-loc>Barcelona, Spain</conf-loc>
          <fpage>150</fpage>
          <lpage>160</lpage>
          <pub-id pub-id-type="doi">10.1007/11613107_13</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Childers</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Gender recognition from speech. Part II: Fine analysis</article-title>
          <source>J Acoust Soc Am</source>
          <year>1991</year>
          <month>10</month>
          <volume>90</volume>
          <issue>4 Pt 1</issue>
          <fpage>1841</fpage>
          <lpage>1856</lpage>
          <pub-id pub-id-type="doi">10.1121/1.401664</pub-id>
          <pub-id pub-id-type="medline">1755877</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heffernan</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Evidence from HNR that /s/ is a social marker of gender</article-title>
          <source>Toronto Working Papers in Linguistics</source>
          <year>2004</year>
          <volume>23</volume>
          <fpage>71</fpage>
          <lpage>84</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://twpl.library.utoronto.ca/index.php/twpl/article/view/6208/3197"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Childers</surname>
              <given-names>DG</given-names>
            </name>
          </person-group>
          <article-title>Gender recognition from speech. Part I: Coarse analysis</article-title>
          <source>J Acoust Soc Am</source>
          <year>1991</year>
          <month>10</month>
          <volume>90</volume>
          <issue>4 Pt 1</issue>
          <fpage>1828</fpage>
          <lpage>1840</lpage>
          <pub-id pub-id-type="doi">10.1121/1.401663</pub-id>
          <pub-id pub-id-type="medline">1960278</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Low</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Maddage</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Lech</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sheeber</surname>
              <given-names>LB</given-names>
            </name>
            <name name-style="western">
              <surname>Allen</surname>
              <given-names>NB</given-names>
            </name>
          </person-group>
          <article-title>Detection of clinical depression in adolescents’ speech during family interactions</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2011</year>
          <month>03</month>
          <volume>58</volume>
          <issue>3</issue>
          <fpage>574</fpage>
          <lpage>586</lpage>
          <pub-id pub-id-type="doi">10.1109/tbme.2010.2091640</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="web">
          <source>The Comprehensive R Archive Network</source>
          <access-date>2020-11-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cran.r-project.org/">https://cran.r-project.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gillespie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Laures-Gore</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Farina</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Russell</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Logan</surname>
              <given-names>YL</given-names>
            </name>
          </person-group>
          <article-title>Detecting stress and depression in adults with aphasia through speech analysis</article-title>
          <source>Proceedings of the 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</source>
          <year>2017</year>
          <conf-name>2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</conf-name>
          <conf-date>March 5-9, 2017</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <fpage>5140</fpage>
          <lpage>5144</lpage>
          <pub-id pub-id-type="doi">10.1109/icassp.2017.7953136</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van den Broek</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>van der Sluis</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Dijkstra</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Telling the story and re-living the past: How speech analysis can reveal emotions in post-traumatic stress disorder (PTSD) patients</article-title>
          <source>Sensing Emotions</source>
          <year>2010</year>
          <publisher-loc>Dordrecht, the Netherlands</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>153</fpage>
          <lpage>180</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Muaremi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Arnrich</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tröster</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Towards measuring stress with smartphones and wearable devices during workday and sleep</article-title>
          <source>Bionanoscience</source>
          <year>2013</year>
          <volume>3</volume>
          <fpage>172</fpage>
          <lpage>183</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25530929"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s12668-013-0089-2</pub-id>
          <pub-id pub-id-type="medline">25530929</pub-id>
          <pub-id pub-id-type="pii">89</pub-id>
          <pub-id pub-id-type="pmcid">PMC4269214</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hasan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rundensteiner</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Agu</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Emotex: Detecting emotions in twitter messages</article-title>
          <source>Proceedings of the 2014 ASE Bigdata/Socialcom/Cybersecurity Conference</source>
          <year>2014</year>
          <conf-name>2014 ASE Bigdata/Socialcom/Cybersecurity Conference</conf-name>
          <conf-date>May 27-31, 2014</conf-date>
          <conf-loc>San Francisco, CA</conf-loc>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://web.cs.wpi.edu/~emmanuel/publications/PDFs/C30.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gosztolya</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Busa-Fekete</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tóth</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Detecting autism, emotions and social signals using AdaBoost</article-title>
          <source>Proceedings of the 14th Annual Conference of the International Speech Communication Association</source>
          <year>2013</year>
          <conf-name>14th Annual Conference of the International Speech Communication Association</conf-name>
          <conf-date>August 25-29, 2013</conf-date>
          <conf-loc>Lyon, France</conf-loc>
          <fpage>220</fpage>
          <lpage>224</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.isca-speech.org/archive/archive_papers/interspeech_2013/i13_0220.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Howes</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Purver</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McCabe</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Linguistic indicators of severity and progress in online text-based therapy for depression</article-title>
          <source>Proceedings of the Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality</source>
          <year>2014</year>
          <conf-name>Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality</conf-name>
          <conf-date>June 27, 2014</conf-date>
          <conf-loc>Baltimore, MD</conf-loc>
          <fpage>7</fpage>
          <lpage>16</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.aclweb.org/anthology/W14-3202.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.3115/v1/w14-3202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Al-Shargie</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>TB</given-names>
            </name>
            <name name-style="western">
              <surname>Badruddin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kiguchi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Towards multilevel mental stress assessment using SVM with ECOC: An EEG approach</article-title>
          <source>Med Biol Eng Comput</source>
          <year>2018</year>
          <month>01</month>
          <volume>56</volume>
          <issue>1</issue>
          <fpage>125</fpage>
          <lpage>136</lpage>
          <pub-id pub-id-type="doi">10.1007/s11517-017-1733-8</pub-id>
          <pub-id pub-id-type="medline">29043535</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11517-017-1733-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rabaoui</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Davy</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rossignol</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ellouze</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Using one-class SVMs and wavelets for audio surveillance</article-title>
          <source>IEEE Trans Inf Forensics Secur</source>
          <year>2008</year>
          <month>12</month>
          <volume>3</volume>
          <issue>4</issue>
          <fpage>763</fpage>
          <lpage>775</lpage>
          <pub-id pub-id-type="doi">10.1109/tifs.2008.2008216</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Physiological emotion analysis using support vector regression</article-title>
          <source>Neurocomputing</source>
          <year>2013</year>
          <month>12</month>
          <volume>122</volume>
          <fpage>79</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2013.02.041</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Soury</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Devillers</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Stress detection from audio on multiple window analysis size in a public speaking task</article-title>
          <source>Proceedings of the 2013 Humaine Association Conference on Affective Computing and Intelligent Interaction</source>
          <year>2013</year>
          <conf-name>2013 Humaine Association Conference on Affective Computing and Intelligent Interaction</conf-name>
          <conf-date>September 2-5, 2013</conf-date>
          <conf-loc>Geneva, Switzerland</conf-loc>
          <fpage>529</fpage>
          <lpage>533</lpage>
          <pub-id pub-id-type="doi">10.1109/acii.2013.93</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sysoev</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kos</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pogačnik</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Noninvasive stress recognition considering the current activity</article-title>
          <source>Pers Ubiquitous Comput</source>
          <year>2015</year>
          <month>8</month>
          <day>26</day>
          <volume>19</volume>
          <issue>7</issue>
          <fpage>1045</fpage>
          <lpage>1052</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1007%2Fs00779-015-0885-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00779-015-0885-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gjoreski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gjoreski</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Luštrek</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gams</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic detection of perceived stress in campus students using smartphones</article-title>
          <source>Proceedings of the 2015 International Conference on Intelligent Environments</source>
          <year>2015</year>
          <conf-name>2015 International Conference on Intelligent Environments</conf-name>
          <conf-date>July 15-17, 2015</conf-date>
          <conf-loc>Prague, Czech Republic</conf-loc>
          <fpage>132</fpage>
          <lpage>135</lpage>
          <pub-id pub-id-type="doi">10.1109/IE.2015.27</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ambikairajah</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Spectro-temporal analysis of speech affected by depression and psychomotor retardation</article-title>
          <source>Proceedings of the 2013 IEEE International Conference on Acoustics, Speech and Signal Processing</source>
          <year>2013</year>
          <conf-name>2013 IEEE International Conference on Acoustics, Speech and Signal Processing</conf-name>
          <conf-date>May 26-31, 2013</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
          <fpage>7542</fpage>
          <lpage>7546</lpage>
          <pub-id pub-id-type="doi">10.1109/ICASSP.2013.6639129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sethu</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Breakspear</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Goecke</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Modeling spectral variability for the classification of depressed speech</article-title>
          <source>Proceedings of the 14th Annual Conference of the International Speech Communication Association</source>
          <year>2013</year>
          <conf-name>14th Annual Conference of the International Speech Communication Association</conf-name>
          <conf-date>August 25-29, 2013</conf-date>
          <conf-loc>Lyon, France</conf-loc>
          <fpage>857</fpage>
          <lpage>861</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.654.9966&#38;rep=rep1&#38;type=pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kreiman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sidtis</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <source>Foundations of Voice Studies: An Interdisciplinary Approach to Voice Production and Perception</source>
          <year>2011</year>
          <publisher-loc>Hoboken, NJ</publisher-loc>
          <publisher-name>John Wiley &#38; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Streeter</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Macdonald</surname>
              <given-names>NH</given-names>
            </name>
            <name name-style="western">
              <surname>Apple</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Krauss</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Galotti</surname>
              <given-names>KM</given-names>
            </name>
          </person-group>
          <article-title>Acoustic and perceptual indicators of emotional stress</article-title>
          <source>J Acoust Soc Am</source>
          <year>1983</year>
          <month>04</month>
          <volume>73</volume>
          <issue>4</issue>
          <fpage>1354</fpage>
          <lpage>1360</lpage>
          <pub-id pub-id-type="doi">10.1121/1.389239</pub-id>
          <pub-id pub-id-type="medline">6853847</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Grandjean</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Johnstone</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Klasmeyer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Bänziger</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Acoustic correlates of task load and stress</article-title>
          <source>Proceedings of the 7th International Conference on Spoken Language Processing</source>
          <year>2002</year>
          <conf-name>7th International Conference on Spoken Language Processing</conf-name>
          <conf-date>September 16-20, 2002</conf-date>
          <conf-loc>Denver, CO</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.isca-speech.org/archive/archive_papers/icslp_2002/i02_2017.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ruiz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Absil</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Harmegnies</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Legros</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Poch</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Time- and spectrum-related variabilities in stressed speech under laboratory and real conditions</article-title>
          <source>Speech Commun</source>
          <year>1996</year>
          <month>11</month>
          <volume>20</volume>
          <issue>1-2</issue>
          <fpage>111</fpage>
          <lpage>129</lpage>
          <pub-id pub-id-type="doi">10.1016/s0167-6393(96)00048-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jessen</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <source>Einfluss von Stress auf Sprache und Stimme: Unter besonderer Berücksichtigung polizeidienstlicher Anforderungen</source>
          <year>2006</year>
          <publisher-loc>Idstein, Germany</publisher-loc>
          <publisher-name>Schulz-Kirchner Verlag</publisher-name>
          <fpage>1</fpage>
          <lpage>218</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gopalan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wenndt</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cupples</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>An analysis of speech under stress using certain modulation features</article-title>
          <source>Proceedings of the 25th Annual Conference of the IEEE Industrial Electronics Society</source>
          <year>1999</year>
          <conf-name>25th Annual Conference of the IEEE Industrial Electronics Society</conf-name>
          <conf-date>November 29-December 3, 1999</conf-date>
          <conf-loc>San Jose, CA</conf-loc>
          <fpage>1193</fpage>
          <lpage>1197</lpage>
          <pub-id pub-id-type="doi">10.1109/iecon.1999.819381</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hillenbrand</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Houde</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Acoustic correlates of breathy vocal quality: Dysphonic voices and continuous speech</article-title>
          <source>J Speech Hear Res</source>
          <year>1996</year>
          <month>04</month>
          <volume>39</volume>
          <issue>2</issue>
          <fpage>311</fpage>
          <lpage>321</lpage>
          <pub-id pub-id-type="doi">10.1044/jshr.3902.311</pub-id>
          <pub-id pub-id-type="medline">8729919</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greeley</surname>
              <given-names>HP</given-names>
            </name>
            <name name-style="western">
              <surname>Friets</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Raghavan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Picone</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Berg</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Detecting fatigue from voice using speech recognition</article-title>
          <source>Proceedings of the IEEE International Symposium on Signal Processing and Information Technology</source>
          <year>2006</year>
          <conf-name>IEEE International Symposium on Signal Processing and Information Technology</conf-name>
          <conf-date>August 27-30, 2006</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
          <fpage>567</fpage>
          <lpage>571</lpage>
          <pub-id pub-id-type="doi">10.1109/ISSPIT.2006.270865</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sigmund</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Introducing the database ExamStress for speech under stress</article-title>
          <source>Proceedings of the 7th Nordic Signal Processing Symposium</source>
          <year>2006</year>
          <conf-name>7th Nordic Signal Processing Symposium</conf-name>
          <conf-date>June 7-9, 2006</conf-date>
          <conf-loc>Reykjavik, Iceland</conf-loc>
          <fpage>290</fpage>
          <lpage>293</lpage>
          <pub-id pub-id-type="doi">10.1109/NORSIG.2006.275258</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <collab>American Psychiatric Association</collab>
          </person-group>
          <source>Diagnostic and Statistical Manual of Mental Disorders, Fifth Edition</source>
          <year>2013</year>
          <publisher-loc>Arlington, VA</publisher-loc>
          <publisher-name>American Psychiatric Association Publishing</publisher-name>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
