<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e67772</article-id>
      <article-id pub-id-type="pmid">40228243</article-id>
      <article-id pub-id-type="doi">10.2196/67772</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Acoustic Features for Identifying Suicide Risk in Crisis Hotline Callers: Machine Learning Approach</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Coristine</surname>
            <given-names>Andrew</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Iyer</surname>
            <given-names>Ravi</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Carli</surname>
            <given-names>Vladimir</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Su</surname>
            <given-names>Zhengyuan</given-names>
          </name>
          <degrees>MEd</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0009-0001-3664-0953">https://orcid.org/0009-0001-3664-0953</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Jiang</surname>
            <given-names>Huadong</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0009-0007-2274-4253">https://orcid.org/0009-0007-2274-4253</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Ying</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0001-9023-8247">https://orcid.org/0000-0001-9023-8247</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Hou</surname>
            <given-names>Xiangqing</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-7142-3583">https://orcid.org/0000-0002-7142-3583</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Su</surname>
            <given-names>Yanli</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0009-0003-6351-1879">https://orcid.org/0009-0003-6351-1879</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Li</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Laboratory of Suicidal Behavior Research</institution>
            <institution>Tianjin University</institution>
            <addr-line>135 Yaguan Road, Jinnan District</addr-line>
            <addr-line>Tianjin, 300354</addr-line>
            <country>China</country>
            <phone>86 13752183496</phone>
            <email>yangli@tju.edu.cn</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-7947-0392">https://orcid.org/0000-0002-7947-0392</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Laboratory of Suicidal Behavior Research</institution>
        <institution>Tianjin University</institution>
        <addr-line>Tianjin</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Institute of Applied Psychology</institution>
        <institution>Tianjin University</institution>
        <addr-line>Tianjin</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>School of Education</institution>
        <institution>Tianjin University</institution>
        <addr-line>Tianjin</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Xi'an Mental Health Centre</institution>
        <addr-line>Xi'an</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Li Yang <email>yangli@tju.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>14</day>
        <month>4</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e67772</elocation-id>
      <history>
        <date date-type="received">
          <day>21</day>
          <month>10</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>2</day>
          <month>12</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>14</day>
          <month>12</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>25</day>
          <month>2</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Zhengyuan Su, Huadong Jiang, Ying Yang, Xiangqing Hou, Yanli Su, Li Yang. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 14.04.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e67772" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Crisis hotlines serve as a crucial avenue for the early identification of suicide risk, which is of paramount importance for suicide prevention and intervention. However, assessing the risk of callers in the crisis hotline context is constrained by factors such as lack of nonverbal communication cues, anonymity, time limits, and single-occasion intervention. Therefore, it is necessary to develop approaches, including acoustic features, for identifying the suicide risk among hotline callers early and quickly. Given the complicated features of sound, adopting artificial intelligence models to analyze callers’ acoustic features is promising.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>In this study, we investigated the feasibility of using acoustic features to predict suicide risk in crisis hotline callers. We also adopted a machine learning approach to analyze the complex acoustic features of hotline callers, with the aim of developing suicide risk prediction models.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We collected 525 suicide-related calls from the records of a psychological assistance hotline in a province in northwest China. Callers were categorized as low or high risk based on suicidal ideation, suicidal plans, and history of suicide attempts, with risk assessments verified by a team of 18 clinical psychology raters. A total of 164 clearly categorized risk recordings were analyzed, including 102 low-risk and 62 high-risk calls. We extracted 273 audio segments, each exceeding 2 seconds in duration, which were labeled by raters as containing suicide-related expressions for subsequent model training and evaluation. Basic acoustic features (eg, Mel Frequency Cepstral Coefficients, formant frequencies, jitter, shimmer) and high-level statistical function (HSF) features (using OpenSMILE [Open-Source Speech and Music Interpretation by Large-Space Extraction] with the ComParE 2016 configuration) were extracted. Four supervised machine learning algorithms (logistic regression, support vector machine, random forest, and extreme gradient boosting) were trained and evaluated using grouped 5-fold cross-validation and a test set, with performance metrics, including accuracy, <italic>F</italic><sub>1</sub>-score, recall, and false negative rate.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The development of machine learning models utilizing HSF acoustic features has been demonstrated to enhance recognition performance compared to models based solely on basic acoustic features. The random forest classifier, developed with HSFs, achieved the best performance in detecting the suicide risk among the models evaluated (accuracy=0.75, <italic>F</italic><sub>1</sub>-score=0.70, recall=0.76, false negative rate=0.24).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The results of our study demonstrate the potential of developing artificial intelligence–based early warning systems using acoustic features for identifying the suicide risk among crisis hotline callers. Our work also has implications for employing acoustic features to identify suicide risk in salient voice contexts.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>suicide</kwd>
        <kwd>crisis hotline</kwd>
        <kwd>acoustic feature</kwd>
        <kwd>machine learning</kwd>
        <kwd>acoustics</kwd>
        <kwd>suicide risk</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>feasibility</kwd>
        <kwd>prediction models</kwd>
        <kwd>hotline callers</kwd>
        <kwd>voice</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Suicide is a global public health issue. According to the World Health Organization in 2019, approximately 703,000 people die by suicide each year worldwide [<xref ref-type="bibr" rid="ref1">1</xref>]. Therefore, numerous studies have been conducted to identify suicidal risks, which is crucial for preventing and reducing suicide [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. However, most previous works relied on the language content, including clinical interview and assessment using self-report scales, when identifying suicidal risks [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. In this study, we aimed to investigate how to use acoustic features to identify suicide risk in the context of crisis hotlines. In fact, speech has been used for suicide diagnosis for decades [<xref ref-type="bibr" rid="ref8">8</xref>]. For example, studies utilizing linguistic analysis have shown that when a person is suicidal, their speech becomes hollow and toneless [<xref ref-type="bibr" rid="ref9">9</xref>]. However, such a manual speech analysis approach cannot be applied in large-scale research and clinical environments [<xref ref-type="bibr" rid="ref10">10</xref>]. Now these acoustic changes can be well captured using acoustic speech features [<xref ref-type="bibr" rid="ref11">11</xref>]. Moreover, with the development of artificial intelligence technologies such as machine learning, it is possible to analyze highly complex patterns in acoustic features [<xref ref-type="bibr" rid="ref12">12</xref>]. Using artificial intelligence to automatically analyze features can help us move from a clinical practice model that relies solely on clinician judgment to an evidence-based medicine model based on data measurements [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>Some studies have used acoustic features as indicators to automatically identify suicidal ideation and to determine suicide risks in populations such as veterans, active duty soldiers, and university students. The audio materials in these studies are mostly derived from laboratory interviews or spontaneous recordings such as audio diaries [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref23">23</xref>]. In the context of collecting such audio materials, acoustic information is less important due to the assistance of nonverbal information and suicide screening scales. However, voice messages become particularly important in special contexts such as in crisis hotline calls [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
        <p>As a suicide prevention method, crisis hotlines play a crucial role in early detection and response to suicide risk [<xref ref-type="bibr" rid="ref25">25</xref>]. The World Health Organization estimates that there are more than 1000 crisis hotlines worldwide. Crisis lines provide a confidential and stigma-free alternative for individuals who are suicidal and may not seek help from traditional health services, family, or friends, or who have not disclosed their suicidal thoughts to professionals, thereby reaching those who are otherwise unreached for their mental health struggles [<xref ref-type="bibr" rid="ref26">26</xref>]. However, accurately assessing suicide risk in a crisis hotline is a difficult task. Due to their anonymous, time-limited, and typically single-occasion nature, crisis hotline counselors cannot predict or control the type of calls they receive and are expected to respond as quickly as possible to the risks of callers [<xref ref-type="bibr" rid="ref26">26</xref>]. Furthermore, individuals identified as high-risk callers in crisis lines are significantly more likely to engage in subsequent suicidal behavior than those identified as low risk. Recognizing the suicide risk of callers is the first critical step to manage the risk for crisis hotline counselors. When the crisis hotline counselor realizes that the caller is at high risk, more urgent intervention strategies are employed to help the caller manage the risk [<xref ref-type="bibr" rid="ref27">27</xref>]. Simply identifying the presence of suicidal ideation in callers is not sufficient—a more thorough assessment by crisis hotline counselors is required [<xref ref-type="bibr" rid="ref28">28</xref>]. Unlike risk assessments in the form of face-to-face interviews, crisis hotline counselors are unable to observe nonverbal communication cues [<xref ref-type="bibr" rid="ref29">29</xref>]. 
Therefore, crisis hotline counselors rely solely on vocal communication, and they have to be highly attuned to every sound, silence, inflection, and quality of speech, including tone, pitch, and speed [<xref ref-type="bibr" rid="ref30">30</xref>]. This will undoubtedly add to the burden of the counselors. It would be helpful if acoustic information could be used to assist the counselor in risk assessment and then management. Therefore, approaches or techniques that can identify suicide risk automatically based on the acoustic characteristics of the caller are promising.</p>
        <p>Although there have been studies [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref24">24</xref>] exploring the effectiveness of acoustic features in suicide risk identification, they are understudied in crisis hotlines. Recorded calls to crisis hotlines are characterized by low sampling rates (8 kHz) and poor recording environments. These limitations pose substantial challenges for the acoustic analysis of the data [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. The study by Iyer et al [<xref ref-type="bibr" rid="ref33">33</xref>] is one of the few studies that have tested the feasibility of acoustic features for suicide risk identification in hotline callers. Their findings suggested that acoustic features have the potential to be considered as biomarkers of suicide risk in callers [<xref ref-type="bibr" rid="ref33">33</xref>]. However, because they used the uncommon method of independent analysis of speech frames and did not validate the model on an independent test set, the results of their study require further validation.</p>
      </sec>
      <sec>
        <title>Objective</title>
        <p>This is a retrospective machine learning study. The purpose of this study was to first train a machine learning model by using speech material from a crisis hotline and to test the performance of the model on an independent test set. In doing so, we aim to test which machine learning model is more suitable to be applied for the risk identification of hotline callers. Second, considering the characteristics of the sampling rate and the recording environment of the hotline speech material, this study further investigates whether advanced acoustic features (high-level statistical functions [HSFs]), which have better recognition performance than basic acoustic features, contribute to the recognition performance of the machine learning model [<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref36">36</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Materials and Clinical Assessment</title>
        <p>A total of 525 calls were selected from the records of a psychological assistance hotline in a province in northwest China between January 2022 and March 2023. These calls were identified as involving suicide-related conversations between the counselor and the caller. The callers included both adolescents and adults (aged 12 years and older). The counselor assessed the caller’s suicide risk according to the “suicidal thoughts and plans” entry in the risk assessment criteria for Chinese crisis hotlines [<xref ref-type="bibr" rid="ref28">28</xref>]. Specifically, callers exhibiting suicidal ideation without a suicide plan were categorized as low risk; callers presenting with suicidal ideation accompanied by a suicide plan, and who were in the process of executing or preparing to engage in suicidal behavior within the subsequent 72-hour period, or had a recent history of a suicide attempt within the preceding 2 weeks were designated as high risk.</p>
        <p>To verify the accuracy of the initial classifications by the hotline counselor, we recruited another sample of raters to rate the callers’ suicide risk in each recording. A total of 18 risk raters with a background in clinical psychology and experience working with crisis hotlines were recruited. They were asked to rate the recordings included in the study, according to the “suicidal thoughts and plans” entry in the risk assessment criteria for Chinese crisis hotlines. Each rater rated 10 randomly selected suicide risk recordings to assess interrater agreement before engaging in assessment. The interrater reliability (κ) of the clinical assessment conducted by a team of 18 raters was 0.771 for a random selection of 10 recordings. This value indicates a high degree of agreement among the raters [<xref ref-type="bibr" rid="ref37">37</xref>].</p>
        <p>The assessment of each recorded suicide risk call was conducted by 2 independent raters. Callers whose statements were deemed indicative of suicide risk by both raters and who exhibited a similar level of suicide risk, as indicated by their respective assessment ratings, were included in this study. Risk assessors were asked to make notes on the point in time of the dialogue where suicide-related themes (including suicidal ideation, suicide planning, history of suicide attempts, and ongoing suicidal behaviors) occurred in the recording.</p>
      </sec>
      <sec>
        <title>Data Exclusion</title>
        <p>Risk recordings were excluded if they did not adhere to the established assessment process. This included instances where the caller’s suicide plan, preparation, or other relevant factors were not adequately assessed after the disclosure of suicidal ideation. Additionally, recordings where the caller’s expression was unclear or insufficient for risk assessment were excluded. The final screening resulted in 164 recordings where the caller’s expression was sufficiently clear to indicate their level of risk. Of these, 102 calls were assessed as low risk and 62 as high risk. The authors extracted segments of suicide-related expressions at specific time points where raters identified the occurrence of suicide-related conversations. All audio clips of suicide-related expressions with a duration of 2 seconds or more were manually intercepted [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. We obtained a total of 273 clips (132 clips for high risk and 141 clips for low risk). The high risk and low risk segments were used in subsequent model training and evaluation [<xref ref-type="bibr" rid="ref33">33</xref>].</p>
      </sec>
      <sec>
        <title>Preprocessing of Call Recordings</title>
        <p>All call recordings were originally saved in MP3 (MPEG-1 audio layer 3) format with a sampling rate of 8 kHz, a bit depth of 32 bits, and in dual channel. The suicide-related clips labeled by the risk assessors were checked and relistened to by the first author, and complete sentences of suicide-related expressions were intercepted as audio-recorded material. We also removed the crisis hotline counselor’s voice channel and only kept the caller’s voice. After removing the nonspeech fragments from the first and end points of the clip by using voice activity detection, we converted the audio files to WAV (waveform audio file) format.</p>
      </sec>
      <sec>
        <title>Feature Extraction</title>
        <sec>
          <title>Basic Acoustic Feature Extraction</title>
          <p>Spectrum features, quality features, and rhythm features were extracted for each segmented utterance and averaged over the entire time interval [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. The spectral characteristics of the audio signal were captured through a 39-dimensional Mel Frequency Cepstral Coefficients (MFCCs) representation [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. The quality attributes of the signal are represented by the center frequency and bandwidth of the first 3 formants, in addition to jitter and shimmer measurements [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Rhythmic aspects of the speech are quantified through metrics such as the duration of effective speech segments, fundamental frequency (pitch), short-time energy, and sound pressure level [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
        </sec>
        <sec>
          <title>Advanced Feature Extraction</title>
          <sec>
            <title>Open-Source Speech and Music Interpretation by Large-Space Extraction</title>
            <p>OpenSMILE (Open-Source Speech and Music Interpretation by Large-Space Extraction), an open-source tool and a robust platform for the extraction of acoustic features, can take the original waveform signal of a sound signal in a time series as input and output the names and values of the corresponding acoustic features [<xref ref-type="bibr" rid="ref44">44</xref>]. We employed the ComParE 2016 configuration profile to extract the 6373-dimensional features of this feature set. The ComParE 2016 feature set incorporates more combinations of low-level descriptors and functionals than the basic acoustic feature set [<xref ref-type="bibr" rid="ref45">45</xref>]. This large feature set provides a quantification of voice characteristics that is more comprehensive than ever before, and it has demonstrated effectiveness in the fields of emotion and personality trait recognition [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p>
          </sec>
          <sec>
            <title>Mutual Information</title>
            <p>To avoid overfitting the training model and to compare it with models constructed from basic acoustic features, this study uses the mutual information method to reduce the number of feature dimensions of the advanced feature set to be consistent with the basic acoustic feature set. The mutual information method computes the degree of mutual information, which indicates the dependency between features and discrete binary labels. The higher the mutual information degree, the stronger the dependency between the feature and the label, and therefore, the more useful it is for model recognition [<xref ref-type="bibr" rid="ref47">47</xref>]. Mutual information has been employed for dimensionality reduction within high-dimensional feature spaces relevant to suicide acoustic studies [<xref ref-type="bibr" rid="ref36">36</xref>]. Extraction was performed using Python (version 3.6), and the following packages were used to extract the acoustic features: <italic>librosa</italic> (version 0.7.2), <italic>NumPy</italic> (version 1.19.5), and <italic>pandas</italic> (version 1.1.5). Normalization or filtering was not performed during the preprocessing stage.</p>
          </sec>
        </sec>
      </sec>
      <sec>
        <title>Machine Learning Methods</title>
        <p>In mental health research, traditional machine learning methods are frequently favored due to their interpretability and suitability for smaller datasets [<xref ref-type="bibr" rid="ref48">48</xref>]. For the problem of risk identification in the field of suicide, supervised machine learning methods are usually the most applicable [<xref ref-type="bibr" rid="ref12">12</xref>]. In consideration of the scale of the dataset employed in this study and the research questions posed, we utilized supervised machine learning as the modality of data analysis. We employed 4 supervised machine learning algorithms: logistic regression, support vector machine, random forest, and extreme gradient boosting [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
        <p>The entire machine learning analysis process is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. The data were divided into training and test sets using the GroupShuffleSplit method, with an 80:20 ratio. This prevents the information leakage of the same incoming call by dividing multiple segments of the same call into training and test sets at the same time. In the training set, we employed a grouped 5-fold cross-validation approach with a grid search strategy to optimize the model parameters [<xref ref-type="bibr" rid="ref49">49</xref>]. The optimal combination of parameters was selected based on the training set, and its performance was subsequently evaluated using the test set.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Flowchart of the machine learning analysis process for suicide risk assessment in crisis hotline callers, including data preprocessing, model training, and evaluation. OpenSMILE: Open-Source Speech and Music Interpretation by Large-Space Extraction.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e67772_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We used accuracy to evaluate the overall recognition performance of the model and <italic>F</italic><sub>1</sub>-score, recall, and false negative rate (FNR) to evaluate the model’s detection performance for high-risk callers. Higher values for accuracy, <italic>F</italic><sub>1</sub>-score, and recall metrics imply a superior performance of the model in accurately identifying instances of suicide risk. Reduced values of FNR indicate a diminished likelihood of misclassifying high-risk individuals as low risk within the caller population. True positive (TP) is the number of samples that were correctly classified as belonging to the high-risk class. False positive (FP) refers to the number of samples that were incorrectly classified as high risk. False negative (FN) is the number of high-risk samples that were misclassified as low risk. True negative (TN) is the number of samples that were correctly classified as low risk [<xref ref-type="bibr" rid="ref50">50</xref>]. The evaluation metrics are defined as follows:</p>
        <disp-formula>Accuracy = (TP + TN) / (TP + FP + TN + FN)</disp-formula>
        <disp-formula><italic>F</italic><sub>1</sub>-score = 2TP / (2TP + FP + FN)</disp-formula>
        <disp-formula>Recall = TP / (TP + FN)</disp-formula>
        <disp-formula>FNR = FN / (TP + FN)</disp-formula>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>This study has been approved by the institutional review board of Tianjin University (2024-453). The researchers confirm that all stages of this study were conducted in accordance with the ethical standards set forth by the Helsinki Declaration, as revised in 1989. Prior to being connected with a hotline operator, callers were informed via an automated message that their calls would be recorded and that any data obtained from these calls would be treated in accordance with the tenets of confidentiality and analyzed in an anonymized manner. All data have been anonymized, and any private information related to the caller has been removed.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Descriptive Statistics for Suicide-Related Expressions</title>
        <p>The gender and age of the callers of the 273 suicide-related statements are shown in <xref ref-type="table" rid="table1">Table 1</xref>. We first conducted chi-square tests to examine whether the gender and age range ratios differed between the high and low suicide risk conditions.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Demographic characteristics (sex and age) of the callers with suicide-related expression segments included in this study (N=273).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="250"/>
            <col width="250"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Characteristic</td>
                <td>High risk of suicide, n (%)</td>
                <td>Low risk of suicide, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">Number of segments</td>
                <td>132 (48.4)</td>
                <td>141 (51.6)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Sex</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Male</td>
                <td>62 (47)</td>
                <td>48 (34)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Female</td>
                <td>70 (53)</td>
                <td>93 (66)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Age (years)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>12-18</td>
                <td>57 (43.2)</td>
                <td>36 (25.5)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>19-34</td>
                <td>58 (43.9)</td>
                <td>74 (52.5)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>35+</td>
                <td>6 (4.5)</td>
                <td>20 (14.2)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Unspecified</td>
                <td>11 (8.3)</td>
                <td>11 (7.8)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>A significant association was observed between gender and risk category (<italic>χ<sup>2</sup></italic><sub>1</sub>=6.1; <italic>P</italic>=.01). The low-risk group showed higher female representation, while the high-risk group had a balanced gender ratio. With regard to age, the age distribution differed significantly between risk groups (<italic>χ<sup>2</sup></italic><sub>3</sub>=18.2; <italic>P</italic>&#60;.001). The proportion of callers aged 12-18 years within the high suicide risk caller segment was higher than that within the low suicide risk segment (<italic>χ<sup>2</sup></italic><sub>1</sub>=9.5; <italic>P</italic>=.002). Additionally, the proportion of callers older than 35 years within the high suicide risk caller segment was lower than that within the low suicide risk segment (<italic>χ<sup>2</sup></italic><sub>1</sub>=7.4; <italic>P</italic>=.007). There were no significant differences in other age ranges. An independent sample 2-tailed <italic>t</italic> test was conducted to compare the duration of suicide-related utterances in high-risk and low-risk calls. There was no significant difference in the duration for high-risk (mean 6.470, SD 5.365) and low-risk (mean 6.262, SD 4.378) conditions (<italic>t</italic><sub>271</sub>=–0.351; <italic>P</italic>=.73).</p>
      </sec>
      <sec>
        <title>Suicide Risk Recognition With Basic Acoustic Features</title>
        <sec>
          <title>Feature Selection</title>
          <p>We extracted spectrum features, quality features, and rhythm features totaling 53 dimensions and performed multicollinearity diagnosis on these features. Fifty-dimensional basic acoustic features were included in this study after excluding 3 dimensions with a variance inflation factor greater than 10 (sound pressure level, MFCC1, and MFCC2). <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> and <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> present the feature information that survives multicollinearity diagnostics.</p>
        </sec>
        <sec>
          <title>Machine Learning Models for Basic Acoustic Features</title>
          <p>The results of the recognition models trained with basic acoustic features are presented in <xref rid="figure2" ref-type="fig">Figure 2</xref> and <xref ref-type="table" rid="table2">Table 2</xref>. Support vector machine demonstrated the most optimal performance in the model that was trained using the basic acoustic feature set, with an accuracy of 0.49. The model achieved an <italic>F</italic><sub>1</sub>-score of 0.47, recall of 0.62, and FNR of 0.38. The model demonstrated a 49% accuracy rate in identifying high/low risk, which is not significantly superior to the accuracy expected by chance. Therefore, machine learning models incorporating advanced acoustic features are required, which will be described in the next section.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>(A) Confusion matrix and (B) receiver operating characteristic curves for a basic acoustic feature caller risk classification model based on support vector machines. AUC: area under the curve; ROC: receiver operating characteristic; SVC: support vector classification.</p>
            </caption>
            <graphic xlink:href="jmir_v27i1e67772_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Performance comparison of the 4 machine learning models using basic acoustic features for suicide risk classification in crisis hotline callers.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="540"/>
              <col width="100"/>
              <col width="100"/>
              <col width="100"/>
              <col width="160"/>
              <thead>
                <tr valign="top">
                  <td>Machine learning model (testing sets)</td>
                  <td>Accuracy</td>
                  <td><italic>F</italic><sub>1</sub>-score</td>
                  <td>Recall</td>
                  <td>False negative rate</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Logistic regression</td>
                  <td>0.44</td>
                  <td>0.36</td>
                  <td>0.43</td>
                  <td>0.57</td>
                </tr>
                <tr valign="top">
                  <td>Random forest</td>
                  <td>0.44</td>
                  <td>0.37</td>
                  <td>0.48</td>
                  <td>0.52</td>
                </tr>
                <tr valign="top">
                  <td>Support vector machine</td>
                  <td>0.49</td>
                  <td>0.47</td>
                  <td>0.62</td>
                  <td>0.38</td>
                </tr>
                <tr valign="top">
                  <td>Extreme gradient boosting</td>
                  <td>0.38</td>
                  <td>0.31</td>
                  <td>0.38</td>
                  <td>0.62</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>Suicide Risk Recognition With Advanced Acoustic Features</title>
        <sec>
          <title>Feature Selection</title>
          <p>In this study, 50 advanced acoustic features with the highest mutual information were selected in alignment with the number of basic acoustic features from a set of 6373-dimensional features. A multicollinearity test was performed on the 50-dimensional features after dimensionality reduction, and it was found that none of them had multicollinearity. The details of the 50 advanced acoustic features are presented in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
        </sec>
        <sec>
          <title>Machine Learning Models for Advanced Acoustic Features</title>
          <p>The results of the recognition models trained with advanced acoustic features are presented in <xref rid="figure3" ref-type="fig">Figure 3</xref> and <xref ref-type="table" rid="table3">Table 3</xref>. Random forest demonstrated the most optimal performance in the model that was trained using the advanced acoustic feature set, with an accuracy of 0.75. The model achieved an <italic>F</italic><sub>1</sub>-score of 0.70, recall of 0.76, and FNR of 0.24. The model trained with advanced acoustic features showed higher recognition performance than that trained with basic acoustic features.</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>(A) Confusion matrix and (B) receiver operating characteristic curves for random forest–based high-level statistical function acoustic features caller risk classification model: area under the receiver operating characteristic curve. AUC: area under the curve; ROC: receiver operating characteristic.</p>
            </caption>
            <graphic xlink:href="jmir_v27i1e67772_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Performance evaluation of the 4 machine learning models utilizing advanced acoustic features for suicide risk prediction in crisis hotline callers.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="400"/>
              <col width="150"/>
              <col width="150"/>
              <col width="150"/>
              <col width="150"/>
              <thead>
                <tr valign="top">
                  <td>Machine learning model (testing sets)</td>
                  <td>Accuracy</td>
                  <td><italic>F</italic><sub>1</sub>-score</td>
                  <td>Recall</td>
                  <td>False negative rate</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Logistic regression</td>
                  <td>0.61</td>
                  <td>0.24</td>
                  <td>0.48</td>
                  <td>0.52</td>
                </tr>
                <tr valign="top">
                  <td>Random forest</td>
                  <td>0.75</td>
                  <td>0.70</td>
                  <td>0.76</td>
                  <td>0.24</td>
                </tr>
                <tr valign="top">
                  <td>Support vector machine</td>
                  <td>0.58</td>
                  <td>0.18</td>
                  <td>0.43</td>
                  <td>0.57</td>
                </tr>
                <tr valign="top">
                  <td>Extreme gradient boosting</td>
                  <td>0.63</td>
                  <td>0.55</td>
                  <td>0.62</td>
                  <td>0.38</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <p>Considering that random forest achieved the best suicide risk identification performance by using the downgraded HSF feature, we chose to explore the relationship between the acoustic features and the degree of suicide risk in it by using variable importance plot versus partial dependence plot (PDP). The importance of the features in the classification model is illustrated in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>. The top 3 most important features of the classification model were audSpec_Rfilt_sma [0]_stddevRisingSlope (SRRS), pcm_fftMag_spectralSkewness_sma_iqr1-3 (PMSS), and mfcc_sma [13]_centroid (MFSC). SRRS is the standard deviation of the rising slope of the first element in the simple moving average (SMA) of the audio spectrum after being filtered. PMSS is the spectral skewness, calculated using SMA of the magnitude of the Fast Fourier Transform of the Pulse Code Modulation signal, with IQR spanning from the first to the third quartiles. MFSC is the centroid of the 13th coefficient in the MFCC feature set, smoothed with SMA. The PDPs of the most important variables in the classification model illustrate the relationship between the probability of being classified as high-suicide risk (y-axis) and the acoustic features (x-axis). <xref rid="figure4" ref-type="fig">Figure 4</xref> illustrates the nonlinear relationship between the 3 most significant acoustic features in the random forest model and the probability of being classified as high risk, along with the corresponding feature. The PDPs for the 2 variables SRRS and MFSC exhibit a similar trend, whereby the probability of being categorized as high risk increases in tandem with the value of the variable. The highest probability of being categorized as high risk is observed when the value of the variable reaches a value between 1 and 1.5, followed by a slight decline. 
In contrast, the PMSS feature exhibits a divergent trend, with the probability of being classified as a high-risk caller demonstrating a slight increase and reaching a maximum as the variable value increases within the interval between –1 and –0.5. The probability of being categorized as a high-risk caller tends to decrease in intervals where the value of the variable is greater than –0.5 and less than 1.5.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>Partial dependence plots of the top 3 most important variables in the random forest model for suicide risk assessment using high-level statistical function features.</p>
            </caption>
            <graphic xlink:href="jmir_v27i1e67772_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Main Findings</title>
        <p>This study tests the feasibility of using acoustic features to identify the suicide risk of crisis hotline callers. In doing so, we collected suicide-related calls to a crisis hotline and analyzed the acoustic features of high-risk versus low-risk suicidal calls. We extracted different sets of acoustic features by using 2 methods. First, the Python-based <italic>librosa</italic> library was used as in existing studies [<xref ref-type="bibr" rid="ref24">24</xref>] and the basic acoustic features were extracted and averaged over the whole time interval. The second method that we used was OpenSMILE, an audio feature extraction tool, to extract 6373-dimensional HSFs for hotline speech segments and to perform dimensionality reduction by using the mutual information method. We used 4 machine learning algorithms to train models on each of the 2 feature subsets and to compare performance between algorithmic models. In the subset of basic acoustic features, the 4 machine learning models performed poorly, with the best performing support vector machine achieving only 49% recognition accuracy. In the HSF feature subset, all 4 machine learning algorithms had better accuracy. The classification performance of the random forest model was much better than all the other 3 algorithms, reaching 75% accuracy; that is, a random forest model using a subset of HSF features is likely to be a feasible approach to identify the suicide risk of hotline callers. We found that voice characteristics, especially the HSF features, have the potential to serve as an objective indicator for identifying callers’ suicide risk in a crisis helpline. We also agree with Draper et al [<xref ref-type="bibr" rid="ref51">51</xref>] that constructing such a classification model for acoustic information is not designed to replace the counselor’s judgement, but it may assist the counselor in assessing short-term warning signs for suicide.</p>
      </sec>
      <sec>
        <title>Strengths</title>
        <p>We obtained and analyzed authentic caller audio clips from a crisis hotline, which offers a high degree of ecological validity. Given the dearth of research on speech material in the context of crisis hotlines [<xref ref-type="bibr" rid="ref33">33</xref>], this study makes a valuable contribution to the automated quantitative analysis of voices in this context. The majority of previous studies utilized automatic speech analysis of acoustic features for the detection of suicidal ideation [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref52">52</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. However, in the context of crisis hotline services, the mere identification of the presence or absence of suicidal ideation expressions in callers is often insufficient. This study identifies and classifies low-risk and high-risk callers to the crisis hotline, going beyond relying solely on language in recognizing suicide risk.</p>
        <p>Additionally, rigorous exclusion criteria were employed to exclude all callers with ambiguous suicide risk levels. Two trained raters independently reassessed each caller’s recordings to obtain more accurate clinical assessment labels. As the clinical assessment was based on suicide-related expressions, only speech segments of suicide-related expressions were included in this study. High quality data, with redundancy and irrelevant speech segments removed and accurately annotated, help improve the classifier’s recognition performance [<xref ref-type="bibr" rid="ref56">56</xref>]. This allowed for a more detailed examination of how well machine learning models constructed solely on acoustic features match with accurate clinical assessments.</p>
        <p>In this study, we employed an approach that can directly extract acoustic features from speech segments, differing from Iyer et al’s [<xref ref-type="bibr" rid="ref33">33</xref>] frame-based analysis of speech. The method we used is conducive to the prevention of the loss of feature information that might otherwise result from the exclusion of silent frames. Additionally, building on the foundational research by Iyer et al [<xref ref-type="bibr" rid="ref33">33</xref>], we conducted validation on a test set that is independent of the training set, corroborating that acoustic features can indeed serve as markers for identifying the risk of suicide in hotline callers.</p>
        <p>The machine learning model we trained using the basic set of acoustic features extracted from previous research in laboratory interview scenarios did not show good performance on the test set. This may be due to the quality of the recording material. The sampling rate for call recordings is 8 kHz, whereas the sampling rate of microphone equipment for interview recordings is usually several times higher [<xref ref-type="bibr" rid="ref24">24</xref>]. Therefore, we analyzed the advanced acoustic features of hotline callers. In alignment with the findings of previous studies and the hypotheses proposed, the more comprehensive advanced statistical function features demonstrated superior performance in the risk classification of crisis hotline callers. This may assist in circumventing the constraints imposed by the low sampling rate of hotline audio recordings [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Furthermore, the random forest model trained on the subset of HSF features demonstrated the highest recognition performance. This is also consistent with that reported in previous studies, where tree-based models have been found to perform better than other machine learning models in suicide voice-related databases [<xref ref-type="bibr" rid="ref15">15</xref>].</p>
        <p>We also conducted further model interpretation, highlighting the top 10 features that significantly influenced the model’s classification accuracy. PDPs were then used to present the relationship between the 3 most important dimensions and model categorization. The 3 most important variables in the random forest model trained with advanced acoustic features were SRRS, MFSC, and PMSS. Among them, SRRS and MFSC are 2 typical features of the RASTA (Reliable And Smooth Template Algorithm) style-filtered auditory spectra and MFCC, respectively, which are the most relevant acoustic features of the valence dimension [<xref ref-type="bibr" rid="ref57">57</xref>], evaluating the pleasure level of the emotion [<xref ref-type="bibr" rid="ref58">58</xref>]. In our study, this meant that although both low-risk and high-risk callers made suicide-related statements, there were some differences in their emotions. PMSS was associated with increased vocal effort, hyperfunction of the neck muscles, and potential laryngeal compression [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. Such an increase in vocal effort also means that low-risk and high-risk callers have different stress levels [<xref ref-type="bibr" rid="ref61">61</xref>].</p>
        <p>Given the aforementioned strengths, our work has implications for developing a theory or framework to identify the suicide risk of crisis line callers. On the one hand, the advanced features highly related to suicide risk shed light on developing a framework to identify suicidal callers in crisis hotlines. As predicted, callers with suicidal risk could be recognized quickly according to their voice features. On the other hand, we found that the approach of the random forest model based on HSF features is optimal. Follow-up work can use such models to analyze the HSF features to replicate and extend our work. Importantly, approaches may also be developed to automatically identify suicidal callers according to their voice, which will be helpful and valuable for timely prevention and intervention through crisis hotlines. Such an automatic procedure can also help compensate for the manual limitations of crisis hotlines.</p>
      </sec>
      <sec>
        <title>Limitations and Future Directions</title>
        <p>Our work also has limitations. First, we did not control for characteristics that could potentially diminish the classification performance of the machine learning model [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref63">63</xref>]. Our study includes all low-risk and high-risk callers because the sample sizes for subgroups based on demographic features such as age and gender were insufficient for independent analysis. As the acoustic features vary across different age and gender groups, our findings may be limited by not controlling for such demographic variables, which is awaiting further explorations in future work. Second, we only utilized machine learning as the data analysis method, being constrained by the limited sample size of the study. Studies have applied deep learning methods to identify depressed patients, achieving high accuracy in model performance [<xref ref-type="bibr" rid="ref64">64</xref>]. Future research can utilize deep learning to explore more complex relationships between acoustic features and suicide risk within larger datasets. Third, the content of the recordings was not considered in this study. The narrative content of crisis hotline communications is critical, as it is the primary reference for assessing the caller’s risk. It has been demonstrated that the fusion of acoustic and textual features through multimodal techniques enhances the accuracy of recognition [<xref ref-type="bibr" rid="ref15">15</xref>]. It is expected that, in the future, means of combining chat text with acoustic information will help to develop more refined models of risk assessment for hotline callers [<xref ref-type="bibr" rid="ref65">65</xref>]. Fourth, in light of the relatively low base rate of suicide, the overall positive predictive value for the identification of high-risk callers is low [<xref ref-type="bibr" rid="ref28">28</xref>]. 
This implies that crisis hotline counselors must remain vigilant to the potential for misclassification, that is, high-risk callers may be inaccurately assessed as low risk, while low-risk callers might be mistakenly evaluated as high risk. Consequently, the acoustic-based risk assessment should not be used in isolation but rather as a complementary tool to other risk assessment methods employed by counselors.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>This study suggests that voice characteristics are promising objective indicators for detecting suicide risk among crisis helpline callers. We demonstrated that HSF features can be employed to identify suicide risk in crisis helpline callers, especially based on the random forest model (a typical machine learning model). Although further external validation and methodological optimization are needed to validate and extend the findings of this study, our work holds promise for real-time assessment of high-risk callers by using acoustic features.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Fifty basic acoustic feature dimensions included in this study.</p>
        <media xlink:href="jmir_v27i1e67772_app1.docx" xlink:title="DOCX File , 18 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Ten-dimensional acoustic features with the highest variance inflation factor identified from the 50-dimensional basic acoustic feature set for suicide risk assessment. MFCC: Mel Frequency Cepstral Coefficient; VIF: variance inflation factor.</p>
        <media xlink:href="jmir_v27i1e67772_app2.png" xlink:title="PNG File , 42 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Fifty high-level statistical function features included in this study.</p>
        <media xlink:href="jmir_v27i1e67772_app3.docx" xlink:title="DOCX File , 22 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Top 10 most important acoustic feature dimensions in random forest models for suicide risk prediction by using high-level statistical function features.</p>
        <media xlink:href="jmir_v27i1e67772_app4.png" xlink:title="PNG File , 206 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">FN</term>
          <def>
            <p>false negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">FNR</term>
          <def>
            <p>false negative rate</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">FP</term>
          <def>
            <p>false positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">HSF</term>
          <def>
            <p>high-level statistical function</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">MFCC</term>
          <def>
            <p>Mel Frequency Cepstral Coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MFSC</term>
          <def>
            <p>mfcc_sma [13]_centroid</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">MP3</term>
          <def>
            <p>MPEG-1 audio layer 3</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">OpenSMILE</term>
          <def>
            <p>Open-Source Speech and Music Interpretation by Large-Space Extraction</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PDP</term>
          <def>
            <p>partial dependence plot</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">PMSS</term>
          <def>
            <p>pcm_fftMag_spectralSkewness_sma_iqr1-3</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">RASTA</term>
          <def>
            <p>Reliable And Smooth Template Algorithm</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">SMA</term>
          <def>
            <p>simple moving average</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">SRRS</term>
          <def>
            <p>audSpec_Rfilt_sma [0]_stddevRisingSlope</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">TN</term>
          <def>
            <p>true negative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">TP</term>
          <def>
            <p>true positive</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">WAV</term>
          <def>
            <p>waveform audio file</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors would like to extend their sincere gratitude to the counselor team at Xi’an Mental Health Center and the team at the Shaanxi Provincial Psychological Assistance Hotline for their support in this research. Additionally, the authors would like to thank Jihe Yang, Jiani Wu, Ruixue Nie, Mingchen Wan, Qing Wang, and Yan Dou for their hard work in data organization. Lastly, a heartfelt thanks goes out to all the participants who took part in this study.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>Data cannot be publicly provided because of privacy concerns. Oral informed consent obtained from the study participants does not permit public sharing of the data. The principal investigator of this project (ZS) and the corresponding author (LY) have full access to the data and are responsible for its integrity. Further inquiries, including the study protocol, should be directed to these authors.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Suicide in the world: global health estimates</article-title>
          <source>World Health Organization</source>
          <year>2019</year>
          <access-date>2023-12-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://apps.who.int/iris/handle/10665/326948">https://apps.who.int/iris/handle/10665/326948</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fernandes</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Velupillai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sanyal</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chandran</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Identifying suicide ideation and suicidal attempts in a psychiatric clinical research database using natural language processing</article-title>
          <source>Sci Rep</source>
          <year>2018</year>
          <month>05</month>
          <day>09</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>7426</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-018-25773-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-018-25773-2</pub-id>
          <pub-id pub-id-type="medline">29743531</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-018-25773-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC5943451</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Passos</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Mwangi</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hamilton</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zunta-Soares</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Quevedo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kauer-Sant'Anna</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kapczinski</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Soares</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Identifying a clinical signature of suicidality among patients with mood disorders: A pilot study using a machine learning approach</article-title>
          <source>J Affect Disord</source>
          <year>2016</year>
          <month>03</month>
          <day>15</day>
          <volume>193</volume>
          <fpage>109</fpage>
          <lpage>16</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/26773901"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jad.2015.12.066</pub-id>
          <pub-id pub-id-type="medline">26773901</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(15)31092-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC4744514</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Identifying Chinese adolescents with a high suicide attempt risk</article-title>
          <source>Psychiatry Res</source>
          <year>2018</year>
          <month>11</month>
          <volume>269</volume>
          <fpage>474</fpage>
          <lpage>480</lpage>
          <pub-id pub-id-type="doi">10.1016/j.psychres.2018.08.085</pub-id>
          <pub-id pub-id-type="medline">30195741</pub-id>
          <pub-id pub-id-type="pii">S0165-1781(17)31725-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Biddle</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Owen-Smith</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Klineberg</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Bennewith</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Hawton</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kapur</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Donovan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gunnell</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Qualitative interviewing with vulnerable populations: individuals' experiences of participating in suicide and self-harm based research</article-title>
          <source>J Affect Disord</source>
          <year>2013</year>
          <month>03</month>
          <day>05</day>
          <volume>145</volume>
          <issue>3</issue>
          <fpage>356</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2012.08.024</pub-id>
          <pub-id pub-id-type="medline">23021191</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(12)00593-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lv</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Creating a Chinese suicide dictionary for identifying suicide risk on social media</article-title>
          <source>PeerJ</source>
          <year>2015</year>
          <volume>3</volume>
          <fpage>e1455</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/26713232"/>
          </comment>
          <pub-id pub-id-type="doi">10.7717/peerj.1455</pub-id>
          <pub-id pub-id-type="medline">26713232</pub-id>
          <pub-id pub-id-type="pii">1455</pub-id>
          <pub-id pub-id-type="pmcid">PMC4690390</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Posner</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Stanley</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Brent</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yershova</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Oquendo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Currier</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Melvin</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Greenhill</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The Columbia-Suicide Severity Rating Scale: initial validity and internal consistency findings from three multisite studies with adolescents and adults</article-title>
          <source>Am J Psychiatry</source>
          <year>2011</year>
          <month>12</month>
          <volume>168</volume>
          <issue>12</issue>
          <fpage>1266</fpage>
          <lpage>77</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/22193671"/>
          </comment>
          <pub-id pub-id-type="doi">10.1176/appi.ajp.2011.10111704</pub-id>
          <pub-id pub-id-type="medline">22193671</pub-id>
          <pub-id pub-id-type="pmcid">PMC3893686</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Harrigan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenthal</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Nonverbal behavior in clinician—patient interaction</article-title>
          <source>Applied and Preventive Psychology</source>
          <year>1995</year>
          <month>12</month>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>21</fpage>
          <lpage>37</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/S0962-1849(05)80049-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/s0962-1849(05)80049-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>Stephen E</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>Marilyn K</given-names>
            </name>
          </person-group>
          <article-title>Methods and apparatus for evaluating near-term suicidal risk using vocal parameters</article-title>
          <source>US Patent US7062443</source>
          <year>2006</year>
          <access-date>2006-06-13</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://patents.google.com/patent/US7062443B2/en">https://patents.google.com/patent/US7062443B2/en</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Corcoran</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mittal</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bearden</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gur</surname>
              <given-names>RE</given-names>
            </name>
            <name name-style="western">
              <surname>Hitczenko</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Bilgrami</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Savic</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cecchi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wolff</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Language as a biomarker for psychosis: A natural language processing approach</article-title>
          <source>Schizophr Res</source>
          <year>2020</year>
          <month>12</month>
          <volume>226</volume>
          <fpage>158</fpage>
          <lpage>166</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32499162"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.schres.2020.04.032</pub-id>
          <pub-id pub-id-type="medline">32499162</pub-id>
          <pub-id pub-id-type="pii">S0920-9964(20)30247-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7704556</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Homan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gabi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Klee</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bachmann</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moser</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Durić</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bertram</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Maatz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Seiler</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Stark</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kleim</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Linguistic features of suicidal thoughts and behaviors: A systematic review</article-title>
          <source>Clinical Psychology Review</source>
          <year>2022</year>
          <month>07</month>
          <volume>95</volume>
          <fpage>102161</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.cpr.2022.102161"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cpr.2022.102161</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Linthicum</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Schafer</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ribeiro</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Machine learning in suicide science: Applications and ethics</article-title>
          <source>Behav Sci Law</source>
          <year>2019</year>
          <month>05</month>
          <volume>37</volume>
          <issue>3</issue>
          <fpage>214</fpage>
          <lpage>222</lpage>
          <pub-id pub-id-type="doi">10.1002/bsl.2392</pub-id>
          <pub-id pub-id-type="medline">30609102</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Insel</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Digital phenotyping: technology for a new science of behavior</article-title>
          <source>JAMA</source>
          <year>2017</year>
          <month>10</month>
          <day>03</day>
          <volume>318</volume>
          <issue>13</issue>
          <fpage>1215</fpage>
          <lpage>1216</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2017.11295</pub-id>
          <pub-id pub-id-type="medline">28973224</pub-id>
          <pub-id pub-id-type="pii">2654782</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akkaralaertsest</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yingthawornsuk</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Comparative analysis of vocal characteristics in speakers with depression and high-risk suicide</article-title>
          <source>IJCTE</source>
          <year>2015</year>
          <month>12</month>
          <volume>7</volume>
          <issue>6</issue>
          <fpage>448</fpage>
          <lpage>452</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.7763/ijcte.2015.v7.1001"/>
          </comment>
          <pub-id pub-id-type="doi">10.7763/ijcte.2015.v7.1001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Belouali</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sourirajan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Allen</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Alaoui</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutton</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Reinhard</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Acoustic and language analysis of speech for suicidal ideation among US veterans</article-title>
          <source>BioData Min</source>
          <year>2021</year>
          <month>02</month>
          <day>02</day>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>11</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://biodatamining.biomedcentral.com/articles/10.1186/s13040-021-00245-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13040-021-00245-y</pub-id>
          <pub-id pub-id-type="medline">33531048</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13040-021-00245-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC7856815</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bryan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Baucom</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Crenshaw</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Imel</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Atkins</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Clemans</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Leeson</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Burch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mintz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rudd</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Associations of patient-rated emotional bond and vocally encoded emotional arousal among clinicians and acutely suicidal military personnel</article-title>
          <source>J Consult Clin Psychol</source>
          <year>2018</year>
          <month>04</month>
          <volume>86</volume>
          <issue>4</issue>
          <fpage>372</fpage>
          <lpage>383</lpage>
          <pub-id pub-id-type="doi">10.1037/ccp0000295</pub-id>
          <pub-id pub-id-type="medline">29648857</pub-id>
          <pub-id pub-id-type="pii">2018-14401-005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gideon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schatten</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mcinnis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Provost</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Emotion recognition from natural phone conversations in individuals with and without recent suicidal ideation</article-title>
          <year>2019</year>
          <conf-name>Conf Int Speech Commun Assoc</conf-name>
          <conf-date>September 15</conf-date>
          <conf-loc>Graz, Austria</conf-loc>
          <pub-id pub-id-type="doi">10.21437/interspeech.2019-1830</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hashim</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Meggs</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>France</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of voice acoustics as predictors of clinical depression scores</article-title>
          <source>J Voice</source>
          <year>2017</year>
          <month>03</month>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>256.e1</fpage>
          <lpage>256.e6</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2016.06.006</pub-id>
          <pub-id pub-id-type="medline">27473933</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(16)30105-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keskinpala</surname>
              <given-names>Hande Kaymaz</given-names>
            </name>
            <name name-style="western">
              <surname>Yingthawornsuk</surname>
              <given-names>Thaweesak</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>Mitch</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>Richard G</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>Ronald M</given-names>
            </name>
          </person-group>
          <article-title>Screening for high risk suicidal states using mel-cepstral coefficients and energy in frequency bands</article-title>
          <year>2007</year>
          <conf-name>15th European Signal Processing Conference</conf-name>
          <conf-date>September 3-7</conf-date>
          <conf-loc>Poznan, Poland</conf-loc>
          <fpage>2229</fpage>
          <lpage>2233</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.scopus.com/inward/record.uri?eid=2-s2.0-84863746063&#38;partnerID=40&#38;md5=4a161da5de62e10bd9358cddc3e45bc6"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ozdas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Analysis of vocal tract characteristics for near-term suicidal risk assessment</article-title>
          <source>Methods Inf Med</source>
          <year>2004</year>
          <volume>43</volume>
          <issue>1</issue>
          <fpage>36</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="medline">15026833</pub-id>
          <pub-id pub-id-type="pii">04010036</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pestian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sorter</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Connolly</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bretonnel Cohen</surname>
              <given-names>Kevin</given-names>
            </name>
            <name name-style="western">
              <surname>McCullumsmith</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rohlfs</surname>
              <given-names>L</given-names>
            </name>
            <collab>STM Research Group</collab>
          </person-group>
          <article-title>A machine learning approach to identifying the thought markers of suicidal subjects: a prospective multicenter trial</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2017</year>
          <month>02</month>
          <volume>47</volume>
          <issue>1</issue>
          <fpage>112</fpage>
          <lpage>121</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.12312</pub-id>
          <pub-id pub-id-type="medline">27813129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Figueroa Saavedra</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Otzen Hernández</surname>
              <given-names>Tamara</given-names>
            </name>
            <name name-style="western">
              <surname>Alarcón Godoy</surname>
              <given-names>Camila</given-names>
            </name>
            <name name-style="western">
              <surname>Ríos Pérez</surname>
              <given-names>Arlette</given-names>
            </name>
            <name name-style="western">
              <surname>Frugone Salinas</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lagos Hernández</surname>
              <given-names>Roberto</given-names>
            </name>
          </person-group>
          <article-title>Association between suicidal ideation and acoustic parameters of university students' voice and speech: a pilot study</article-title>
          <source>Logoped Phoniatr Vocol</source>
          <year>2021</year>
          <month>07</month>
          <volume>46</volume>
          <issue>2</issue>
          <fpage>55</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1080/14015439.2020.1733075</pub-id>
          <pub-id pub-id-type="medline">32138570</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Duvvuri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chandra</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ghomi</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Automated voice biomarkers for depression symptoms using an online cross-sectional data collection initiative</article-title>
          <source>Depress Anxiety</source>
          <year>2020</year>
          <month>07</month>
          <volume>37</volume>
          <issue>7</issue>
          <fpage>657</fpage>
          <lpage>669</lpage>
          <pub-id pub-id-type="doi">10.1002/da.23020</pub-id>
          <pub-id pub-id-type="medline">32383335</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Min</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Rhee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kwon</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Ahn</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Acoustic analysis of speech for screening for suicide risk: Machine learning classifiers for between- and within-person evaluation of suicidality</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <month>03</month>
          <day>23</day>
          <volume>25</volume>
          <fpage>e45456</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e45456/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/45456</pub-id>
          <pub-id pub-id-type="medline">36951913</pub-id>
          <pub-id pub-id-type="pii">v25i1e45456</pub-id>
          <pub-id pub-id-type="pmcid">PMC10131783</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brülhart</surname>
              <given-names>Marius</given-names>
            </name>
            <name name-style="western">
              <surname>Klotzbücher</surname>
              <given-names>Valentin</given-names>
            </name>
            <name name-style="western">
              <surname>Lalive</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Reich</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Mental health concerns during the COVID-19 pandemic as revealed by helpline calls</article-title>
          <source>Nature</source>
          <year>2021</year>
          <month>12</month>
          <volume>600</volume>
          <issue>7887</issue>
          <fpage>121</fpage>
          <lpage>126</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34789873"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41586-021-04099-6</pub-id>
          <pub-id pub-id-type="medline">34789873</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41586-021-04099-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC9973557</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>World Health Organization</collab>
          </person-group>
          <source>Preventing suicide: a resource for establishing a crisis line</source>
          <access-date>2018-09-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/publications/i/item/WHO_MSD_MER_18.4">https://www.who.int/publications/i/item/WHO_MSD_MER_18.4</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gould</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lake</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Munfakh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Galfalvy</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kleinman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Glass</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McKeon</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Helping callers to the national suicide prevention lifeline who are at imminent risk of suicide: evaluation of caller risk profiles and interventions implemented</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2016</year>
          <month>04</month>
          <volume>46</volume>
          <issue>2</issue>
          <fpage>172</fpage>
          <lpage>90</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.12182</pub-id>
          <pub-id pub-id-type="medline">26242234</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Conner</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Conwell</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Predictive value of suicidal risk assessment using data from China's largest suicide prevention hotline</article-title>
          <source>J Affect Disord</source>
          <year>2023</year>
          <month>05</month>
          <day>15</day>
          <volume>329</volume>
          <fpage>141</fpage>
          <lpage>148</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2023.02.095</pub-id>
          <pub-id pub-id-type="medline">36842651</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(23)00257-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hines</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Using the telephone in family therapy</article-title>
          <source>J Marital Fam Ther</source>
          <year>1994</year>
          <volume>20</volume>
          <fpage>175</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/j.1752-0606.1994.tb01025.x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/j.1752-0606.1994.tb01025.x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coman</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Burrows</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Telephone counselling in Australia: Applications and considerations for use</article-title>
          <source>Br J Guid Couns</source>
          <year>2001</year>
          <volume>29</volume>
          <fpage>247</fpage>
          <lpage>258</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1080/03069880020047166"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/03069880124904</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shillingford</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Assael</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Walters</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Speech bandwidth extension with Wavenet</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)</conf-name>
          <conf-date>October 20-23</conf-date>
          <conf-loc>New Paltz, NY, USA</conf-loc>
          <fpage>205</fpage>
          <lpage>208</lpage>
          <pub-id pub-id-type="doi">10.1109/waspaa.2019.8937169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ong</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sy</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>The effects of noise on speech intelligibility in telephone communication</article-title>
          <source>Canadian Acoustics</source>
          <year>1984</year>
          <access-date>1984-07-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://citeseerx.ist.psu.edu/document?repid=rep1&#38;type=pdf&#38;doi=8179e5b5e44359cb77edf046128b90e3b12ce323">https://citeseerx.ist.psu.edu/document?repid=rep1&#38;type=pdf&#38;doi=8179e5b5e44359cb77edf046128b90e3b12ce323</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iyer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nedeljkovic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Using voice biomarkers to classify suicide risk in adult telehealth callers: Retrospective observational study</article-title>
          <source>JMIR Ment Health</source>
          <year>2022</year>
          <month>08</month>
          <day>15</day>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>e39807</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mental.jmir.org/2022/8/e39807/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/39807</pub-id>
          <pub-id pub-id-type="medline">35969444</pub-id>
          <pub-id pub-id-type="pii">v9i8e39807</pub-id>
          <pub-id pub-id-type="pmcid">PMC9425169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Atmaja</surname>
              <given-names>BT</given-names>
            </name>
            <name name-style="western">
              <surname>Akagi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>On the differences between song and speech emotion recognition: Effect of feature sets, feature types, and classifiers</article-title>
          <year>2020</year>
          <conf-name>2020 IEEE Region 10 Conference (TENCON)</conf-name>
          <conf-date>November 16-19</conf-date>
          <conf-loc>Osaka, Japan</conf-loc>
          <fpage>968</fpage>
          <lpage>972</lpage>
          <pub-id pub-id-type="doi">10.1109/tencon50793.2020.9293852</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>The extraction method of emotional feature based on children's spoken speech</article-title>
          <year>2019</year>
          <conf-name>2019 11th International Conference on Intelligent Human-Machine Systems and Cybernetics (IHMSC)</conf-name>
          <conf-date>August 24-25</conf-date>
          <conf-loc>Hangzhou, China</conf-loc>
          <fpage>165</fpage>
          <lpage>168</lpage>
          <pub-id pub-id-type="doi">10.1109/ihmsc.2019.00046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pillai</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nepal</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Nemesure</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Heinz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Price</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lekkas</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Griffin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Buck</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Preum</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobson</surname>
              <given-names>NC</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-Zeev</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Investigating generalizability of speech-based suicidal ideation detection using mobile phones</article-title>
          <source>Proc. ACM Interact. Mob. Wearable Ubiquitous Technol</source>
          <year>2024</year>
          <month>01</month>
          <day>12</day>
          <volume>7</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>38</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3631452"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3631452</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fleiss</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Measuring nominal scale agreement among many raters</article-title>
          <source>Psychological Bulletin</source>
          <year>1971</year>
          <volume>76</volume>
          <issue>5</issue>
          <fpage>378</fpage>
          <lpage>382</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1037/h0031619"/>
          </comment>
          <pub-id pub-id-type="doi">10.1037/h0031619</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Atal</surname>
              <given-names>BS</given-names>
            </name>
          </person-group>
          <article-title>Effectiveness of linear prediction characteristics of the speech wave for automatic speaker identification and verification</article-title>
          <source>J Acoust Soc Am</source>
          <year>1974</year>
          <month>06</month>
          <volume>55</volume>
          <issue>6</issue>
          <fpage>1304</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.1121/1.1914702</pub-id>
          <pub-id pub-id-type="medline">4846727</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Di</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Predictive modeling of neuroticism in depressed and non-depressed cohorts using voice features</article-title>
          <source>J Affect Disord</source>
          <year>2024</year>
          <month>05</month>
          <day>01</day>
          <volume>352</volume>
          <fpage>395</fpage>
          <lpage>402</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2024.02.021</pub-id>
          <pub-id pub-id-type="medline">38342318</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(24)00328-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC11625964</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rhee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ahn</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Detection of minor and major depression through voice as a biomarker using machine learning</article-title>
          <source>J Clin Med</source>
          <year>2021</year>
          <month>07</month>
          <day>08</day>
          <volume>10</volume>
          <issue>14</issue>
          <fpage>3046</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm10143046"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm10143046</pub-id>
          <pub-id pub-id-type="medline">34300212</pub-id>
          <pub-id pub-id-type="pii">jcm10143046</pub-id>
          <pub-id pub-id-type="pmcid">PMC8303477</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krajewski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schnieder</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Quatieri</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A review of depression and suicide risk assessment using speech analysis</article-title>
          <source>Speech Communication</source>
          <year>2015</year>
          <month>07</month>
          <volume>71</volume>
          <fpage>10</fpage>
          <lpage>49</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.specom.2015.03.004"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.specom.2015.03.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yingthawornsuk</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Keskinpala</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Direct acoustic feature using iterative EM algorithm and spectral energy for classifying suicidal speech</article-title>
          <year>2007</year>
          <conf-name>Interspeech 2007, 8th Annual Conference of the International Speech Communication Association</conf-name>
          <conf-date>August 27-31</conf-date>
          <conf-loc>Antwerp, Belgium</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2007-144</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>France</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Acoustical properties of speech as indicators of depression and suicidal risk</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2000</year>
          <month>07</month>
          <volume>47</volume>
          <issue>7</issue>
          <fpage>829</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1109/10.846676</pub-id>
          <pub-id pub-id-type="medline">10916253</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wöllmer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Opensmile: the Munich versatile and fast open-source audio feature extractor</article-title>
          <year>2010</year>
          <conf-name>Proceedings of the 18th ACM International Conference on Multimedia, MM 2010</conf-name>
          <conf-date>October 25-29</conf-date>
          <conf-loc>Firenze, Italy</conf-loc>
          <fpage>1459</fpage>
          <lpage>1462</lpage>
          <pub-id pub-id-type="doi">10.1145/1873951.1874246</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Steidl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Batliner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hirschberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Burgoon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Baird</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Elkins</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Coutinho</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Evanini</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The INTERSPEECH 2016 Computational Paralinguistics Challenge: Deception, Sincerity &#38; Native Language</article-title>
          <year>2016</year>
          <conf-name>Interspeech 2016</conf-name>
          <conf-date>September 8-12</conf-date>
          <conf-loc>San Francisco, USA</conf-loc>
          <pub-id pub-id-type="doi">10.21437/interspeech.2016-129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deb</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dandapat</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krajewski</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Analysis and classification of cold speech using variational mode decomposition</article-title>
          <source>IEEE Trans. Affective Comput</source>
          <year>2020</year>
          <month>4</month>
          <day>1</day>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>296</fpage>
          <lpage>307</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/TAFFC.2017.2761750"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/taffc.2017.2761750</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kraskov</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stögbauer</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Grassberger</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Erratum: Estimating mutual information [Phys. Rev. E 69, 066138 (2004)]</article-title>
          <source>Phys Rev E</source>
          <year>2011</year>
          <month>1</month>
          <day>20</day>
          <volume>83</volume>
          <issue>1</issue>
          <fpage>E</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1103/PhysRevE.83.019903"/>
          </comment>
          <pub-id pub-id-type="doi">10.1103/physreve.83.019903</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dwyer</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Falkai</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Koutsouleris</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Machine learning approaches for clinical psychology and psychiatry</article-title>
          <source>Annu Rev Clin Psychol</source>
          <year>2018</year>
          <month>05</month>
          <day>07</day>
          <volume>14</volume>
          <fpage>91</fpage>
          <lpage>118</lpage>
          <pub-id pub-id-type="doi">10.1146/annurev-clinpsy-032816-045037</pub-id>
          <pub-id pub-id-type="medline">29401044</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saeb</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lonini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Jayaraman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mohr</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Kording</surname>
              <given-names>KP</given-names>
            </name>
          </person-group>
          <article-title>The need to approximate the use-case in clinical machine learning</article-title>
          <source>Gigascience</source>
          <year>2017</year>
          <month>05</month>
          <day>01</day>
          <volume>6</volume>
          <issue>5</issue>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28327985"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/gigascience/gix019</pub-id>
          <pub-id pub-id-type="medline">28327985</pub-id>
          <pub-id pub-id-type="pii">3071704</pub-id>
          <pub-id pub-id-type="pmcid">PMC5441397</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sokolova</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Japkowicz</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Szpakowicz</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Beyond accuracy, F-score and ROC: a family of discriminant measures for performance evaluation</article-title>
          <year>2006</year>
          <conf-name>Australasian Joint Conference on Artificial Intelligence</conf-name>
          <conf-date>December 4-8</conf-date>
          <conf-loc>Hobart, Australia</conf-loc>
          <pub-id pub-id-type="doi">10.1007/11941439_114</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Draper</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vega</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Covington</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>McKeon</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Helping callers to the National Suicide Prevention Lifeline who are at imminent risk of suicide: the importance of active engagement, active rescue, and collaboration between crisis and emergency services</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2015</year>
          <month>06</month>
          <volume>45</volume>
          <issue>3</issue>
          <fpage>261</fpage>
          <lpage>70</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/25270689"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/sltb.12128</pub-id>
          <pub-id pub-id-type="medline">25270689</pub-id>
          <pub-id pub-id-type="pmcid">PMC4491352</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasir</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Baucom</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bryan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Narayanan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Georgiou</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Complexity in speech and its relation to emotional bond in therapist-patient interactions during suicide risk assessment interviews</article-title>
          <year>2017</year>
          <conf-name>Interspeech 2017</conf-name>
          <conf-date>August 20-24</conf-date>
          <conf-loc>Stockholm, Sweden</conf-loc>
          <pub-id pub-id-type="doi">10.21437/interspeech.2017-1641</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pestian</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Investigating the speech characteristics of suicidal adolescents</article-title>
          <year>2013</year>
          <conf-name>2013 IEEE International Conference on Acoustics, Speech and Signal Processing</conf-name>
          <conf-date>May 26-31</conf-date>
          <conf-loc>Vancouver, BC, Canada</conf-loc>
          <fpage>709</fpage>
          <lpage>713</lpage>
          <pub-id pub-id-type="doi">10.1109/icassp.2013.6637740</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wahidah</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Investigating the course of recovery in high risk suicide using power spectral density</article-title>
          <source>Asian Journal of Applied Sciences</source>
          <year>2015</year>
          <access-date>2025-03-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/283325745_Investigating_the_Course_of_Recovery_in_High_Risk_Suicide_using_Power_Spectral_Density">https://www.researchgate.net/publication/283325745_Investigating_the_Course_of_Recovery_in_High_Risk_Suicide_using_Power_Spectral_Density</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wahidah</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Timing patterns of speech as potential indicators of near-term suicidal risk</article-title>
          <source>International Journal of Multidisciplinary and Current Research</source>
          <year>2015</year>
          <access-date>2025-03-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://ijmcr.com/timing-patterns-of-speech-as-potential-indicators-of-near-term-suicidal-risk/">http://ijmcr.com/timing-patterns-of-speech-as-potential-indicators-of-near-term-suicidal-risk/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Figueroa</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng-Treitler</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Kandula</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ngo</surname>
              <given-names>LH</given-names>
            </name>
          </person-group>
          <article-title>Predicting sample size required for classification performance</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2012</year>
          <month>02</month>
          <day>15</day>
          <volume>12</volume>
          <fpage>8</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/1472-6947-12-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/1472-6947-12-8</pub-id>
          <pub-id pub-id-type="medline">22336388</pub-id>
          <pub-id pub-id-type="pii">1472-6947-12-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC3307431</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wöllmer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Reiter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cox</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Douglas-Cowie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cowie</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Abandoning emotion classes - towards continuous emotion recognition with modelling of long-range dependencies</article-title>
          <year>2008</year>
          <conf-name>Interspeech 2008</conf-name>
          <conf-date>September 22-26</conf-date>
          <conf-loc>Brisbane, Australia</conf-loc>
          <fpage>597</fpage>
          <lpage>600</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2008-192</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mehrabian</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Framework for a comprehensive description and measurement of emotional states</article-title>
          <source>Genet Soc Gen Psychol Monogr</source>
          <year>1995</year>
          <month>08</month>
          <volume>121</volume>
          <issue>3</issue>
          <fpage>339</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="medline">7557355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lowell</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Colton</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Kelley</surname>
              <given-names>RT</given-names>
            </name>
            <name name-style="western">
              <surname>Hahn</surname>
              <given-names>YC</given-names>
            </name>
          </person-group>
          <article-title>Spectral- and cepstral-based measures during continuous speech: capacity to distinguish dysphonia and consistency within a speaker</article-title>
          <source>J Voice</source>
          <year>2011</year>
          <month>09</month>
          <volume>25</volume>
          <issue>5</issue>
          <fpage>e223</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2010.06.007</pub-id>
          <pub-id pub-id-type="medline">20971612</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(10)00120-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Memon</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Acoustic correlates of the voice qualifiers: a survey</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on October 29, 2020</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2010.15869</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dietrich</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Verdolini Abbott</surname>
              <given-names>Katherine</given-names>
            </name>
          </person-group>
          <article-title>Vocal function in introverts and extraverts during a psychological stress reactivity protocol</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2012</year>
          <month>06</month>
          <volume>55</volume>
          <issue>3</issue>
          <fpage>973</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.1044/1092-4388(2011/10-0344)</pub-id>
          <pub-id pub-id-type="medline">22232397</pub-id>
          <pub-id pub-id-type="pii">1092-4388_2011_10-0344</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Suh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Screening major depressive disorder using vocal acoustic features in the elderly by sex</article-title>
          <source>J Affect Disord</source>
          <year>2021</year>
          <month>08</month>
          <day>01</day>
          <volume>291</volume>
          <fpage>15</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jad.2021.04.098</pub-id>
          <pub-id pub-id-type="medline">34022551</pub-id>
          <pub-id pub-id-type="pii">S0165-0327(21)00430-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tasnim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Novikova</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Cost-effective models for detecting depression from speech</article-title>
          <year>2022</year>
          <conf-name>21st IEEE International Conference on Machine Learning and Applications (ICMLA)</conf-name>
          <conf-date>December 12-14</conf-date>
          <conf-loc>Nassau, Bahamas</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icmla55696.2022.00259</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>Hyun-Chool</given-names>
            </name>
          </person-group>
          <article-title>Automatic depression detection using smartphone-based text-dependent speech signals: deep convolutional neural network approach</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <month>01</month>
          <day>25</day>
          <volume>25</volume>
          <fpage>e34474</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e34474/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/34474</pub-id>
          <pub-id pub-id-type="medline">36696160</pub-id>
          <pub-id pub-id-type="pii">v25i1e34474</pub-id>
          <pub-id pub-id-type="pmcid">PMC9909514</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grimland</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Benatov</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yeshayahu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Izmaylov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Segal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Levi-Belz</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Predicting suicide risk in real-time crisis hotline chats integrating machine learning with psychological factors: Exploring the black box</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2024</year>
          <month>06</month>
          <volume>54</volume>
          <issue>3</issue>
          <fpage>416</fpage>
          <lpage>424</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.13056</pub-id>
          <pub-id pub-id-type="medline">38345174</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
