<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v24i10e40567</article-id>
      <article-id pub-id-type="pmid">36264608</article-id>
      <article-id pub-id-type="doi">10.2196/40567</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Automatic Assessment of Intelligibility in Noise in Parkinson Disease: Validation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kukafka</surname>
            <given-names>Rita</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Klein</surname>
            <given-names>Gunnar</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Delgado Hernández</surname>
            <given-names>Jonathan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Balaguer</surname>
            <given-names>Mathieu</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Moya-Galé</surname>
            <given-names>Gemma</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Communication Sciences &#38; Disorders</institution>
            <institution>Long Island University</institution>
            <addr-line>1 University Plaza</addr-line>
            <addr-line>Brooklyn, NY, 11201</addr-line>
            <country>United States</country>
            <phone>1 718 780 4125</phone>
            <email>gemma.moya-gale@liu.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4933-1110</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Walsh</surname>
            <given-names>Stephen J</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0505-648X</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Goudarzi</surname>
            <given-names>Alireza</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5002-3815</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Communication Sciences &#38; Disorders</institution>
        <institution>Long Island University</institution>
        <addr-line>Brooklyn, NY</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Mathematics and Statistics</institution>
        <institution>Utah State University</institution>
        <addr-line>Logan, UT</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Factorize</institution>
        <addr-line>Tokyo</addr-line>
        <country>Japan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Gemma Moya-Galé <email>gemma.moya-gale@liu.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>10</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>20</day>
        <month>10</month>
        <year>2022</year>
      </pub-date>
      <volume>24</volume>
      <issue>10</issue>
      <elocation-id>e40567</elocation-id>
      <history>
        <date date-type="received">
          <day>27</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>23</day>
          <month>8</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>5</day>
          <month>9</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>16</day>
          <month>9</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Gemma Moya-Galé, Stephen J Walsh, Alireza Goudarzi. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 20.10.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2022/10/e40567" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Most individuals with Parkinson disease (PD) experience a degradation in their speech intelligibility. Research on the use of automatic speech recognition (ASR) to assess intelligibility is still sparse, especially when trying to replicate communication challenges in real-life conditions (ie, noisy backgrounds). Developing technologies to automatically measure intelligibility in noise can ultimately assist patients in self-managing their voice changes due to the disease.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The goal of this study was to pilot-test and validate the use of a customized web-based app to assess speech intelligibility in noise in individuals with dysarthria associated with PD.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>In total, 20 individuals with dysarthria associated with PD and 20 healthy controls (HCs) recorded a set of sentences using their phones. The Google Cloud ASR API was used to automatically transcribe the speakers’ sentences. An algorithm was created to embed speakers’ sentences in +6-dB signal-to-noise multitalker babble. Results from ASR performance were compared to those from 30 listeners who orthographically transcribed the same set of sentences. Data were reduced into a single event, defined as a success if the artificial intelligence (AI) system transcribed a random speaker or sentence as well or better than the average of 3 randomly chosen human listeners. These data were further analyzed by logistic regression to assess whether AI success differed by speaker group (HCs or speakers with dysarthria) or was affected by sentence length. A discriminant analysis was conducted on the human listener data and AI transcriber data independently to compare the ability of each data set to discriminate between HCs and speakers with dysarthria.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The data analysis indicated a 0.8 probability (95% CI 0.65-0.91) that AI performance would be as good or better than the average human listener. AI transcriber success probability was not found to be dependent on speaker group. AI transcriber success was found to decrease with sentence length, losing an estimated 0.03 probability of transcribing as well as the average human listener for each word increase in sentence length. The AI transcriber data were found to offer the same discrimination of speakers into categories (HCs and speakers with dysarthria) as the human listener data.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>ASR has the potential to assess intelligibility in noise in speakers with dysarthria associated with PD. Our results hold promise for the use of AI with this clinical population, although a full range of speech severity needs to be evaluated in future work, as well as the effect of different speaking tasks on ASR.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>automatic speech recognition</kwd>
        <kwd>Parkinson disease</kwd>
        <kwd>intelligibility</kwd>
        <kwd>dysarthria</kwd>
        <kwd>digital health</kwd>
        <kwd>artificial intelligence</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Parkinson disease (PD) is the second most common neurodegenerative disease, following Alzheimer disease [<xref ref-type="bibr" rid="ref1">1</xref>]. Approximately 1 million individuals are estimated to be affected by the disease in the United States [<xref ref-type="bibr" rid="ref2">2</xref>], and its prevalence surpasses 6 million people worldwide [<xref ref-type="bibr" rid="ref3">3</xref>], with numbers projected to increase in the future [<xref ref-type="bibr" rid="ref2">2</xref>]. Close to 90% of individuals with PD evidence problems with voice or speech, an impairment known as hypokinetic dysarthria, which has a latency that averages 7 years post–disease onset [<xref ref-type="bibr" rid="ref4">4</xref>]. This motor speech disorder is characterized by hypophonia (ie, reduced loudness), monopitch, monoloudness, articulatory imprecision, reduced stress, short rushes of speech, and variable rate [<xref ref-type="bibr" rid="ref5">5</xref>]. As a result, many individuals affected by the disease complain of intelligibility problems (ie, their ability to be understood by others) [<xref ref-type="bibr" rid="ref6">6</xref>], especially in noisy environments (eg, when dining out at a restaurant). Additionally, the presence of background noise has been shown to negatively affect even speakers with mildly dysarthric speech [<xref ref-type="bibr" rid="ref7">7</xref>]. Overall, these speech deficits substantially reduce speakers’ social participation and overall quality of life [<xref ref-type="bibr" rid="ref8">8</xref>], as their inability to effectively communicate with others increases their frustration and social isolation.</p>
      <p>The application of artificial intelligence (AI) in the medical field has brought promising results to enhance communication and, ultimately, quality of life [<xref ref-type="bibr" rid="ref9">9</xref>] in a wide range of individuals. For example, voice-assisted technology, which is used in devices such as Siri or Alexa, has become increasingly more present among individuals with a neurodegenerative disease, such as those with PD [<xref ref-type="bibr" rid="ref10">10</xref>], and has gradually been incorporated as a potential available tool for health professionals, such as speech and language pathologists [<xref ref-type="bibr" rid="ref11">11</xref>]. The development of automatic speech recognition (ASR) technologies has substantially advanced in the past 40 years, especially given the onset of deep learning mechanisms [<xref ref-type="bibr" rid="ref12">12</xref>]. Most crucially, the use of ASR has been shown to be effective in estimating speakers’ intelligibility deficits for different clinical populations who may present with speech impairments [<xref ref-type="bibr" rid="ref13">13</xref>], such as those resulting from a laryngectomy [<xref ref-type="bibr" rid="ref14">14</xref>], a cleft palate [<xref ref-type="bibr" rid="ref15">15</xref>], or head and neck cancer [<xref ref-type="bibr" rid="ref16">16</xref>]. Additionally, the clinical validity of ASR has also been explored in individuals with apraxia of speech and aphasia with promising results [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Project Euphonia has achieved a large-scale data set with over 1 million recordings of disordered speech, with the ultimate goal to personalize ASR models to enhance communication in individuals who experience speech and language difficulties [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. 
Despite the great advancements that these findings represent, however, research on the application of ASR for individuals with the motor speech disorder of dysarthria has been more limited [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref23">23</xref>], and it has underscored the high degree of variability that characterizes dysarthric speech [<xref ref-type="bibr" rid="ref13">13</xref>], especially with increased speech severity levels [<xref ref-type="bibr" rid="ref24">24</xref>]. Dimauro et al [<xref ref-type="bibr" rid="ref25">25</xref>] explored the use of ASR with 28 individuals with dysarthria associated with PD, 22 healthy older adults, and 15 healthy young controls. In their study, the speech-to-text system focused on the recognition error rates of words from different speech tasks. Although their results upheld the use of AI as a promising resource for clinical populations, it is important to note, however, that their experiment was conducted in quiet conditions, which may not reflect the real-life challenges speakers with PD face in everyday communication. More recently, Gutz et al [<xref ref-type="bibr" rid="ref26">26</xref>] used the Google Cloud ASR API for intelligibility measurement with 52 speakers with dysarthria associated with amyotrophic lateral sclerosis and 20 healthy controls. Additionally, the authors used noise-augmented ASR to assist the AI system in discriminating between healthy speech and mildly dysarthric speech. Results from their study showed high variability and poor internal validity of machine word recognition rate, suggesting that this technology may have limited clinical applicability for this population at this time.</p>
      <p>Our previous pilot work examined ASR performance in multitalker babble noise to measure speech intelligibility from a reading task in 5 speakers with PD and 5 healthy adults [<xref ref-type="bibr" rid="ref27">27</xref>]. Preliminary results supported the feasibility of AI technologies to simulate real-life challenges posed by ambient noise. Our current study was aimed at expanding our previous work with speakers with dysarthria associated with PD to preliminarily validate the use of ASR in noise with this clinical population. To that end, this study reports on the development, pilot-testing, and validation of a web-based app, <italic>Understand Me for Life</italic> [<xref ref-type="bibr" rid="ref27">27</xref>], to assess speech intelligibility in noise using the Google Cloud ASR API in speakers with dysarthria associated with PD. Specifically, our aims were to (1) examine how ASR compared to human transcription, the current gold standard, when determining intelligibility accuracy scores for speakers with hypokinetic dysarthria associated with PD; and (2) determine the extent to which ASR could accurately discriminate between speakers with dysarthria and healthy controls.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Ethics Approval</title>
        <p>This study was approved by the Institutional Review Board at Long Island University, Brooklyn (21/01-002-Bkln).</p>
      </sec>
      <sec>
        <title>Speakers</title>
        <p>In total, 20 individuals with PD (12 women and 8 men; mean age 73.3 years; age range 62-81 years) and 20 age- and sex-matched neurologically healthy adults participated in the speech recordings for this study. Individuals with PD had to meet the following inclusion criteria: (1) having a medical diagnosis of PD, (2) having experienced changes in their voice that represented a current concern, (3) having a stable anti-Parkinsonian medication, (4) passing the Montreal Cognitive Assessment [<xref ref-type="bibr" rid="ref28">28</xref>], and (5) being a native speaker of English. Exclusion criteria included having received intensive voice-focused treatment in the past 2 years prior to the study and having received deep brain stimulation. Neurologically healthy speakers (12 women and 8 men; mean age 70.5 years; age range 59-84 years) with no history of motor speech impairments served as controls. <xref ref-type="table" rid="table1">Table 1</xref> presents the speakers’ biographical details and clinical characteristics.</p>
        <p>Dysarthria severity ranged from mild to moderate in these speakers and was assessed from a conversation sample by an experienced speech and language pathologist. Consensus with a second speech and language pathologist was obtained for the final dysarthria severity estimates [<xref ref-type="bibr" rid="ref29">29</xref>].</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Speakers’ biographical details and clinical characteristics.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="80"/>
            <col width="100"/>
            <col width="100"/>
            <col width="100"/>
            <col width="150"/>
            <col width="470"/>
            <thead>
              <tr valign="top">
                <td>Speaker</td>
                <td>Age (years)</td>
                <td>Sex</td>
                <td>YPD<sup>a</sup></td>
                <td>Dysarthria severity</td>
                <td>Patient’s voice complaint</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>P1<sup>b</sup></td>
                <td>77</td>
                <td>Female</td>
                <td>9</td>
                <td>Mild</td>
                <td>Voice is softer and sounds are not as well-articulated</td>
              </tr>
              <tr valign="top">
                <td>P2</td>
                <td>77</td>
                <td>Male</td>
                <td>1</td>
                <td>Mild-moderate</td>
                <td>Voice is softer</td>
              </tr>
              <tr valign="top">
                <td>P3</td>
                <td>70</td>
                <td>Female</td>
                <td>6</td>
                <td>Mild</td>
                <td>Hoarseness</td>
              </tr>
              <tr valign="top">
                <td>P4</td>
                <td>72</td>
                <td>Female</td>
                <td>4</td>
                <td>Mild</td>
                <td>Less control over shaping words, changes in loudness, and occasional rapid breathing</td>
              </tr>
              <tr valign="top">
                <td>P5</td>
                <td>72</td>
                <td>Female</td>
                <td>7</td>
                <td>Mild-moderate</td>
                <td>Voice is much lower and softer and reduced intelligibility</td>
              </tr>
              <tr valign="top">
                <td>P6</td>
                <td>80</td>
                <td>Female</td>
                <td>8</td>
                <td>Mild-moderate</td>
                <td>Increased fatigue, hoarseness, and lack of clarity</td>
              </tr>
              <tr valign="top">
                <td>P7</td>
                <td>80</td>
                <td>Female</td>
                <td>8</td>
                <td>Mild</td>
                <td>Reduced fundamental frequency range for singing and “scratchy feeling” in throat</td>
              </tr>
              <tr valign="top">
                <td>P8</td>
                <td>67</td>
                <td>Female</td>
                <td>9</td>
                <td>Mild-moderate</td>
                <td>Lower pitch, hoarseness, voice is much softer, and reduced intelligibility</td>
              </tr>
              <tr valign="top">
                <td>P9</td>
                <td>65</td>
                <td>Female</td>
                <td>5</td>
                <td>Mild</td>
                <td>Recent coughing, softness of voice, and voice sounds rougher and softer than usual</td>
              </tr>
              <tr valign="top">
                <td>P10</td>
                <td>78</td>
                <td>Female</td>
                <td>7</td>
                <td>Mild</td>
                <td>Slurring, voice is softer, and intelligibility has been affected</td>
              </tr>
              <tr valign="top">
                <td>P11</td>
                <td>60</td>
                <td>Female</td>
                <td>8</td>
                <td>Mild</td>
                <td>Occasional reduction in loudness</td>
              </tr>
              <tr valign="top">
                <td>P12</td>
                <td>66</td>
                <td>Male</td>
                <td>7</td>
                <td>Mild</td>
                <td>Fluctuations in voice and voice is much softer</td>
              </tr>
              <tr valign="top">
                <td>P13</td>
                <td>73</td>
                <td>Male</td>
                <td>8</td>
                <td>Mild</td>
                <td>Occasional reduction in loudness and stuttering</td>
              </tr>
              <tr valign="top">
                <td>P14</td>
                <td>80</td>
                <td>Female</td>
                <td>7</td>
                <td>Mild-moderate</td>
                <td>Voice is softer</td>
              </tr>
              <tr valign="top">
                <td>P15</td>
                <td>73</td>
                <td>Male</td>
                <td>13</td>
                <td>Mild-moderate</td>
                <td>Voice is softer and more strained</td>
              </tr>
              <tr valign="top">
                <td>P16</td>
                <td>78</td>
                <td>Male</td>
                <td>4</td>
                <td>Mild</td>
                <td>Voice is softer, trouble finding words, and sometimes intelligibility is affected</td>
              </tr>
              <tr valign="top">
                <td>P17</td>
                <td>62</td>
                <td>Male</td>
                <td>13</td>
                <td>Moderate</td>
                <td>Voice is very soft, problems with intelligibility, and fast speaking rate</td>
              </tr>
              <tr valign="top">
                <td>P18</td>
                <td>81</td>
                <td>Male</td>
                <td>8</td>
                <td>Mild-moderate</td>
                <td>Voice is softer, breathiness, and have to clear throat more often</td>
              </tr>
              <tr valign="top">
                <td>P19</td>
                <td>80</td>
                <td>Female</td>
                <td>8</td>
                <td>Mild</td>
                <td>Voice is softer</td>
              </tr>
              <tr valign="top">
                <td>P20</td>
                <td>76</td>
                <td>Male</td>
                <td>7</td>
                <td>Moderate</td>
                <td>Soft voice and hoarseness</td>
              </tr>
              <tr valign="top">
                <td>HC1<sup>c</sup></td>
                <td>68</td>
                <td>Female</td>
                <td>N/A<sup>d</sup></td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC2</td>
                <td>71</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC3</td>
                <td>64</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC4</td>
                <td>67</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC5</td>
                <td>72</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC6</td>
                <td>77</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC7</td>
                <td>72</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC8</td>
                <td>71</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC9</td>
                <td>67</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC10</td>
                <td>78</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC11</td>
                <td>59</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC12</td>
                <td>61</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC13</td>
                <td>75</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC14</td>
                <td>66</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC15</td>
                <td>63</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC16</td>
                <td>63</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC17</td>
                <td>84</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC18</td>
                <td>84</td>
                <td>Male</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC19</td>
                <td>65</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
              <tr valign="top">
                <td>HC20</td>
                <td>83</td>
                <td>Female</td>
                <td>N/A</td>
                <td>N/A</td>
                <td>N/A</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>YPD: years postdiagnosis.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>P: patient (speaker with dysarthria associated with Parkinson disease).</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>HC: healthy control.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>N/A: not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Speech Stimuli and Recording Procedures</title>
        <p>A set of 100 grammatically and semantically correct sentences was created for this study. Sentences differed in length, from 5 to 9 words (eg, “Take care of my house while I am away”), and contained high frequency words in the English language (The English Lexicon Project) [<xref ref-type="bibr" rid="ref30">30</xref>]. The data set was then divided into 4 different blocks of 25 randomized sentences each, with blocks having an equal number of sentences from each sentence length. Each speaker was randomized to 1 block of stimuli for speech recordings, so that each block was read by 10 different speakers. Recordings were self-paced and conducted in a quiet room in the speakers’ homes using a customized web-based app, <italic>Understand Me for Life</italic> [<xref ref-type="bibr" rid="ref27">27</xref>], that the speakers could access from their mobile phones. The first author met with speakers over the Zoom videoconferencing platform (Zoom Video Communications) to explain the recording procedure and address any potential questions. Careful directions were provided to ensure a constant 8-cm (3.15 inches) mouth-to-microphone distance [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Given the possibility of PD-related motor impairments hindering adequate recordings (eg, tremors), care partners were recruited to assist speakers when necessary. Speakers were allowed to rerecord a sentence in cases of extraneous noise in the background. A brief familiarization phase was provided at the beginning of the recording session so that speakers could practice using the interface. Feedback from speakers was obtained for later app optimization.</p>
        <p>For each recorded sentence, the app automatically embedded the speakers’ voice signal into +6-dB signal-to-noise multitalker babble noise [<xref ref-type="bibr" rid="ref33">33</xref>] to provide an intelligibility score, defined as the percentage of words accurately understood by the ASR system. Automatic feedback on performance was provided at the end of the recording session and not after each sentence to avoid any potential priming effects that could influence sentence production on subsequent items [<xref ref-type="bibr" rid="ref34">34</xref>].</p>
      </sec>
      <sec>
        <title>Multitalker Babble Noise</title>
        <p>Multitalker babble is thought to be the most common type of environmental noise experienced by listeners [<xref ref-type="bibr" rid="ref35">35</xref>], which, therefore, makes it more ecologically valid in speech perception experiments. For this study, 10-second sample recordings from National Public Radio were used. Audio files were manually checked to control for sudden changes in the speech signal (eg, increase in vocal intensity). Prolonged silences (ie, over 500 ms) were trimmed, followed by the equalization of the audio spectrum in a moving window. An equal number of male and female speakers was implemented in the creation of background noise [<xref ref-type="bibr" rid="ref36">36</xref>]. The equalized audios were finally combined to render 10-talker babble [<xref ref-type="bibr" rid="ref33">33</xref>].</p>
      </sec>
      <sec>
        <title>Listeners</title>
        <p>In total, 30 neurologically healthy adults (25 women and 5 men; mean age 23.1 years; age range 18-31 years) participated as listeners in the study. Listeners were recruited via flyers and word of mouth across the New York City area. Inclusion criteria for participation required listeners to be native speakers of English; have no history of speech, language, or communication impairment; have no prior experience with motor speech disorders; and pass a bilateral pure-tone hearing screening at 25-dB hearing level at 500, 1000, 2000, and 4000 Hz [<xref ref-type="bibr" rid="ref37">37</xref>]. Listeners were paid US $20 for their participation in the study.</p>
      </sec>
      <sec>
        <title>Human Transcription</title>
        <p>Listeners completed the intelligibility assessment task free field (ie, without headphones) in a quiet space at the Long Island University campus, in Brooklyn, New York. The task was accessible through the <italic>Understand Me for Life</italic> portal on a MacBook Pro laptop (Apple Inc). Listeners maintained a distance of 85 cm from the loudspeakers (Logitech Z150), and the loudspeakers were placed 31 cm from each other. Listener-to-loudspeaker distance represented the typical distance between conversational partners [<xref ref-type="bibr" rid="ref38">38</xref>]. The task took approximately 30-40 minutes to complete.</p>
        <p>A brief familiarization phase was presented before the start of the experiment and contained 3 sentences produced by a neurologically healthy adult male speaker. Listeners were instructed to write down word by word what they heard and not worry about punctuation marks. Each listener was randomly assigned to 1 speaker per block, with block presentation being random across listeners. Therefore, each listener heard a total of 4 speakers and 100 sentences. Sentences were presented in multitalker babble, hence replicating the AI condition. To avoid abrupt onsets and offsets of stimuli, 400 ms of noise were inserted at the beginning of each sentence, and each sentence was followed by 50 ms of babble noise [<xref ref-type="bibr" rid="ref39">39</xref>]. To obtain an average score for subsequent transcription accuracy calculations, each speaker was assigned to 3 listeners. None of the listeners required a break during the completion of this task.</p>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <sec>
          <title>Automatic Intelligibility Assessment</title>
          <p>Automatic intelligibility assessment (AIA) was conducted using the Google Cloud ASR API, a speech-to-text AI system with documented low word error rate for individuals with healthy speech that is thought to be the best platform to handle dysarthric speech, although software performance is still dependent on speech severity, with high word error rates in cases of more severely affected speech [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
          <p>For a given produced utterance (<italic>S</italic>) and the corresponding target sentence (<italic>T</italic>), stimuli were suitably padded with whitespace to ensure that both <italic>S</italic> and <italic>T</italic> were of equal length (<italic>L</italic>). Each word in <italic>S</italic> was codified with <italic>w<sub>s</sub></italic> and each word in <italic>T</italic> with <italic>w<sub>t</sub></italic>, where <italic>s</italic> and <italic>t</italic> were numbers from 0 to <italic>L</italic> – 1. Accuracy was calculated by the formula as follows:</p>
          <disp-formula>
            <graphic xlink:href="jmir_v24i10e40567_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </disp-formula>
          <p>where <italic>σ(w<sub>s</sub>,w<sub>t</sub>)</italic> = 1 if <italic>w<sub>s</sub></italic> = <italic>w<sub>t</sub></italic>, and 0 otherwise. This step was implemented to avoid providing a score to words that appeared in both <italic>S</italic> and <italic>T</italic> but were out of order [<xref ref-type="bibr" rid="ref27">27</xref>].</p>
        </sec>
        <sec>
          <title>Manual Intelligibility Assessment</title>
          <p>Transcription accuracy scores were calculated as the percentage of words correctly transcribed. Orthographic transcriptions are considered the most objective measure to assess intelligibility in dysarthria [<xref ref-type="bibr" rid="ref33">33</xref>]. Listeners’ orthographic transcripts had to match the target to be accepted as correct [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Obvious spelling errors or errors involving homonyms did not impact calculation scores and were assessed as correct responses. Omissions or additions of morphemes (eg, flower for flowers) were coded as errors.</p>
        </sec>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>The goal of the first phase of statistical analysis was to assess the degree to which the AIA could score as well or better than the average human transcriber (ie, listener). As described above, 3 listeners orthographically transcribed sentences from the same speakers, and their data were condensed into a <italic>percentage accuracy</italic> measure for each sentence, which summarized the percentage of words the human listener correctly transcribed. The average percentage accuracy, denoted as <italic>â<sub>ij, human avg</sub></italic>, was computed for each sentence <italic>j</italic> within each speaker <italic>i</italic> to reduce intralistener variability. The AIA system also received a percentage accuracy measure for each sentence or speaker, which we denoted as <italic>â<sub>ij, AIA</sub></italic>. The success of the AIA system was defined as follows:</p>
        <disp-formula>
          <graphic xlink:href="jmir_v24i10e40567_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>The AIA system was considered to give a successful transcription if its percentage accuracy score was at least as good as the average of the human listeners’ accuracies for sentence <italic>j</italic> within each speaker <italic>i</italic>. The data were then condensed up to the speaker level by computing the proportion of successes of the AIA system over the <italic>j</italic> = 1 , ... , 25 sentences read by speaker <italic>i</italic> as follows:</p>
        <disp-formula>
          <graphic xlink:href="jmir_v24i10e40567_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>This procedure provided an estimate of the probability of success of the AIA system transcription for randomly selected speakers. Standard binomial statistics were used to quantify uncertainty in this analysis and present the results with appropriate statistical summaries and CIs. We investigated whether data provided evidence that the AIA transcriber success differed whether the system was transcribing a healthy control (HC) or a speaker with dysarthria associated with PD and whether sentence length had an effect on AIA success, via a logistic regression analysis.</p>
        <p>The goal of the second phase of statistical analysis was to compare the ability of the resulting AIA transcription data summaries to discriminate between healthy controls and speakers with dysarthria. To investigate this goal, we applied linear discriminant analysis to identify optimal discrimination thresholds for both the listener transcriptions and the AIA transcriptions and summarized the discrimination ability of each via typical confusion matrices and correct percentage classification summaries. All statistical analyses were conducted in R statistical software (version 4.1.1; R Foundation for Statistical Computing) [<xref ref-type="bibr" rid="ref42">42</xref>] and a discriminant and classification analysis was conducted via the <italic>lda</italic> function in the <italic>MASS</italic> package [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
        <p>Intralistener reliability was assessed via percentage agreement on several (approximately 10) duplicate speaker sentences. Interlistener reliability was controlled for in this assessment by condensing each of the 3 listeners’ percentage accuracy measures for each speaker or sentence into the average.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>A summary of intrarater reliability is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. The average percentage agreement of repeated responses of this study’s listeners was 80%.</p>
      <p>The success summaries of the AIA transcriber at the speaker level are presented in <xref rid="figure2" ref-type="fig">Figure 2</xref>. The figure shows estimates of the probability of success for each speaker (ordered by score) with a 95% CI. The mean probability of success is indicated by the red horizontal line. The figure illustrates that the expected success probability of the AIA transcriber for a randomly selected speaker was approximately 0.8 (95% CI 0.65-0.91), with the AIA system scoring 80% of target sentences as well or better than the human transcribers for more than half (22/40, 55%) of the study’s speakers. The success probability estimates stratified by speaker group (HC or speaker with dysarthria) are shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>. The figure suggests that the AIA transcriber had a slightly more difficult time accurately transcribing the sentences read by speakers with dysarthria, with a slight decline in the estimate of probability of success for speakers #14, #18, and #19.</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Distribution of intrarater percentage agreement across the 30 listeners.</p>
        </caption>
        <graphic xlink:href="jmir_v24i10e40567_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Estimates of the probability that the automatic intelligibility assessment transcriber will be as accurate as human transcribers for each speaker. The vertical bands are 95% CIs on the estimate of probability of success. Black dotted line=0.5 and red dotted line=median AI probability of success. AI: artificial intelligence; C: control; P: patient with dysarthria.</p>
        </caption>
        <graphic xlink:href="jmir_v24i10e40567_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure3" position="float">
        <label>Figure 3</label>
        <caption>
          <p>Estimates of the probability that the automatic intelligibility assessment transcriber will be as accurate as human transcribers for each speaker: (A) healthy controls and (B) speakers with dysarthria. AI: artificial intelligence; C: control; P: patient with dysarthria.</p>
        </caption>
        <graphic xlink:href="jmir_v24i10e40567_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>We further analyzed these data via a logistic regression model. The response was the (logit) probability of AI success and the predictors were speaker group (HC or speakers with dysarthria) and sentence length. Speaker-to-speaker variance was controlled for by including speaker as a random effect. The fitted model estimates are presented in <xref ref-type="table" rid="table2">Table 2</xref>. The advantage of this approach is that each row provides a significance test for each term <italic>provided we have controlled for the effects of the other terms</italic>. In this regard, after controlling for speaker and sentence length, we see that these data provide weak evidence that AI success differs significantly by speaker group (ie, between HC and speakers with dysarthria; <italic>P</italic>=.23). Further, sentence length was found to have a significant negative impact on AI success (<italic>P&#60;</italic>.001)<italic>.</italic> The results are represented in an effects plot in <xref rid="figure4" ref-type="fig">Figure 4</xref>. The left panel illustrates that an estimate of the probability of AI success for speakers with dysarthria is 0.78, but this value is not significantly different from the estimate of the probability of AI success for HCs (0.82; <italic>P</italic>=.23). The right panel illustrates an estimated dependence of the probability of AI success on sentence length, with each increase in sentence length decreasing AI success probability by an estimated 0.03.</p>
      <p>Percentage accuracy distributions by transcriber (human or AIA system) and speaker group are presented in <xref rid="figure5" ref-type="fig">Figure 5</xref>. The box plots in <xref rid="figure5" ref-type="fig">Figure 5</xref> indicate that the median accuracy score for speakers with dysarthria was farther from the median accuracy score for healthy controls as compared to the distance between the 2 medians for the human transcriber data. This finding suggests that the AIA system data may offer better discrimination and classification ability for speaker group.</p>
      <p>Confusion matrices recording the classification rates of discriminants based on human transcription data and AIA system data are presented in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Fitted logistic regression model coefficients.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <thead>
            <tr valign="top">
              <td>Effect</td>
              <td>Estimate</td>
              <td>SE</td>
              <td><italic>z</italic> value</td>
              <td><italic>P</italic> value</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Intercept</td>
              <td>3.14414</td>
              <td>0.44774</td>
              <td>7.022</td>
              <td>&#60;.001</td>
            </tr>
            <tr valign="top">
              <td>Speaker group</td>
              <td>–0.25525</td>
              <td>0.21156</td>
              <td>–1.207</td>
              <td>.23</td>
            </tr>
            <tr valign="top">
              <td>Sentence length</td>
              <td>–0.23658</td>
              <td>0.05763</td>
              <td>–4.105</td>
              <td>&#60;.001</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <fig id="figure4" position="float">
        <label>Figure 4</label>
        <caption>
          <p>Estimated effects and CIs from the logistic regression of probability of AI success as a function of (A) speaker group, (B) sentence length, and speaker random effect. AI: artificial intelligence; HC: healthy controls.</p>
        </caption>
        <graphic xlink:href="jmir_v24i10e40567_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure5" position="float">
        <label>Figure 5</label>
        <caption>
          <p>Box plots of the estimates of AIA system success by speaker category and transcriber: (A) human listener and (B) AIA system. AI: artificial intelligence; AIA: automatic intelligibility assessment; HC: healthy controls.</p>
        </caption>
        <graphic xlink:href="jmir_v24i10e40567_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Classification summary of the speakers based on linear discriminants fit to the human transcription data and automatic intelligibility assessment system data.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="120"/>
          <col width="220"/>
          <col width="220"/>
          <col width="220"/>
          <col width="220"/>
          <thead>
            <tr valign="top">
              <td>True group</td>
              <td colspan="4">Classified group via discriminant</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td colspan="2">Discriminant from human listener average data (overall predictive accuracy: 0.6)</td>
              <td colspan="2">Discriminant from artificial intelligence data (overall predictive accuracy: 0.675)</td>
            </tr>
            <tr valign="top">
              <td> </td>
              <td>HC<sup>a</sup></td>
              <td>PD<sup>b</sup></td>
              <td>HC</td>
              <td>PD</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>HC</td>
              <td>15</td>
              <td>5</td>
              <td>15</td>
              <td>5</td>
            </tr>
            <tr valign="top">
              <td>PD</td>
              <td>11</td>
              <td>9</td>
              <td>8</td>
              <td>12</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table3fn1">
            <p><sup>a</sup>HC: healthy control.</p>
          </fn>
          <fn id="table3fn2">
            <p><sup>b</sup>PD: Parkinson disease.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This study aimed to develop, pilot-test, and validate the use of a web-based app, <italic>Understand Me for Life</italic>, to automatically measure speech intelligibility in noise in speakers with hypokinetic dysarthria associated with PD. Additionally, a secondary objective of the study was to determine whether ASR could discriminate between the speech of healthy controls and that of speakers with dysarthria.</p>
        <p>Literature on ASR performance on clinical populations, especially those with motor speech disorders, is still sparse. To validate the use of speech-to-text technology to determine intelligibility accuracy scores for speakers with dysarthria, ASR performance was benchmarked relative to that of human transcribers [<xref ref-type="bibr" rid="ref19">19</xref>]. Results showed that the ASR system had an 80% chance of performing as well as or better than a human transcriber on any random speaker. The potential capacity of ASR to outperform human listeners has been shown in recent studies [<xref ref-type="bibr" rid="ref19">19</xref>], although further work is required with longer utterances and different speech tasks, as summarized in the limitations section below. Our findings also echo those reported with other clinical populations, such as those with a diagnosis of apraxia of speech and aphasia [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Additionally, our data provided no evidence that the mean probability of ASR success differed between the 2 groups of speakers, either a speaker with dysarthria or a healthy control. Thus, the success of the speech-to-text system did not depend on whether the speaker was neurologically healthy or presented with hypokinetic dysarthria associated with PD. It is important to acknowledge, however, that our speakers did not evidence dysarthria across all severity ranges; this limitation will be addressed in future work. Sentence length did influence ASR, with a decrease in accuracy observed for longer sentences, which was an expected result and is in agreement with prior literature [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        <p>The second aim of the study was to determine whether ASR could accurately discriminate between speakers with dysarthria and healthy controls. Results showed that both the human and the AIA system data provided the same classification rates for healthy controls (15/20, 75% correctly classified and 5/20, 25% incorrectly classified as speakers with dysarthria), hence evidencing equal specificity (ie, 75%). The AIA system data, however, yielded a slightly better classification success for speakers with dysarthria (12/20, 60% correct PD classifications compared to the human transcription data that only yielded 9/20, 45% correct PD classifications), which suggests stronger sensitivity than the one obtained for human transcribers (ie, 60% vs 45%). In traditional studies using human listeners, performance on intelligibility assessments has not shown significant differences between speakers with mild dysarthria secondary to PD and healthy controls [<xref ref-type="bibr" rid="ref33">33</xref>], hence suggesting that group classification based on intelligibility scores may depend on speech severity. In our study, AI correctly classified 12 speakers with dysarthria (out of 20), a result that could be explained by the severity levels of our sample ranging from mild to mild-to-moderate only.</p>
      </sec>
      <sec>
        <title>Limitations and Future Work</title>
        <p>The study’s limitations warrant future work in this research area. It should be noted that our sample of speakers with dysarthria did not include those with more severe speech deficits. Therefore, these results offer a preliminarily promising, albeit not conclusive, clinical tool for measuring intelligibility in individuals with dysarthria associated with PD. Nevertheless, ASR performance with a more diverse speech severity range in speakers with dysarthria associated with PD should be explored. It is likely that increased speech severity in individuals with PD would impact ASR, as this increase was also found in speakers with dysarthria associated with amyotrophic lateral sclerosis [<xref ref-type="bibr" rid="ref26">26</xref>]. An additional limitation from this study is that the speech stimuli were derived from read sentences rather than from conversational speech. Although sentences rendered a higher level of predictability and, thus, control, conversational speech would have greater ecological validity. Finally, we should also acknowledge that previously reported studies used different ASR methodology compared to this study and that, as discussed in Jacks et al [<xref ref-type="bibr" rid="ref18">18</xref>], ASR technology is in constant and rapid evolution, rendering any results on ASR in need of systematic reevaluation for the proper and valid use of ASR-assisted clinical tools.</p>
        <p>Our ongoing work is motivated by the concept of self-management, which, in the context of a chronic illness such as PD, has become increasingly relevant. Self-management relates to the patient’s ability to identify a given behavior (eg, voice changes) and react or problem-solve in accordance with such observation [<xref ref-type="bibr" rid="ref44">44</xref>]. Having the knowledge on how to respond to the worsening of disease symptoms and when to seek medical advice has been shown to be crucial contributors to patients’ well-being [<xref ref-type="bibr" rid="ref45">45</xref>]. The implementation of ASR in speech intelligibility assessment, therefore, can potentially serve to establish preventative measures before the onset of speech and intelligibility degradation and control measures (eg, referral to a speech therapist) if speech deficits already exist.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This study validated the use of ASR to measure intelligibility in real-life settings (ie, using background noise) in speakers with mild-to-moderate dysarthria associated with PD. Therefore, our preliminary data show that ASR has the potential to assess intelligibility in noise in this clinical population. Results hold promise for the use of AI as a future clinical tool to assist patients and speech and language therapists alike, although the full range of speech severity needs to be evaluated in future work, as well as the effect of different speaking tasks on ASR.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AIA</term>
          <def>
            <p>automatic intelligibility assessment</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ASR</term>
          <def>
            <p>automatic speech recognition</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">HC</term>
          <def>
            <p>healthy control</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">PD</term>
          <def>
            <p>Parkinson disease</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We wholeheartedly thank the participants in this study, their care partners, as well as our research assistant, Robert Seefeldt, for his priceless help across the different stages of the project. This project was funded by the Michael J. Fox Foundation for Parkinson’s Research (grant 001236; awarded to GM-G, the principal investigator).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dorsey</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Constantinescu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Biglan</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Holloway</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Kieburtz</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Marshall</surname>
              <given-names>FJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ravina</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Schifitto</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Siderowf</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tanner</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Projected number of people with Parkinson disease in the most populous nations, 2005 through 2030</article-title>
          <source>Neurology</source>
          <year>2007</year>
          <month>01</month>
          <day>30</day>
          <volume>68</volume>
          <issue>5</issue>
          <fpage>384</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1212/01.wnl.0000247740.47667.03</pub-id>
          <pub-id pub-id-type="medline">17082464</pub-id>
          <pub-id pub-id-type="pii">01.wnl.0000247740.47667.03</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marras</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Bower</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Roberts</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ritz</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>GW</given-names>
            </name>
            <name name-style="western">
              <surname>Abbott</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Savica</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Van Den Eeden</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Willis</surname>
              <given-names>AW</given-names>
            </name>
            <name name-style="western">
              <surname>Tanner</surname>
              <given-names>C</given-names>
            </name>
            <collab>Parkinson’s Foundation P4 Group</collab>
          </person-group>
          <article-title>Prevalence of Parkinson's disease across North America</article-title>
          <source>NPJ Parkinsons Dis</source>
          <year>2018</year>
          <month>07</month>
          <day>10</day>
          <volume>4</volume>
          <fpage>21</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41531-018-0058-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41531-018-0058-0</pub-id>
          <pub-id pub-id-type="medline">30003140</pub-id>
          <pub-id pub-id-type="pii">58</pub-id>
          <pub-id pub-id-type="pmcid">PMC6039505</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dorsey</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Sherer</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Okun</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Bloem</surname>
              <given-names>BR</given-names>
            </name>
          </person-group>
          <article-title>The emerging evidence of the Parkinson pandemic</article-title>
          <source>J Parkinsons Dis</source>
          <year>2018</year>
          <month>12</month>
          <day>18</day>
          <volume>8</volume>
          <issue>s1</issue>
          <fpage>S3</fpage>
          <lpage>S8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30584159"/>
          </comment>
          <pub-id pub-id-type="doi">10.3233/JPD-181474</pub-id>
          <pub-id pub-id-type="medline">30584159</pub-id>
          <pub-id pub-id-type="pii">JPD181474</pub-id>
          <pub-id pub-id-type="pmcid">PMC6311367</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wenning</surname>
              <given-names>GK</given-names>
            </name>
            <name name-style="western">
              <surname>Verny</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McKee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chaudhuri</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Jellinger</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Poewe</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Litvan</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Progression of dysarthria and dysphagia in postmortem-confirmed Parkinsonian disorders</article-title>
          <source>Arch Neurol</source>
          <year>2001</year>
          <month>02</month>
          <day>01</day>
          <volume>58</volume>
          <issue>2</issue>
          <fpage>259</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1001/archneur.58.2.259</pub-id>
          <pub-id pub-id-type="medline">11176964</pub-id>
          <pub-id pub-id-type="pii">noc00008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <source>Motor Speech Disorders: Substrates, Differential Diagnosis, and Management. 4th ed</source>
          <year>2020</year>
          <publisher-loc>Amsterdam, the Netherlands</publisher-loc>
          <publisher-name>Elsevier Mosby</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Rossi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>States</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>A community-based program for exercise and social participation for individuals with Parkinson's disease: a multidisciplinary model</article-title>
          <source>Perspect ASHA SIGs</source>
          <year>2020</year>
          <month>10</month>
          <day>23</day>
          <volume>5</volume>
          <issue>5</issue>
          <fpage>1290</fpage>
          <lpage>1296</lpage>
          <pub-id pub-id-type="doi">10.1044/2020_persp-20-00031</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chiu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Forrest</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The impact of lexical characteristics and noise on intelligibility of Parkinsonian speech</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2018</year>
          <month>04</month>
          <day>17</day>
          <volume>61</volume>
          <issue>4</issue>
          <fpage>837</fpage>
          <lpage>846</lpage>
          <pub-id pub-id-type="doi">10.1044/2017_JSLHR-S-17-0205</pub-id>
          <pub-id pub-id-type="medline">29587306</pub-id>
          <pub-id pub-id-type="pii">2677472</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McAuliffe</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Baylor</surname>
              <given-names>CR</given-names>
            </name>
            <name name-style="western">
              <surname>Yorkston</surname>
              <given-names>KM</given-names>
            </name>
          </person-group>
          <article-title>Variables associated with communicative participation in Parkinson's disease and its relationship to measures of health-related quality-of-life</article-title>
          <source>Int J Speech Lang Pathol</source>
          <year>2017</year>
          <month>08</month>
          <day>27</day>
          <volume>19</volume>
          <issue>4</issue>
          <fpage>407</fpage>
          <lpage>417</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/27347713"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/17549507.2016.1193900</pub-id>
          <pub-id pub-id-type="medline">27347713</pub-id>
          <pub-id pub-id-type="pmcid">PMC6190828</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Derosier</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Farber</surname>
              <given-names>RS</given-names>
            </name>
          </person-group>
          <article-title>Speech recognition software as an assistive device: a pilot study of user satisfaction and psychosocial impact</article-title>
          <source>Work</source>
          <year>2005</year>
          <volume>25</volume>
          <issue>2</issue>
          <fpage>125</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="medline">16131742</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Synnott</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McNaney</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Brito Zambrano</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kernohan</surname>
              <given-names>WG</given-names>
            </name>
          </person-group>
          <article-title>Attitudes toward the use of voice-assisted technologies among people with Parkinson disease: findings from a web-based survey</article-title>
          <source>JMIR Rehabil Assist Technol</source>
          <year>2021</year>
          <month>03</month>
          <day>11</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>e23006</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://rehab.jmir.org/2021/1/e23006/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/23006</pub-id>
          <pub-id pub-id-type="medline">33704072</pub-id>
          <pub-id pub-id-type="pii">v8i1e23006</pub-id>
          <pub-id pub-id-type="pmcid">PMC8082949</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kulkarni</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Synnott</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kernohan</surname>
              <given-names>WG</given-names>
            </name>
            <name name-style="western">
              <surname>McNaney</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Speech and language practitioners' experiences of commercially available voice-assisted technology: web-based survey study</article-title>
          <source>JMIR Rehabil Assist Technol</source>
          <year>2022</year>
          <month>01</month>
          <day>05</day>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>e29249</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://rehab.jmir.org/2022/1/e29249/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29249</pub-id>
          <pub-id pub-id-type="medline">34989694</pub-id>
          <pub-id pub-id-type="pii">v9i1e29249</pub-id>
          <pub-id pub-id-type="pmcid">PMC8771342</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kodish-Wachs</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Agassi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kenny</surname>
              <given-names>P</given-names>
              <suffix>III</suffix>
            </name>
            <name name-style="western">
              <surname>Overhage</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>A systematic comparison of contemporary automatic speech recognition engines for conversational clinical speech</article-title>
          <source>AMIA Annu Symp Proc</source>
          <year>2018</year>
          <month>12</month>
          <day>05</day>
          <volume>2018</volume>
          <fpage>683</fpage>
          <lpage>689</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30815110"/>
          </comment>
          <pub-id pub-id-type="medline">30815110</pub-id>
          <pub-id pub-id-type="pmcid">PMC6371385</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wisler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Berisha</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Liss</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>The relationship between perceptual disturbances in dysarthric speech and automatic speech recognition performance</article-title>
          <source>J Acoust Soc Am</source>
          <year>2016</year>
          <month>11</month>
          <volume>140</volume>
          <issue>5</issue>
          <fpage>EL416</fpage>
          <lpage>EL422</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/27908075"/>
          </comment>
          <pub-id pub-id-type="doi">10.1121/1.4967208</pub-id>
          <pub-id pub-id-type="medline">27908075</pub-id>
          <pub-id pub-id-type="pmcid">PMC6909999</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schuster</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Haderlein</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nöth</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lohscheller</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Eysholdt</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Rosanowski</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Intelligibility of laryngectomees' substitute speech: automatic speech recognition and subjective rating</article-title>
          <source>Eur Arch Otorhinolaryngol</source>
          <year>2006</year>
          <month>02</month>
          <day>07</day>
          <volume>263</volume>
          <issue>2</issue>
          <fpage>188</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1007/s00405-005-0974-6</pub-id>
          <pub-id pub-id-type="medline">16001246</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maier</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haderlein</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Stelzle</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Nöth</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Nkenke</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Rosanowski</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Schützenberger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schuster</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic speech recognition systems for the evaluation of voice and speech disorders in head and neck cancer</article-title>
          <source>EURASIP J Audio Speech Music Process</source>
          <year>2010</year>
          <month>12</month>
          <day>01</day>
          <volume>2010</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>7</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.5555/2907324.2907381"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maier</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nöth</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Nkenke</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Schuster</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automatic assessment of children's speech with cleft lip and palate</article-title>
          <year>2006</year>
          <month>10</month>
          <conf-name>IS-LTC 2006: 5th Slovenian and 1st International Conference on Language Technologies</conf-name>
          <conf-date>October 9-10, 2006</conf-date>
          <conf-loc>Ljubljana, Slovenia</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://nl.ijs.si/is-ltc06/proc/06_Maier.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ballard</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Etter</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Monroe</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tien Tan</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Feasibility of automatic speech recognition for providing feedback during tablet-based treatment for apraxia of speech plus aphasia</article-title>
          <source>Am J Speech Lang Pathol</source>
          <year>2019</year>
          <month>07</month>
          <day>15</day>
          <volume>28</volume>
          <issue>2S</issue>
          <fpage>818</fpage>
          <lpage>834</lpage>
          <pub-id pub-id-type="doi">10.1044/2018_AJSLP-MSC18-18-0109</pub-id>
          <pub-id pub-id-type="medline">31306595</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jacks</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haley</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Bishop</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Harmon</surname>
              <given-names>TG</given-names>
            </name>
          </person-group>
          <article-title>Automated speech recognition in adult stroke survivors: comparing human and computer transcriptions</article-title>
          <source>Folia Phoniatr Logop</source>
          <year>2019</year>
          <month>05</month>
          <day>22</day>
          <volume>71</volume>
          <issue>5-6</issue>
          <fpage>286</fpage>
          <lpage>296</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.karger.com/?DOI=10.1159/000499156"/>
          </comment>
          <pub-id pub-id-type="doi">10.1159/000499156</pub-id>
          <pub-id pub-id-type="medline">31117105</pub-id>
          <pub-id pub-id-type="pii">000499156</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Green</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>MacDonald</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Cattiau</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Heywood</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cave</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Seaver</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ladewig</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Tobin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Brenner</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>PC</given-names>
            </name>
            <name name-style="western">
              <surname>Tomanek</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Automatic speech recognition of disordered speech: personalized models now outperforming human listeners on short phrases</article-title>
          <year>2021</year>
          <conf-name>Interspeech 2021</conf-name>
          <conf-date>August 30 to September 3, 2021</conf-date>
          <conf-loc>Brno, Czechia</conf-loc>
          <fpage>4778</fpage>
          <lpage>4782</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2021-1384</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>MacDonald</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Cattiau</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Heywood</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cave</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Seaver</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ladewig</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Tobin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Brenner</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>PC</given-names>
            </name>
            <name name-style="western">
              <surname>Green</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Tomanek</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Disordered speech data collection: lessons learned at 1 million utterances from Project Euphonia</article-title>
          <year>2021</year>
          <conf-name>Interspeech 2021</conf-name>
          <conf-date>August 30 to September 3, 2021</conf-date>
          <conf-loc>Brno, Czechia</conf-loc>
          <fpage>4833</fpage>
          <lpage>4837</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2021-697</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fox</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Green</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hain</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A comparative study of adaptive, automatic recognition of disordered speech</article-title>
          <year>2012</year>
          <conf-name>Interspeech 2012: 13th Annual Conference of the International Speech Communication Association (ISCA)</conf-name>
          <conf-date>September 9-13, 2012</conf-date>
          <conf-loc>Portland, OR</conf-loc>
          <fpage>1776</fpage>
          <lpage>1779</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2012-484</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>HV</given-names>
            </name>
            <name name-style="western">
              <surname>Hasegawa-Johnson</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Acoustic model adaptation using in-domain background models for dysarthric speech recognition</article-title>
          <source>Comput Speech Lang</source>
          <year>2013</year>
          <month>09</month>
          <volume>27</volume>
          <issue>6</issue>
          <fpage>1147</fpage>
          <lpage>1162</lpage>
          <pub-id pub-id-type="doi">10.1016/j.csl.2012.10.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vásquez-Correa</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Orozco-Arroyave</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Bocklet</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nöth</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Towards an automatic evaluation of the dysarthria level of patients with Parkinson's disease</article-title>
          <source>J Commun Disord</source>
          <year>2018</year>
          <month>11</month>
          <volume>76</volume>
          <fpage>21</fpage>
          <lpage>36</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jcomdis.2018.08.002</pub-id>
          <pub-id pub-id-type="medline">30149241</pub-id>
          <pub-id pub-id-type="pii">S0021-9924(17)30076-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Le</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Licata</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mower Provost</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Automatic quantitative analysis of spontaneous aphasic speech</article-title>
          <source>Speech Commun</source>
          <year>2018</year>
          <month>06</month>
          <volume>100</volume>
          <fpage>1</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.1016/j.specom.2018.04.001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dimauro</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Di Nicola</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bevilacqua</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Caivano</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Girardi</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Assessment of speech intelligibility in Parkinson's disease using a speech-to-text system</article-title>
          <source>IEEE Access</source>
          <year>2017</year>
          <month>10</month>
          <day>17</day>
          <volume>5</volume>
          <fpage>22199</fpage>
          <lpage>22208</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2017.2762475</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gutz</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Stipancic</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Yunusova</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Berry</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Green</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Validity of off-the-shelf automatic speech recognition for assessing speech intelligibility and speech severity in speakers with amyotrophic lateral sclerosis</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2022</year>
          <month>06</month>
          <day>08</day>
          <volume>65</volume>
          <issue>6</issue>
          <fpage>2128</fpage>
          <lpage>2143</lpage>
          <pub-id pub-id-type="doi">10.1044/2022_JSLHR-21-00589</pub-id>
          <pub-id pub-id-type="medline">35623334</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goudarzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Automatic speech recognition in noise for Parkinson's disease: a pilot study</article-title>
          <source>Front Artif Intell</source>
          <year>2021</year>
          <month>12</month>
          <day>22</day>
          <volume>4</volume>
          <fpage>809321</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/frai.2021.809321"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frai.2021.809321</pub-id>
          <pub-id pub-id-type="medline">35005616</pub-id>
          <pub-id pub-id-type="pmcid">PMC8727902</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasreddine</surname>
              <given-names>ZS</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Bédirian</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Charbonneau</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Whitehead</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Collin</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Cummings</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Chertkow</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>The Montreal Cognitive Assessment, MoCA: a brief screening tool for mild cognitive impairment</article-title>
          <source>J Am Geriatr Soc</source>
          <year>2005</year>
          <month>04</month>
          <volume>53</volume>
          <issue>4</issue>
          <fpage>695</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1532-5415.2005.53221.x</pub-id>
          <pub-id pub-id-type="medline">15817019</pub-id>
          <pub-id pub-id-type="pii">JGS53221</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fletcher</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>McAuliffe</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Lansford</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Liss</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Assessing vowel centralization in dysarthria: a comparison of methods</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2017</year>
          <month>02</month>
          <day>01</day>
          <volume>60</volume>
          <issue>2</issue>
          <fpage>341</fpage>
          <lpage>354</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28124069"/>
          </comment>
          <pub-id pub-id-type="doi">10.1044/2016_JSLHR-S-15-0355</pub-id>
          <pub-id pub-id-type="medline">28124069</pub-id>
          <pub-id pub-id-type="pii">2599995</pub-id>
          <pub-id pub-id-type="pmcid">PMC6194930</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Balota</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Yap</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hutchison</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Cortese</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kessler</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Loftis</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Neely</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Simpson</surname>
              <given-names>GB</given-names>
            </name>
            <name name-style="western">
              <surname>Treiman</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>The English Lexicon Project</article-title>
          <source>Behav Res Methods</source>
          <year>2007</year>
          <month>08</month>
          <volume>39</volume>
          <issue>3</issue>
          <fpage>445</fpage>
          <lpage>59</lpage>
          <pub-id pub-id-type="doi">10.3758/bf03193014</pub-id>
          <pub-id pub-id-type="medline">17958156</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>ES</given-names>
            </name>
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>YM</given-names>
            </name>
            <name name-style="western">
              <surname>Campanelli</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>MacLeod</surname>
              <given-names>AAN</given-names>
            </name>
            <name name-style="western">
              <surname>Escorial</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Maillart</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Effects of speech cues in French-speaking children with dysarthria</article-title>
          <source>Int J Lang Commun Disord</source>
          <year>2020</year>
          <month>05</month>
          <day>20</day>
          <volume>55</volume>
          <issue>3</issue>
          <fpage>401</fpage>
          <lpage>416</lpage>
          <pub-id pub-id-type="doi">10.1111/1460-6984.12526</pub-id>
          <pub-id pub-id-type="medline">32077196</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Keller</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Escorial</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>ES</given-names>
            </name>
          </person-group>
          <article-title>Speech treatment effects on narrative intelligibility in French-speaking children with dysarthria</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2021</year>
          <month>06</month>
          <day>18</day>
          <volume>64</volume>
          <issue>6S</issue>
          <fpage>2154</fpage>
          <lpage>2168</lpage>
          <pub-id pub-id-type="doi">10.1044/2020_JSLHR-20-00258</pub-id>
          <pub-id pub-id-type="medline">33719503</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chiu</surname>
              <given-names>YF</given-names>
            </name>
            <name name-style="western">
              <surname>Neel</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Predicting intelligibility deficits in Parkinson's disease with perceptual speech ratings</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2020</year>
          <month>02</month>
          <day>26</day>
          <volume>63</volume>
          <issue>2</issue>
          <fpage>433</fpage>
          <lpage>443</lpage>
          <pub-id pub-id-type="doi">10.1044/2019_JSLHR-19-00134</pub-id>
          <pub-id pub-id-type="medline">32097080</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maas</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Robin</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Austermann Hula</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Freedman</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Wulf</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ballard</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Principles of motor learning in treatment of motor speech disorders</article-title>
          <source>Am J Speech Lang Pathol</source>
          <year>2008</year>
          <month>08</month>
          <volume>17</volume>
          <issue>3</issue>
          <fpage>277</fpage>
          <lpage>98</lpage>
          <pub-id pub-id-type="doi">10.1044/1058-0360(2008/025)</pub-id>
          <pub-id pub-id-type="medline">18663111</pub-id>
          <pub-id pub-id-type="pii">17/3/277</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fontan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tardieu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gaillard</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Woisard</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ruiz</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Relationship between speech intelligibility and speech comprehension in babble noise</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2015</year>
          <month>06</month>
          <volume>58</volume>
          <issue>3</issue>
          <fpage>977</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1044/2015_JSLHR-H-13-0335</pub-id>
          <pub-id pub-id-type="medline">25809922</pub-id>
          <pub-id pub-id-type="pii">2212277</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Goudarzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bayés</surname>
              <given-names>À</given-names>
            </name>
            <name name-style="western">
              <surname>McAuliffe</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bulté</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>ES</given-names>
            </name>
          </person-group>
          <article-title>The effects of intensive speech treatment on conversational intelligibility in Spanish speakers with Parkinson's disease</article-title>
          <source>Am J Speech Lang Pathol</source>
          <year>2018</year>
          <month>02</month>
          <day>06</day>
          <volume>27</volume>
          <issue>1</issue>
          <fpage>154</fpage>
          <lpage>165</lpage>
          <pub-id pub-id-type="doi">10.1044/2017_AJSLP-17-0032</pub-id>
          <pub-id pub-id-type="medline">29351354</pub-id>
          <pub-id pub-id-type="pii">2670656</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>ANSI S3.6-2004: specifications for audiometers</article-title>
          <source>American National Standards Institute</source>
          <year>2004</year>
          <month>05</month>
          <day>13</day>
          <access-date>2022-10-03</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://webstore.ansi.org/Standards/ASA/ansis32004">https://webstore.ansi.org/Standards/ASA/ansis32004</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>ET</given-names>
            </name>
          </person-group>
          <article-title>The hidden dimension</article-title>
          <source>Leonardo</source>
          <year>1973</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>94</fpage>
          <pub-id pub-id-type="doi">10.2307/1572461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>ES</given-names>
            </name>
            <name name-style="western">
              <surname>Moya-Galé</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>YHM</given-names>
            </name>
            <name name-style="western">
              <surname>Freeman</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Forrest</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Brin</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Ramig</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>The effects of intensive speech treatment on intelligibility in Parkinson's disease: a randomised controlled trial</article-title>
          <source>EClinicalMedicine</source>
          <year>2020</year>
          <month>07</month>
          <volume>24</volume>
          <fpage>100429</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2589-5370(20)30173-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.eclinm.2020.100429</pub-id>
          <pub-id pub-id-type="medline">32639484</pub-id>
          <pub-id pub-id-type="pii">S2589-5370(20)30173-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC7327886</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Russis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Corno</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>On the impact of dysarthric speech on contemporary ASR cloud platforms</article-title>
          <source>J Reliable Intell Environ</source>
          <year>2019</year>
          <month>07</month>
          <day>06</day>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>163</fpage>
          <lpage>172</lpage>
          <pub-id pub-id-type="doi">10.1007/s40860-019-00085-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cannito</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Suiter</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Beverly</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chorna</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pfeiffer</surname>
              <given-names>RM</given-names>
            </name>
          </person-group>
          <article-title>Sentence intelligibility before and after voice treatment in speakers with idiopathic Parkinson's disease</article-title>
          <source>J Voice</source>
          <year>2012</year>
          <month>03</month>
          <volume>26</volume>
          <issue>2</issue>
          <fpage>214</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2011.08.014</pub-id>
          <pub-id pub-id-type="medline">22209057</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(11)00154-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>R Core Team</collab>
          </person-group>
          <article-title>R: a language and environment for statistical computing</article-title>
          <source>R Foundation for Statistical Computing</source>
          <access-date>2022-10-03</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.r-project.org/">https://www.r-project.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Venables</surname>
              <given-names>WN</given-names>
            </name>
            <name name-style="western">
              <surname>Ripley</surname>
              <given-names>BD</given-names>
            </name>
          </person-group>
          <source>Modern Applied Statistics with S (MASS). 4th ed</source>
          <year>2002</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lorig</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Self-management of chronic illness: a model for the future</article-title>
          <source>Generations</source>
          <year>1993</year>
          <month>10</month>
          <day>03</day>
          <volume>17</volume>
          <issue>3</issue>
          <fpage>11</fpage>
          <lpage>14</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jstor.org/stable/44877774"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hayes</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Identifying important issues for people with Parkinson's disease</article-title>
          <source>Br J Nurs</source>
          <year>2002</year>
          <month>01</month>
          <day>17</day>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>91</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.12968/bjon.2002.11.2.9309</pub-id>
          <pub-id pub-id-type="medline">11823736</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
