<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v23i9e25837</article-id>
      <article-id pub-id-type="pmid">34586074</article-id>
      <article-id pub-id-type="doi">10.2196/25837</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Development, Feasibility, Acceptability, and Utility of an Expressive Speech-Enabled Digital Health Agent to Deliver Online, Brief Motivational Interviewing for Alcohol Misuse: Descriptive Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Kramer</surname>
            <given-names>Lean</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Satterfield</surname>
            <given-names>Jason</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Emezue</surname>
            <given-names>Chuka</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Boustani</surname>
            <given-names>Maya</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Psychology</institution>
            <institution>Loma Linda University</institution>
            <addr-line>11130 Anderson St</addr-line>
            <addr-line>Suite 117</addr-line>
            <addr-line>Loma Linda, CA, 92350</addr-line>
            <country>United States</country>
            <phone>1 909 558 7680</phone>
            <email>mboustani@llu.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7739-7857</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Lunn</surname>
            <given-names>Stephanie</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3840-1822</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Visser</surname>
            <given-names>Ubbo</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1254-2566</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lisetti</surname>
            <given-names>Christine</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0756-133X</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Psychology</institution>
        <institution>Loma Linda University</institution>
        <addr-line>Loma Linda, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Knight Foundation School of Computing and Information Sciences</institution>
        <institution>Florida International University</institution>
        <addr-line>Miami, FL</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Computer Science</institution>
        <institution>University of Miami</institution>
        <addr-line>Miami, FL</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Maya Boustani <email>mboustani@llu.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>9</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>29</day>
        <month>9</month>
        <year>2021</year>
      </pub-date>
      <volume>23</volume>
      <issue>9</issue>
      <elocation-id>e25837</elocation-id>
      <history>
        <date date-type="received">
          <day>17</day>
          <month>11</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>9</day>
          <month>12</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>26</day>
          <month>5</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>29</day>
          <month>5</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Maya Boustani, Stephanie Lunn, Ubbo Visser, Christine Lisetti. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 29.09.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2021/9/e25837" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Digital health agents — embodied conversational agents designed specifically for health interventions — provide a promising alternative or supplement to behavioral health services by reducing barriers to access to care.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>Our goals were to (1) develop an expressive, speech-enabled digital health agent operating in a 3-dimensional virtual environment to deliver a brief behavioral health intervention over the internet to reduce alcohol use and to (2) understand its acceptability, feasibility, and utility with its end users.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We developed an expressive, speech-enabled digital health agent with facial expressions and body gestures operating in a 3-dimensional virtual office and able to deliver a brief behavioral health intervention over the internet to reduce alcohol use. We then asked 51 alcohol users to report on the digital health agent acceptability, feasibility, and utility.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The developed digital health agent uses speech recognition and a model of empathetic verbal and nonverbal behaviors to engage the user, and its performance enabled it to successfully deliver a brief behavioral health intervention over the internet to reduce alcohol use. Descriptive statistics indicated that participants had overwhelmingly positive experiences with the digital health agent, including engagement with the technology, acceptance, perceived utility, and intent to use the technology. Illustrative qualitative quotes provided further insight about the potential reach and impact of digital health agents in behavioral health care.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Web-delivered interventions delivered by expressive, speech-enabled digital health agents may provide an exciting complement or alternative to traditional one-on-one treatment. They may be especially helpful for hard-to-reach communities with behavioral workforce shortages.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>digital health agent</kwd>
        <kwd>virtual health assistant</kwd>
        <kwd>online intervention</kwd>
        <kwd>alcohol abuse</kwd>
        <kwd>brief intervention</kwd>
        <kwd>motivational interviewing</kwd>
        <kwd>intelligent virtual agent</kwd>
        <kwd>embodied conversational agent</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Alcohol use disorder (AUD) affects 10%-20% of men and 5%-10% of women over their lifetime, and 26.4% of adults engage in binge drinking. AUD is the third leading preventable cause of death [<xref ref-type="bibr" rid="ref1">1</xref>], with driving under the influence accounting for 31% of driving fatalities. In addition to personal costs associated with AUD, alcohol abuse costs the US economy an average of $249 billion per year. Motivational interviewing (MI) [<xref ref-type="bibr" rid="ref2">2</xref>] is an effective and scalable intervention for AUD [<xref ref-type="bibr" rid="ref3">3</xref>]. It is a client-centered counseling style that is directive and elicits behavior change by helping clients explore ambivalence and resolve it in order to develop <italic>intrinsic</italic> motivation to change. Adaptations of MI have burgeoned to meet the need for motivational interventions that are brief and thus compatible within primary care settings [<xref ref-type="bibr" rid="ref4">4</xref>]. Brief motivational interviewing (BMI) interventions include MI’s style of communication (communicating empathy, increasing discrepancy, rolling with resistance, and supporting self-efficacy) with the common underlying elements of effective brief interventions (eg, feedback, menus of options for changing respectful of current readiness to change, supportive advice). BMI can be delivered in multiple settings, as both a standalone intervention and in combination with other strategies for substance use disorders, such as cognitive-behavioral therapy, and has been found to be effective across meta-analyses [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Despite the high rates of alcohol use and availability of these effective interventions, only 1 in 10 individuals with AUD receive care [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      </sec>
      <sec>
        <title>Barriers to Care</title>
        <p>A number of barriers prevent individuals from accessing the treatment they need, including acknowledging the need for treatment [<xref ref-type="bibr" rid="ref8">8</xref>], availability of trained providers [<xref ref-type="bibr" rid="ref9">9</xref>], proximity of providers, access to transportation, affordability, insurance coverage, scheduling, and stigma [<xref ref-type="bibr" rid="ref10">10</xref>]. Individuals living in rural settings or in poverty — where alcohol abuse is more prominent — are disproportionately impacted by these barriers [<xref ref-type="bibr" rid="ref9">9</xref>]. In rural settings in particular, anonymity is more difficult [<xref ref-type="bibr" rid="ref11">11</xref>] and increases stigma around help-seeking. Lack of flexible scheduling options for individuals who work full time further exacerbates barriers to treatment [<xref ref-type="bibr" rid="ref9">9</xref>]. Finally, when individuals do access treatment, it is not always an evidence-based treatment — further complicating issues around access to quality care.</p>
      </sec>
      <sec>
        <title>Digital Health Interventions</title>
        <p>Digital health interventions (DHIs) are interventions that are delivered via digital platforms (eg, applications, websites, mobile devices). Unlike telehealth (where a live provider meets with a consumer via a video chat), DHIs do not rely on a human provider to deliver services. As such, they have the potential to reduce a number of barriers associated with location (can be accessed from anywhere), scheduling (can be accessed at any time), stigma (can be accessed anonymously from the privacy of one’s home), and cost (most are affordable or free). Past research indicates that consumers tend to be more truthful when disclosing possibly stigmatizing information such as a drug or alcohol disorder to a computer versus a human [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref16">16</xref>] — providing another advantage to DHIs as they can be more informed about consumers’ at-risk behaviors than a human provider.</p>
        <p>A review of DHIs [<xref ref-type="bibr" rid="ref17">17</xref>] indicates that these interventions range from brief screening tools to several months of structured activities. Content includes screening and self-monitoring, personalized normative feedback, goal-setting activities, and interactive journaling. Benefits include reductions in alcohol consumption and consequences of heavy drinking. Despite all the benefits associated with DHIs, they are associated with high dropout rates [<xref ref-type="bibr" rid="ref18">18</xref>]. For instance, a systematic review of the use of mental health support smartphone applications indicates that only 4% of users engage daily with the applications [<xref ref-type="bibr" rid="ref19">19</xref>]. Researchers suggest that the use of a DHI without the support or recommendation of a mental health professional may limit its use [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Mohr et al [<xref ref-type="bibr" rid="ref21">21</xref>] pointed out that improvement in mental health conditions tends to require continued behavior change over many weeks or months, yet mental health technologies are mainly didactic, thereby not ideal for supporting engagement and behavior change. Most behavioral health technologies require some human backing from a mental health professional to sustain engagement. Qualitative studies point to lack of motivation due to frustrating technology, inadequate content, competing priorities, and lack of face-to-face encounters [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. This limits the promise of DHIs as a scalable solution to increase access to care, which our approach aims to improve.</p>
        <p>A review of DHIs designed specifically for MI [<xref ref-type="bibr" rid="ref24">24</xref>] further points out that, given the important emphasis on the interpersonal therapeutic communication style that is a core aspect of MI, delivering MI through these different modalities is difficult. For instance, can the “MI spirit,” or relational aspects, happen digitally? Therefore, comprehending the type of technology used to deliver relationally focused treatments provides an understanding of how technology may be used to replace face-to-face contact. The study found that DHIs for MI vary greatly in terms of technology and richness of the media used, ranging from text-only to audio files, video files, and interactive animated characters, and that the most common feature of these technologies was personalized feedback to the participant based on their input. Only a subset of a few programs used videos (eg, a “video-doctor” actor playing a doctor’s responses in MI style) or animations (eg, a robot’s head with facial expressions supporting participants during the intervention). These media were always developed with the aim to mirror interpersonal communication. Our current focus on developing and evaluating 3D virtual characters able to deliver a BMI, with appropriate facial expressions, body gestures, speech synthesis, and speech recognition in real time, aims at providing insight into how technology may be used to replace face-to-face contact.</p>
        <p>The review by Shingleton and Palfai [<xref ref-type="bibr" rid="ref24">24</xref>] also found that, while surveyed articles explained methods for some aspects (eg, automatic computer prompts, chat rooms, emails, videos, animated characters) to deliver MI, most articles did not explain how they translated MI principles into the DHI nor whether or how the relational components were resolved. Translating aspects that require the MI spirit such as “expressing empathy” or “collaboration” to technology — versus consolidating commitment to change and developing discrepancy, on which most studies focused — is particularly difficult to implement in a piece of software. One conclusion was that future researchers need to detail both, not only how the technical aspects (eg, chat rooms, emails) are delivered but also how the relational aspects (eg, emoticons, videos of talking narrators) are delivered in order to increase the human-like discourse with the DHI. Asking questions to help understand how participants felt about and during the interaction was also encouraged as an important “soft” outcome to uncover ways to increase the “spirit” of MI within technology. As highlighted by Mohr et al [<xref ref-type="bibr" rid="ref21">21</xref>], while usability testing has increased in recent years, the design of DHIs has generally not included input from end users. Our focus on technology outcomes in this article aimed at providing insight into these “soft” outcomes, by explicitly asking users of our DHI-specific survey questions about their experience with an expressive, animated embodied conversational agent (ECA) in order to inform the impact of our DHI current design, our future redesigns, and other researchers’ DHIs.</p>
      </sec>
      <sec>
        <title>Embodied Conversational Agents</title>
        <p>ECAs (also known as virtual intelligent agents or virtual humans) are simulated human characters that may have the potential to increase consumer engagement in DHIs [<xref ref-type="bibr" rid="ref25">25</xref>]. Unlike avatars — which are virtual entities that represent and are controlled by the user (popular in video gaming) — ECAs are virtual entities of their own that interact with a consumer autonomously and anonymously. They are developed with the aim to look, sound, and behave as closely to humans as possible. Their ability to hold conversations is still limited [<xref ref-type="bibr" rid="ref26">26</xref>] but advancements in natural language processing and artificial intelligence (AI) hold promise in the future of ECAs as an alternative solution to traditional therapy for mental health and substance abuse concerns [<xref ref-type="bibr" rid="ref27">27</xref>]. ECAs have the ability to use sophisticated multimodal communication to build rapport [<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref41">41</xref>], communicate empathically [<xref ref-type="bibr" rid="ref32">32</xref>-<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref38">38</xref>], and engage in social talk [<xref ref-type="bibr" rid="ref42">42</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. Despite the promise of ECAs, research around the acceptability, feasibility, and utility of such technology by consumers of behavioral health interventions is limited. 
Exceptions can be found in a few studies using 3D ECAs with realistic animated facial expressions and mirroring of the users’ facial expressions [<xref ref-type="bibr" rid="ref25">25</xref>], a study including an ECA with a dialog management system allowing users to answer freely to the ECA (albeit without full robustness for broad dissemination without synchronous human support) [<xref ref-type="bibr" rid="ref26">26</xref>], and a few others using simple ECAs [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref50">50</xref>].</p>
        <p>In spite of their success, however, ECA development did not scale with the now abundant internet devices (smartphones, laptops) and the latest progress in 3D graphics. Some attempts have been made to build web-based, 3D ECAs [<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref53">53</xref>]. However, their implementation is still very basic, and they do not offer an integrated framework for web-based ECA development, including the ability to recognize and synthesize social cues in real time during spoken dialog, which is a significant technical challenge and which our ECA provides.</p>
      </sec>
      <sec>
        <title>Current Study</title>
        <p>This study aimed to fill the gap in knowledge of using ECAs in behavioral health contexts by establishing the acceptability, feasibility, and utility of using ECAs by consumers undergoing a BMI intervention for alcohol abuse. BMIs are highly structured (<italic>assessment</italic> of, followed by normative <italic>feedback</italic>, then <italic>menu</italic> of change options), making them amenable to delivery via DHI [<xref ref-type="bibr" rid="ref22">22</xref>], particularly if the “MI spirit,” or relational components, can be captured without face-to-face contact. One such BMI, namely the Drinker’s Check-Up (DCU) [<xref ref-type="bibr" rid="ref2">2</xref>] is the intervention used for this work. DCU has been computerized as a menu-based, text-only program delivered online that targets alcohol abuse, reducing drinking by an average of 50% at a 12-month follow-up [<xref ref-type="bibr" rid="ref50">50</xref>]. The DCU is one of the 2 English-language, web-based DHIs designed for the public that have been tested in randomized controlled trials (RCTs) [<xref ref-type="bibr" rid="ref7">7</xref>]. We therefore chose to study how the delivery of the DCU by an ECA will be perceived by its users, given that its nonverbal and other media features aim to address the observed limitations of the use of avatars in DHIs for MI that do not strengthen the social relationship with the user [<xref ref-type="bibr" rid="ref50">50</xref>].</p>
        <p>Using the technology acceptance model (TAM) [<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>] to guide our work, this study enabled us to determine if the ECA designed by our team using the empathic embodied virtual agent (eEVA; see <xref rid="figure1" ref-type="fig">Figure 1</xref> and <xref rid="figure2" ref-type="fig">Figure 2</xref>) framework for building digital health agents [<xref ref-type="bibr" rid="ref56">56</xref>] has enough personal characteristics and social abilities (eg, open-minded, supportive, respectful, friendly) to give users a positive experience (acceptability). The TAM stipulates that user acceptance can be predicted by the perceived usefulness (utility) and perceived ease of use (feasibility) of the technology. As such, we were interested in learning more about consumers’ perceived positive experience (acceptability), usefulness of eEVA (utility), and ease of use (feasibility) to better understand acceptability and potential for adoption of the technology. Having technology that consumers like and find easy to use and helpful increases the potential for adoption, which, in turn, increases access to care. Increases in access to care have the potential to improve health outcomes for alcohol users. Prior studies have found that MI for alcohol use (including online delivery via textual interface) improves health outcomes [<xref ref-type="bibr" rid="ref57">57</xref>].</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Our empathic embodied virtual agent (eEVA) delivering a brief motivational interviewing behavior change session.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e25837_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Empathic embodied virtual agent (eEVA) system overview. API: application programming interface; DCU: Drinker’s Check-Up; NVB: nonverbal behavior.</p>
          </caption>
          <graphic xlink:href="jmir_v23i9e25837_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Intervention</title>
        <sec>
          <title>DCU</title>
          <p>The intervention is based on DCU — an evidence-based intervention that uses strategies from MI [<xref ref-type="bibr" rid="ref57">57</xref>]. First, users provide detailed information about their drinking (eg, own drinking patterns or issues, family history of alcohol use). Next, they receive individualized feedback about their drinking habits, including information about risk factors and consequences [<xref ref-type="bibr" rid="ref58">58</xref>]. Finally, they resolve their ambivalence about whether to change their drinking, plan for a change, and set goals for change. The intervention provides resources to help users with changing their drinking [<xref ref-type="bibr" rid="ref59">59</xref>]. However, the DCU does not tell the clients what to do or not to do — it is up to the user whether they want to change their alcohol use. The DCU has been studied extensively and led to reductions in the quantity and frequency of drinking by 50% throughout a 12-month follow-up period, when compared to a waitlist control group [<xref ref-type="bibr" rid="ref57">57</xref>]. The DCU is 1 of the 2 web-based DHIs that have the strongest evidence supporting their efficacy based on RCTs [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
          <p>In this study, the DCU was delivered via an ECA (namely eEVA) rather than via its traditional textual interface. As a BMI, the eEVA intervention combines MI style of communication with the common underlying elements of effective brief interventions characterized by the acronym FRAMES [<xref ref-type="bibr" rid="ref2">2</xref>]: <italic>Feedback</italic> about client’s individual status is personalized and stored in a user model database, keeping a record of users’ answers for the next session(s); <italic>Responsibility</italic> for changing is left with the individual, and the language used throughout the intervention reflects this (eg, “I will not pressure you in any way”); <italic>Advice</italic> is provided in a supportive manner, with empathic choice of words and supportive body gestures (eg, leaning forward, head nodding); <italic>Menus</italic> of different options for changing that respect an individual’s readiness to change are offered; <italic>Empathic</italic> style of communication is central to the individual-clinician relationship, and it is conveyed by the ECA’s verbal utterances (eg, spoken reflections), nonverbal behavior (NVB; eg, smiling facial expressions, lean forwards, hand flips, nodding at appropriate times), and empathic choice of wording (eg, “It might be surprising to you to know that you are in the top percentile in drinking compared to people of your gender and age; you might want to review your answers again …”); and <italic>Self-efficacy</italic> is nurtured and emphasized throughout, including with choice of words and positive facial expressions (eg, various head nods and smiling facial expressions).</p>
        </sec>
        <sec>
          <title>Technical Implementation of the Intervention</title>
          <p>Our eEVA framework (shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>) provides (1) a realistic 3D WebGL graphics virtual environment with a realistic virtual office environment that can be “inhabited” by 1 of the 25 available racially diverse ECAs (shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>), each with physiologically realistic Facial Action Coding System–validated facial expression animations and full body animations; (2) real-time speech recognition of the user’s answers; (3) text and multiple choice input; (4) voice synthesis for the ECA’s spoken utterances; (5) ECA’s lip synchronization between phonemes and visemes; (6) ECA’s adaptive nonverbal responses such as head nods or leaning forward depending upon the utterance dialog act; (7) configurable dialogue content; and (8) ability to capture and process users’ social cues such as facial expression recognition (which will be enabled in a future study).</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Menu of diverse empathic embodied virtual agent (eEVA) embodied conversational agents consumers can select.</p>
            </caption>
            <graphic xlink:href="jmir_v23i9e25837_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>Technically speaking, the framework consists of 3 main components. First, the application layer consists of a modular client-side JavaScript mainframe that controls the multimodal user interface, audio and video input, graphical user interface (GUI) interaction, and services such as speech recognition and speech synthesis. Second, the JavaScript mainframe handles execution of a scenario (the content of the DCU in this study) — a collection of state machines that are created by developers. Third, the scenario states can be constructed to pull information (eg, the ECA’s speech, graphics to show) from the data layer — a database of content.</p>
          <p>The backbone of the client-side application is a JavaScript framework that handles the formation of a group of modules and the communication between them. Each module then implements various functionalities, including gaining feedback from the user (eg, asking to access microphone and camera) and processing input information (eg, analyzing users’ responses, extracting facial expressions if desired); determining how to answer the user (eg, words agent should say, NVBs); and answering the user through a multimodal 3D-embodied ECA, with speech synthesis, NVBs, and multimedia content (eg, text, images, and videos).</p>
          <p>This results in an interactive online application that can run on numerous platforms such as desktop, cell phone, autonomous robotic agent, and potentially smartwatch integrations (<xref rid="figure4" ref-type="fig">Figure 4</xref>). In addition, the user can also choose between a collection of 3D virtual characters to interact with — of different genders, races, and appearances. To personalize the eEVA system further, favorite chosen characters are remembered and displayed after login during the next interaction with the system. Distributing the framework core (eg, 3D character, perception, behavior) to consumer devices makes this technology scalable, with little to no overhead with additional users. Computer-intensive functionality such as speech and face recognition is asynchronous via web services or with built-in functionalities in the browser.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>Empathic embodied virtual agent (eEVA) running on different platforms: (a) desktop, (b) mobile phone, (c) autonomous robot, (d) smartwatch concept.</p>
            </caption>
            <graphic xlink:href="jmir_v23i9e25837_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>Unlike traditional ECAs, the eEVA design follows common modularity patterns found in robotics platforms such as ROS [<xref ref-type="bibr" rid="ref60">60</xref>], allowing us to generate collections of modules to cover a diversity of application use cases, such as various browsers, many internet bandwidth limitations, and interaction capabilities. For instance, when using speech recognition, to provide the transcript of the user’s spoken utterances to be used by the ECA application, based on browser capabilities, a specialized module can be used to either interface with the Web Speech API or to use another service such as Watson Speech to Text. The advantage of this design is the seamless passing from one module implementation to another, including at runtime, without affecting the rest of the application.</p>
          <p>To model social interaction, 3 main categories of module functionality are necessary, namely input/sensing modules (for perceiving social cues from the user in real time); social interaction decision-making modules, including an ECA behavior module, vocal command interpretation, and the scenario controller; and output/actuator modules for actually expressing verbal and nonverbal cues to the user. The functionalities of the main modules used in the current version of eEVA are listed in <xref ref-type="table" rid="table1">Table 1</xref>. Most modules have simple functions to retrieve or display information from and to the user or call functions from libraries (third-party or in-house) or services.</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>Listing of the most significant modules and their function descriptions used in the empathic embodied virtual agent (eEVA) for our health agent.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="300"/>
              <col width="700"/>
              <thead>
                <tr valign="top">
                  <td>Module</td>
                  <td>Function description</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Input/Sensing Module</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>User microphone interface using WebRTC application programming interface (API)</p>
                      </list-item>
                      <list-item>
                        <p>Speech recognition using Google Chrome API</p>
                      </list-item>
                      <list-item>
                        <p>Interface with CoreNLP</p>
                      </list-item>
                      <list-item>
                        <p>Graphic user interface (GUI) for direct user input (text, buttons)</p>
                      </list-item>
                    </list>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Social Interaction Decision-Making Modules</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Vocal command interpretation</p>
                      </list-item>
                      <list-item>
                        <p>Embodied conversational agent’s (ECA) behavior (gesture and facial animations)</p>
                      </list-item>
                      <list-item>
                        <p>Scenario controller (state machine execution)</p>
                      </list-item>
                    </list>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Output/Actuator Modules</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Speech synthesis</p>
                      </list-item>
                      <list-item>
                        <p>25 WebGL 3D eEVA ethnically diverse characters</p>
                      </list-item>
                    </list>
                  </td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <p>We tested 2 types of network connections: broadband and 4G mobile data. The majority of the launch time consists of loading the 3D character and surrounding virtual environment, which takes about 30 seconds and 25 seconds on 4G and broadband, respectively. The experiments (<xref ref-type="table" rid="table2">Table 2</xref>) showed that the main distributed functionalities of the eEVA framework allow real-time interaction and adequate loading times. This was echoed by users, as discussed in our Results section.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Average response time and standard deviation analysis for the empathic embodied virtual agent (eEVA) using 4G or broadband connections over the internet between North America and Europe, with caching disabled (first run).</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="300"/>
              <col width="350"/>
              <col width="350"/>
              <thead>
                <tr valign="top">
                  <td>Functionality</td>
                  <td>Time on 4G mobile data (milliseconds)</td>
                  <td>Time on broadband internet (milliseconds)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Unity 3D character, mean (SD)</td>
                  <td>30018 (663)</td>
                  <td>24626 (1910)</td>
                </tr>
                <tr valign="top">
                  <td>TTS<sup>a</sup> (sentence), mean (SD)</td>
                  <td>939 (381)</td>
                  <td>551 (141)</td>
                </tr>
                <tr valign="top">
                  <td>TTS (word), mean (SD)</td>
                  <td>72 (40)</td>
                  <td>44 (23)</td>
                </tr>
                <tr valign="top">
                  <td>Speech recognition</td>
                  <td>~30 (offline processing)</td>
                  <td>N/A<sup>b</sup></td>
                </tr>
                <tr valign="top">
                  <td>Entire HTTP request, mean (SD)</td>
                  <td>1124 (166)</td>
                  <td>784 (66)</td>
                </tr>
                <tr valign="top">
                  <td>DOM<sup>c</sup> loading, mean (SD)</td>
                  <td>2313 (80)</td>
                  <td>1635 (224)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>TTS: text to speech.</p>
              </fn>
              <fn id="table2fn2">
                <p><sup>b</sup>N/A: not applicable.</p>
              </fn>
              <fn id="table2fn3">
                <p><sup>c</sup>DOM: Document Object Model.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Model of Empathic Verbal and Nonverbal Behavior</title>
          <p>To simulate some of the communication psycholinguistic signals of a counselor delivering a BMI, we first videotaped BMI sessions between a live licensed counselor and a client. Then, a clinical expert reviewed the videotapes to code verbal reflections and NVB. From these, the expert generated a set of rules for basal behaviors of the health agents. Based on the codes of verbal and NVBs, eEVA was implemented with the following verbal reflections: “Ah.” “Alright.” “Okay.” “Good.” “Sounds Good.” “Oh, okay.” “Great!” “Thanks for letting me know.” “Oh, I see.” “Okay, thanks!”</p>
          <p>In addition, the following NVB animations were synthesized on the agent’s face: smile, facial expressions, hand gestures (typing on a computer at a desk, hands resting on the agents’ legs, formless flick, two-handed flip, two-handed contrast gesture), body leans (forward), head gestures (nod, shake, nonshake), and eyebrow movements (up, neutral, and down), which our results (discussed later) showed are conducive to a positive experience for the user with the agent in the given health care context.</p>
          <p>Since it was determined that head nods are critical to portraying (some level of) active listening, we sought to offer 3 variants depending on the user’s chosen responses. We created a set of nods using established emotional expressions governed by activation of specific individual facial movement animations. All 3 of the head nods included activation of head down and eyes down. However, depending on the type of reaction required, these also included facial expressions (eg, smile, mild concern).</p>
          <p>The patterns observed in the videotapes of the counselor-client session also directed us towards creating rules about when certain statements should be made, to ensure the counselor did not appear judgmental and to make the interaction appear more natural. In all scenarios, the counselor began seated at the desk while typing on the computer, then looked up and moved to the chair closer to where the user perceives they are sitting during an initial greeting. Once seated, the counselor began with a greeting introduction and then moved on to delivering the DCU. Verbal responses to user responses were applied based on the “score” of each question to provide nonjudgmental reactions for higher scores that might indicate a problem and positive reactions for scores that might suggest healthy consumption levels. For example, “Sounds good” was used in response to a user mentioning that they wanted to change.</p>
          <p>In parallel, NVBs were applied using a set of states that were determined as appropriate given the context of the interaction as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>: Neutral, Explaining, Asking, and Listening. In the <italic>Neutral</italic> state, the counselor spoke and used a smile, a gaze, facial expressions, head gestures, or eyebrow movements. In the <italic>Explaining</italic> state, which was activated during long periods of speaking, the two-handed flip and two-handed contrast gesture were applied. When the counselor posed a question to the user, the <italic>Asking</italic> state was initiated, which included a single hand formless flick. While the counselor waited for the user to respond to a question, the <italic>Listening</italic> mode was initiated, which included a leaning forward gesture. The leaning gesture remained in effect until a choice was made, at which point the body resumed an upright sitting position.</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>Defining nonverbal behaviors for virtual health agents. All undergo the initial sitting sequences and then assume a neutral sitting idle position. From here, the agent can enter the explaining, asking, or listening loop.</p>
            </caption>
            <graphic xlink:href="jmir_v23i9e25837_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Participants</title>
        <p>Participants were alcohol users aged 21 to 55 years and recruited online to participate in the intervention. Participants had to have engaged in heavy drinking (consumed 5 drinks in one sitting at least once in the past year), not currently be receiving treatment for their AUD, and not have a medical condition for which alcohol use would be contraindicated. Users were also screened for severe mental illness. We recruited 51 participants as part of a larger RCT of the effectiveness of this program in reducing alcohol abuse. Participants were 62% (32/51) male, were 32% (19/51) female, and had a mean age of 28 (SD 15.8) years. Participants reported their race as White (21/51, 42%), Black (12/51, 24%), Asian (3/51, 7%), Other (2/51, 3%), and no response (12/51, 24%). Participants reported their ethnicity as non-Hispanic (43/51, 84%), Hispanic (8/51, 15%), and no response (2/51, 1%). Marital status was reported as married (23/51, 46%), single (17/51, 34%), divorced (3/51, 7%), widowed (1/51, 1%), or no response (6/51, 12%). Their education level was reported as high school (7/51, 14%), some college (13/51, 25%), Associate’s degree (10/51, 19%), Bachelor’s degree (18/51, 36%), and Graduate degree (3/51, 6%).</p>
      </sec>
      <sec>
        <title>Procedures</title>
        <p>Participants were recruited online via targeted advertising on Facebook, offering free treatment for alcohol users and compensation for research participation. When users clicked on the ad, they were redirected to an online screener (on Qualtrics) to ensure they were eligible for the study. If they were eligible, users were randomly assigned to receive the same DCU MI intervention delivered online either by an ECA (eEVA) or a textual interface. Participants in this study were those who were assigned to eEVA, since the control group assigned to the text-only interface could not comment on the ECA’s social features that they did not see. Once randomized, participants were provided with a username and password to enter the DHI platform eEVA. Participants had to log on, enter demographic information, and begin the intervention. After completing the intervention (participants were given 1 week to complete), they were redirected to an online survey on Qualtrics to provide feedback about their experience with the intervention.</p>
      </sec>
      <sec>
        <title>Measures</title>
        <p>After completing the intervention, participants were asked to provide feedback on the feasibility, acceptability, and utility of the technology. Specifically, we sought feedback regarding their engagement (acceptability), perceived utility, and intent to use the technology (feasibility). Participants completed a questionnaire developed for this study. Since there does not exist, to date, a standardized instrument to evaluate interaction with ECAs of various levels of complexities, we used and adapted relevant existing questionnaires commonly used for the evaluation of human interaction with technologies involving some social cues, whether embodied with graphics or with robot technologies. Questions were based on a combination of the engagement model by O’Brien and Toms [<xref ref-type="bibr" rid="ref61">61</xref>], Almere model by Heerink et al [<xref ref-type="bibr" rid="ref62">62</xref>], and “Godspeed questionnaire” by Bartneck et al [<xref ref-type="bibr" rid="ref63">63</xref>], which has been widely used to evaluate human-technology interactions using 5 key concepts — anthropomorphism, animacy, likability, perceived intelligence, and safety — that have been found useful for interacting with either ECAs or robots. All responses were on a 7-point Likert scale (1=Strongly Agree; 7=Strongly Disagree), with lower scores indicating more desirable findings. There was no cut-off as the measures were combined for the purpose of this study and were meant to provide descriptive feedback. In addition, each question included a blank space with a prompt of “Comments” for participants to provide optional qualitative feedback to each question.</p>
        <p>The engagement model by O’Brien and Toms [<xref ref-type="bibr" rid="ref61">61</xref>] explores acceptability of the ECA via constructs of user engagement with technology. Six attributes of a technology make it more likely that a user will engage with it (challenge using the technology, interest, motivation to use the technology, and appeal of the technology [eg, “I found the health assistant interesting”]). Responses were on a 7-point Likert scale (1=Strongly agree; 7=Strongly disagree).</p>
        <p>The Almere model evaluates the user’s acceptance of the digital health agent by relying on constructs from the Unified Theory of Acceptance and Use of Technology [<xref ref-type="bibr" rid="ref12">12</xref>]. Users replied to 13 statements (eg, “I enjoyed participating in this session with the health assistant”) that map to constructs that predict intent to use the technology and perceived usefulness of the technology. Responses were on a 7-point Likert scale (1=Strongly agree; 7=Strongly disagree).</p>
        <p>The Godspeed questionnaire consists of 12 questions that capture 5 constructs that measure human-like traits of robots, which we adapted for ECA (eg, “The health agent seemed warm”): (1) anthropomorphism (eg, moving rigidly or moving elegantly), (2) animacy (eg, mechanical or organic), (3) likeability (eg, unfriendly or friendly), (4) perceived intelligence (eg, incompetent or competent), and (5) perceived safety (eg, anxious or calm). Each trait could be rated as being very human-like to very unhuman-like on a 7-point scale.</p>
        <p>All questions included a fill-in option for participants to expand on their numerical responses with qualitative feedback if they wanted to. We highlight some of those comments in the Results section.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Acceptability and Utility</title>
        <p>Participants reported high acceptability and utility of the technology, as indicated by their scores on the Almere model questions (mean 2.31, SD 1.05). Most participants reported enjoying their interaction with the agent (44/51, 86%) stating “The questions she asked me, no one had asked me before and helped recognize my drinking problem.” They thought the agent was both physically appealing (38/51, 74%) and had a pleasant voice (42/51, 82%), stating:</p>
        <disp-quote>
          <p>He is neat, he has a good haircut, he is well dressed.</p>
        </disp-quote>
        <disp-quote>
          <p>He appears wise, intelligent, and healthy.</p>
        </disp-quote>
        <disp-quote>
          <p>The voice was adequate, calm, and confident.</p>
        </disp-quote>
        <p>Furthermore, participants indicated that they found it easy to interact with the health agent (45/51, 89%) and they learned to do so quickly (45/51, 89%), stating:</p>
        <disp-quote>
          <p>I just followed the instructions and voila!</p>
        </disp-quote>
        <disp-quote>
          <p>I just waited and followed his instructions; it was easy.</p>
        </disp-quote>
        <p>Participants had more difficulty with the voice feature of the technology, with only 69% (35/51) reporting that they felt like the agent understood them when they spoke into the microphone. Based on the qualitative feedback, this may have been due to issues with participants’ microphone setting rather than the technology itself:</p>
        <disp-quote>
          <p>I set my microphone up, but it seems there is a problem with it.</p>
        </disp-quote>
        <disp-quote>
          <p>I tried to speak my answers, but it never worked so I ended up typing them.</p>
        </disp-quote>
        <p>Those who were able to get their microphones to work seemed to have no difficulty speaking to the agent as echoed by their qualitative feedback (eg, “I don’t even repeat my answers; the agent understands me very well.”). Despite some difficulties with the microphone, 78% (40/51) felt like they could have a conversation with the agent, stating “I felt that he knows me, knows what I want” and reported that they sometimes felt like they were talking to a real person (36/51, 71%), stating that:</p>
        <disp-quote>
          <p>…his voice sounded quite real…</p>
        </disp-quote>
        <disp-quote>
          <p>…the gestures he made, way he moved around…</p>
        </disp-quote>
        <disp-quote>
          <p>he answered me like a real person…</p>
        </disp-quote>
        <p>Participants further reported that the health assistant was friendly (45/51, 89%) and they found it to be useful (44/51, 88%) because:</p>
        <disp-quote>
          <p>…he explained things that I did not know…</p>
        </disp-quote>
        <disp-quote>
          <p>…it helped me recognize that I have a problem…</p>
        </disp-quote>
        <disp-quote>
          <p>I learned a lot.</p>
        </disp-quote>
        <p>Participants reported that they were comfortable disclosing information about their drinking to their digital health assistant (48/51, 93%), with 83% (41/51) reporting that they were <italic>more</italic> comfortable disclosing their drinking to the digital health assistant over their medical doctor. Indeed, participants highlighted:</p>
        <disp-quote>
          <p>The assistant gives me a level of trust that I don’t have with other humans.</p>
        </disp-quote>
        <disp-quote>
          <p>It is more easy talking to (the health agent) than to a real person.</p>
        </disp-quote>
        <disp-quote>
          <p>I don’t feel like they are judging me.</p>
        </disp-quote>
        <p>Finally, participants reported trusting the advice the health agent gave them (43/51, 85%) and that they planned on following that advice (44/51, 86%), stating:</p>
        <disp-quote>
          <p>I think she is sincere and wants to help me with my problems.</p>
        </disp-quote>
        <disp-quote>
          <p>…because it is based on facts and studies and that is real and valid information for me.</p>
        </disp-quote>
        <p>One participant noted “I didn’t feel like I was given advice, more like information to be able to make my own decision. I was the one with the power to give myself advice.” — perfectly capturing the intent of MI.</p>
      </sec>
      <sec>
        <title>Engagement</title>
        <p>Participants were highly engaged with the DHI, as indicated by their score on the engagement questions (mean 2.86, SD 0.96), indicating that the majority of participants agreed with statements around how engaged they were. Specifically, 69% (35/51) were not worried about making mistakes while using the technology, stating:</p>
        <disp-quote>
          <p>At first, it was a little bit intimidating, but then I felt confident.</p>
        </disp-quote>
        <disp-quote>
          <p>The assistant feels understanding, attentive, very friendly.</p>
        </disp-quote>
        <p>A majority (44/51, 86%) thought it was a good idea to use the health assistant, reporting:</p>
        <disp-quote>
          <p>He is kind of like a home counselor who works with reliable information and statistics.</p>
        </disp-quote>
        <disp-quote>
          <p>It is practical, easy to use, and guides the person on what to do without forcing us to make a final decision.</p>
        </disp-quote>
        <p>Participants felt that the system could be adaptive to their needs (46/51, 90%), stating that “it could be adapted to other health problems like smoking.”</p>
        <p>Finally, 88% (45/51) found the health assistant to be interesting, indicating “I was impressed by the way it converts my answers into figures and important information for my health” and said they would interact with the agent again (43/51, 85%):</p>
        <disp-quote>
          <p>Setting a new exchange with the health assistant would help me to reach my goal.</p>
        </disp-quote>
      </sec>
      <sec>
        <title>Impressions of the Digital Health Agent</title>
        <p>Participants reported a high number of human-like traits on the Godspeed questions (mean 2.07, SD 0.89). Participants reported that the agent moved appropriately (43/51, 85%) and seemed warm (46/51, 90%), responsive (45/51, 89%), knowledgeable (47/51, 92%), relaxed (46/51, 90%), flexible (42/51, 83%), honest (46/51, 90%), respectful (46/51, 90%), confident (47/51, 92%), interested (44/51, 86%), open-minded or nonjudgmental (43/51, 84%), and supportive (45/51, 89%).</p>
        <p>Overall, participants’ responses to the questionnaires and qualitative feedback indicated that they found the delivery of an MI intervention by a digital health agent over the internet to be acceptable, be engaging, and have features that are close to human-like.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>Our goal with this study was to understand if the technology we developed was feasible (able to be implemented online), acceptable, useful, and easy to use by consumers. As such, we focused largely on the technology aspects of the intervention.</p>
      <sec>
        <title>Principal Findings</title>
        <p>This study provides an optimistic outlook for the use of digital health agents to deliver brief online interventions in the future. Consumers overwhelmingly reported positive experiences in their interactions with the agent, with many reporting that they trusted the agent and felt that they could more comfortably disclose information that they may not have disclosed to a human provider. This echoes what has been found in the literature around disclosing to computers versus humans [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref65">65</xref>].</p>
        <p>Further, participants tended to attribute many human-like traits to their agent (eg, friendly, trustworthy, kind) and commented positively on the physical appearance, voice, and physical gestures of the agent. Our team engaged in coding of verbal reflections, hand gestures, and facial expressions of a real therapist to enhance the digital agent’s nonverbal communication to resemble what a therapist might do in session [<xref ref-type="bibr" rid="ref66">66</xref>].</p>
        <p>Implications for this work are important. Given the shortage of mental health workforces in many locations, digital health agents may provide an acceptable complement to traditional face-to-face therapy, reducing demand for higher levels of care, where a digital health agent can act as a clinician “extender” to deliver booster sessions. Similar to telehealth services, digital health agents resolve a number of barriers to care such as transportation and scheduling.</p>
        <p>In addition, digital health agents reduce stigma around mental health care, are less costly than one-on-one therapy, and can be scaled out and disseminated. For individuals with high levels of social anxiety, digital health agents may provide them with a unique opportunity to get help. Given the digital nature of the agent, it is possible to adjust the programming to make the agent able to speak in multiple languages, reducing language barriers for minority and refugee populations. Already, consumers can pick a digital health agent from a library of diverse options of physical visual features (gender, age, race, and ethnicity; see <xref rid="figure2" ref-type="fig">Figure 2</xref>) and vocal features (gender). This is an exciting development given the lack of a diverse mental health workforce [<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref68">68</xref>].</p>
        <p>This study has provided the investigative team with valuable feedback to improve the technology, including improving the flow and tone of the voice, providing questions in text, and formatting the technology for use on mobile phones and with lower bandwidth.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Despite these enthusiastic findings and implications, it is important to note a number of limitations. First, digital health agents cannot replace traditional therapy and certainly cannot manage crisis situations. They are well suited for brief, structured interventions, but cannot replace the complex nature of a therapeutic relationship and complex therapeutic interventions such as family therapy and emotion-focused therapy. They were considered and studied in this article as clinician “extenders.” Second, this study was conducted with participants in the United States only. It is unclear if technology acceptability would be as high in other countries. Third, the impact of the DHI on actual alcohol outcomes remains unclear. A study is underway to better understand the effects of a digital health agent. Fourth, it is unclear whether ECAs are suitable for various health problems besides alcohol and for various other populations not studied here, such as the elderly or children. Finally, the access to and cost of reliable internet necessary to use ECAs may limit access to some — potentially further increasing the digital divide.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This type of intervention and research on digital health agents in virtual reality over the internet are still in their infancy, and there is much work to be done. For instance, the same BMI intervention delivered in this study could be adjusted to other health behaviors (eg, other substances, medication compliance, weight management). Other interventions could be delivered to screen or treat a variety of problems. Furthermore, our team is working on integrating natural language dialog management features such that the agent will respond to the consumer’s answers without needing specific input from which to read. We conducted preliminary work [<xref ref-type="bibr" rid="ref26">26</xref>] for a speech-enabled ECA for BMI interventions with promising results. However, natural language understanding is still a very open research area of computer science, and its use in DHIs is not robust enough to deploy with real users who need support and help, without the potential frustrations generated by unreliable agent’s speech understanding. Our team has also worked on features based on AI that allow a PC-based agent (ie, operating on PC only and not web-based) to pick up on the consumer’s facial expressions in real time (eg, if the consumer smiles, the agent smiles back), which has been shown to improve users’ engagement with digital health agents [<xref ref-type="bibr" rid="ref25">25</xref>], and we plan to add this type of feature to our web-based eEVA system.</p>
        <p>Despite all these exciting potential developments, it is critical to evaluate DHIs with high levels of rigor before they can be deployed for the population at large. As pointed out by Carroll [<xref ref-type="bibr" rid="ref7">7</xref>], very few of the many available internet-based interventions have been carefully evaluated in well-controlled clinical trials, and the majority of those studies have been conducted with college populations, bringing into question the generalizability of the results to broader society. The conclusions that can be drawn from many studies are constrained by high levels of dropout, high attrition, and weak control conditions (eg, waitlists). To that end, we will report on the results of the RCT we conducted to assess the efficacy of the eEVA DHI compared to a text-only version of the intervention.</p>
        <p>We furthermore consider that these digital health agents and DHIs can only complement the unique experience of psychosocial therapy and serve as “clinician extenders” [<xref ref-type="bibr" rid="ref7">7</xref>]. As suggested by Mohr et al [<xref ref-type="bibr" rid="ref21">21</xref>], mental health technologies in general should be considered as sociotechnical systems (or technology-enabled services rather than mere products) that must fit within an ecosystem of mental health services (involving human support and organizational factors). Our aim is to provide relief to a clogged mental health system and provide online access to self-help to individuals who otherwise would not access traditional face-to-face care. Further research on whether DHIs increase access to care by removing the barriers identified earlier (eg, availability and proximity of trained providers, affordability, stigma) or whether they increase the digital divide is needed [<xref ref-type="bibr" rid="ref21">21</xref>]. Digital health agents, even with integrated AI, will not replace human therapists; they should be considered therapist extenders.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AUD</term>
          <def>
            <p>alcohol use disorder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">BMI</term>
          <def>
            <p>brief motivational interviewing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DCU</term>
          <def>
            <p>Drinker’s Check-Up</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DHI</term>
          <def>
            <p>digital health intervention</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">ECA</term>
          <def>
            <p>embodied conversational agent</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">eEVA</term>
          <def>
            <p>empathic embodied virtual agent</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">GUI</term>
          <def>
            <p>graphical user interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MI</term>
          <def>
            <p>motivational interviewing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">NVB</term>
          <def>
            <p>nonverbal behavior</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">RCT</term>
          <def>
            <p>randomized controlled trial</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">TAM</term>
          <def>
            <p>technology acceptance model</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The research described in this article was supported by a grant from the US National Science Foundation Award No.1423260 to Florida International University and Loma Linda University.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Obesity and overweight</article-title>
          <source>World Health Organization</source>
          <year>2011</year>
          <access-date>2021-07-13</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/news-room/fact-sheets/detail/obesity-and-overweight">https://www.who.int/news-room/fact-sheets/detail/obesity-and-overweight</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Rollnick</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Motivational interviewing: preparing people for change (2nd edition)</source>
          <year>2002</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>The Guilford Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rubak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sandbaek</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lauritzen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Motivational interviewing: a systematic review and meta-analysis</article-title>
          <source>Br J Gen Pract</source>
          <year>2005</year>
          <month>04</month>
          <volume>55</volume>
          <issue>513</issue>
          <fpage>305</fpage>
          <lpage>12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bjgp.org/cgi/pmidlookup?view=long&#38;pmid=15826439"/>
          </comment>
          <pub-id pub-id-type="medline">15826439</pub-id>
          <pub-id pub-id-type="pmcid">PMC1463134</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Burke</surname>
              <given-names>BL</given-names>
            </name>
            <name name-style="western">
              <surname>Arkowitz</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Menchola</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The efficacy of motivational interviewing: a meta-analysis of controlled clinical trials</article-title>
          <source>J Consult Clin Psychol</source>
          <year>2003</year>
          <month>10</month>
          <volume>71</volume>
          <issue>5</issue>
          <fpage>843</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1037/0022-006X.71.5.843</pub-id>
          <pub-id pub-id-type="medline">14516234</pub-id>
          <pub-id pub-id-type="pii">2003-07816-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heckman</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Egleston</surname>
              <given-names>BL</given-names>
            </name>
            <name name-style="western">
              <surname>Hofmann</surname>
              <given-names>MT</given-names>
            </name>
          </person-group>
          <article-title>Efficacy of motivational interviewing for smoking cessation: a systematic review and meta-analysis</article-title>
          <source>Tob Control</source>
          <year>2010</year>
          <month>10</month>
          <day>30</day>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>410</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/20675688"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/tc.2009.033175</pub-id>
          <pub-id pub-id-type="medline">20675688</pub-id>
          <pub-id pub-id-type="pii">tc.2009.033175</pub-id>
          <pub-id pub-id-type="pmcid">PMC2947553</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Acevedo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Panas</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Garnick</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Acevedo-Garcia</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ritter</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Disparities in the treatment of substance use disorders: does where you live matter?</article-title>
          <source>J Behav Health Serv Res</source>
          <year>2018</year>
          <month>10</month>
          <day>12</day>
          <volume>45</volume>
          <issue>4</issue>
          <fpage>533</fpage>
          <lpage>549</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29435862"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11414-018-9586-y</pub-id>
          <pub-id pub-id-type="medline">29435862</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11414-018-9586-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC6087681</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carroll</surname>
              <given-names>KM</given-names>
            </name>
          </person-group>
          <article-title>Lost in translation? Moving contingency management and cognitive behavioral therapy into clinical practice</article-title>
          <source>Ann N Y Acad Sci</source>
          <year>2014</year>
          <month>10</month>
          <volume>1327</volume>
          <fpage>94</fpage>
          <lpage>111</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25204847"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/nyas.12501</pub-id>
          <pub-id pub-id-type="medline">25204847</pub-id>
          <pub-id pub-id-type="pmcid">PMC4206586</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scott</surname>
              <given-names>CK</given-names>
            </name>
            <name name-style="western">
              <surname>Grella</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Dennis</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Nicholson</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Linking Individuals with Substance Use Disorders (SUDs) in Primary Care to SUD Treatment: the Recovery Management Checkups-Primary Care (RMC-PC) Pilot Study</article-title>
          <source>J Behav Health Serv Res</source>
          <year>2018</year>
          <month>04</month>
          <day>27</day>
          <volume>45</volume>
          <issue>2</issue>
          <fpage>160</fpage>
          <lpage>173</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29181779"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11414-017-9576-5</pub-id>
          <pub-id pub-id-type="medline">29181779</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11414-017-9576-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC5871558</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wamsley</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Satterfield</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Curtis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lundgren</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Satre</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Alcohol and Drug Screening, Brief Intervention, and Referral to Treatment (SBIRT) Training and Implementation: Perspectives from 4 Health Professions</article-title>
          <source>J Addict Med</source>
          <year>2018</year>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>262</fpage>
          <lpage>272</lpage>
          <pub-id pub-id-type="doi">10.1097/ADM.0000000000000410</pub-id>
          <pub-id pub-id-type="medline">30063221</pub-id>
          <pub-id pub-id-type="pii">01271255-201808000-00004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Valdez</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>DO</given-names>
            </name>
            <name name-style="western">
              <surname>Ruiz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Oren</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Carvajal</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Exploring structural, sociocultural, and individual barriers to alcohol abuse treatment among Hispanic men</article-title>
          <source>Am J Mens Health</source>
          <year>2018</year>
          <month>11</month>
          <day>27</day>
          <volume>12</volume>
          <issue>6</issue>
          <fpage>1948</fpage>
          <lpage>1957</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/1557988318790882?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1557988318790882</pub-id>
          <pub-id pub-id-type="medline">30051746</pub-id>
          <pub-id pub-id-type="pmcid">PMC6199428</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leggio</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>de Witte</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chick</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Alcohol and Alcoholism: Then, Now and The Future of 'The Red Journal'</article-title>
          <source>Alcohol Alcohol</source>
          <year>2018</year>
          <month>11</month>
          <day>01</day>
          <volume>53</volume>
          <issue>6</issue>
          <fpage>637</fpage>
          <lpage>638</lpage>
          <pub-id pub-id-type="doi">10.1093/alcalc/agy075</pub-id>
          <pub-id pub-id-type="medline">30339185</pub-id>
          <pub-id pub-id-type="pii">5137193</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hogg-Johnson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>DE</given-names>
            </name>
            <name name-style="western">
              <surname>Skinner</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Glazier</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Levinson</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Computer-assisted screening for intimate partner violence and control: a randomized trial</article-title>
          <source>Ann Intern Med</source>
          <year>2009</year>
          <month>07</month>
          <day>21</day>
          <volume>151</volume>
          <issue>2</issue>
          <fpage>93</fpage>
          <lpage>102</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/abs/10.7326/0003-4819-151-2-200907210-00124?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/0003-4819-151-2-200907210-00124</pub-id>
          <pub-id pub-id-type="medline">19487706</pub-id>
          <pub-id pub-id-type="pii">0000605-200907210-00124</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Card</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lucas</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Computer interrogation in medical practice</article-title>
          <source>International Journal of Man-Machine Studies</source>
          <year>1981</year>
          <month>1</month>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>49</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1016/s0020-7373(81)80032-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghanem</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>Hutton</surname>
              <given-names>HE</given-names>
            </name>
            <name name-style="western">
              <surname>Zenilman</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Zimba</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Erbelding</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>Audio computer assisted self interview and face to face interview modes in assessing response bias among STD clinic patients</article-title>
          <source>Sex Transm Infect</source>
          <year>2005</year>
          <month>10</month>
          <day>01</day>
          <volume>81</volume>
          <issue>5</issue>
          <fpage>421</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sti.bmj.com/lookup/pmidlookup?view=long&#38;pmid=16199744"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/sti.2004.013193</pub-id>
          <pub-id pub-id-type="medline">16199744</pub-id>
          <pub-id pub-id-type="pii">81/5/421</pub-id>
          <pub-id pub-id-type="pmcid">PMC1745029</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kissinger</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rice</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Farley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Trim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jewitt</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Margavio</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>DH</given-names>
            </name>
          </person-group>
          <article-title>Application of computer-assisted interviews to sexual behavior research</article-title>
          <source>Am J Epidemiol</source>
          <year>1999</year>
          <month>05</month>
          <day>15</day>
          <volume>149</volume>
          <issue>10</issue>
          <fpage>950</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1093/oxfordjournals.aje.a009739</pub-id>
          <pub-id pub-id-type="medline">10342804</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Newman</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Des Jarlais</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Turner</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Gribble</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cooley</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Paone</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The differential effects of face-to-face and computer interview modes</article-title>
          <source>Am J Public Health</source>
          <year>2002</year>
          <month>02</month>
          <volume>92</volume>
          <issue>2</issue>
          <fpage>294</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.2105/ajph.92.2.294</pub-id>
          <pub-id pub-id-type="medline">11818309</pub-id>
          <pub-id pub-id-type="pmcid">PMC1447060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Lemmen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kramer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chopra</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Internet-Delivered Health Interventions That Work: Systematic Review of Meta-Analyses and Evaluation of Website Availability</article-title>
          <source>J Med Internet Res</source>
          <year>2017</year>
          <month>03</month>
          <day>24</day>
          <volume>19</volume>
          <issue>3</issue>
          <fpage>e90</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2017/3/e90/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.7111</pub-id>
          <pub-id pub-id-type="medline">28341617</pub-id>
          <pub-id pub-id-type="pii">v19i3e90</pub-id>
          <pub-id pub-id-type="pmcid">PMC5384996</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yeager</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Benight</surname>
              <given-names>CC</given-names>
            </name>
          </person-group>
          <article-title>If we build it, will they come? Issues of engagement with digital health interventions for trauma recovery</article-title>
          <source>Mhealth</source>
          <year>2018</year>
          <volume>4</volume>
          <fpage>37</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.21037/mhealth.2018.08.04"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/mhealth.2018.08.04</pub-id>
          <pub-id pub-id-type="medline">30363749</pub-id>
          <pub-id pub-id-type="pii">mh-04-2018.08.04</pub-id>
          <pub-id pub-id-type="pmcid">PMC6182033</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kerst</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zielasek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gaebel</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Smartphone applications for depression: a systematic literature review and a survey of health care professionals' attitudes towards their use in clinical practice</article-title>
          <source>Eur Arch Psychiatry Clin Neurosci</source>
          <year>2020</year>
          <month>03</month>
          <volume>270</volume>
          <issue>2</issue>
          <fpage>139</fpage>
          <lpage>152</lpage>
          <pub-id pub-id-type="doi">10.1007/s00406-018-0974-3</pub-id>
          <pub-id pub-id-type="medline">30607530</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00406-018-0974-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Price</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gros</surname>
              <given-names>DF</given-names>
            </name>
            <name name-style="western">
              <surname>McCauley</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Gros</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Ruggiero</surname>
              <given-names>KJ</given-names>
            </name>
          </person-group>
          <article-title>Nonuse and dropout attrition for a web-based mental health intervention delivered in a post-disaster context</article-title>
          <source>Psychiatry</source>
          <year>2012</year>
          <volume>75</volume>
          <issue>3</issue>
          <fpage>267</fpage>
          <lpage>84</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22913502"/>
          </comment>
          <pub-id pub-id-type="doi">10.1521/psyc.2012.75.3.267</pub-id>
          <pub-id pub-id-type="medline">22913502</pub-id>
          <pub-id pub-id-type="pmcid">PMC3696953</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mohr</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Weingardt</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schueller</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Three Problems With Current Digital Mental Health Research . . . and Three Things We Can Do About Them</article-title>
          <source>Psychiatr Serv</source>
          <year>2017</year>
          <month>05</month>
          <day>01</day>
          <volume>68</volume>
          <issue>5</issue>
          <fpage>427</fpage>
          <lpage>429</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28412890"/>
          </comment>
          <pub-id pub-id-type="doi">10.1176/appi.ps.201600541</pub-id>
          <pub-id pub-id-type="medline">28412890</pub-id>
          <pub-id pub-id-type="pmcid">PMC6903906</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Karlsen</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Oord</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Graue</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Oftedal</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Dropout From an eHealth Intervention for Adults With Type 2 Diabetes: A Qualitative Study</article-title>
          <source>J Med Internet Res</source>
          <year>2017</year>
          <month>05</month>
          <day>30</day>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>e187</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2017/5/e187/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.7479</pub-id>
          <pub-id pub-id-type="medline">28559223</pub-id>
          <pub-id pub-id-type="pii">v19i5e187</pub-id>
          <pub-id pub-id-type="pmcid">PMC5470008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hollis</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Falconer</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Whittington</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Stockton</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Glazebrook</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Davies</surname>
              <given-names>EB</given-names>
            </name>
          </person-group>
          <article-title>Annual Research Review: Digital health interventions for children and young people with mental health problems - a systematic and meta-review</article-title>
          <source>J Child Psychol Psychiatry</source>
          <year>2017</year>
          <month>04</month>
          <day>10</day>
          <volume>58</volume>
          <issue>4</issue>
          <fpage>474</fpage>
          <lpage>503</lpage>
          <pub-id pub-id-type="doi">10.1111/jcpp.12663</pub-id>
          <pub-id pub-id-type="medline">27943285</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shingleton</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Palfai</surname>
              <given-names>TP</given-names>
            </name>
          </person-group>
          <article-title>Technology-delivered adaptations of motivational interviewing for health-related behaviors: A systematic review of the current research</article-title>
          <source>Patient Educ Couns</source>
          <year>2016</year>
          <month>01</month>
          <volume>99</volume>
          <issue>1</issue>
          <fpage>17</fpage>
          <lpage>35</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/26298219"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.pec.2015.08.005</pub-id>
          <pub-id pub-id-type="medline">26298219</pub-id>
          <pub-id pub-id-type="pii">S0738-3991(15)30043-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC4691359</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lisetti</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Amini</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yasavur</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Rishe</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>I can help you change! An empathic virtual agent delivers behavior change health interventions</article-title>
          <source>ACM Trans. Manage. Inf. Syst</source>
          <year>2013</year>
          <month>12</month>
          <volume>4</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1145/2544103</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yasavur</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Lisetti</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rishe</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Let’s talk! speaking virtual counselor offers you a brief intervention</article-title>
          <source>J Multimodal User Interfaces</source>
          <year>2014</year>
          <month>9</month>
          <day>5</day>
          <volume>8</volume>
          <issue>4</issue>
          <fpage>381</fpage>
          <lpage>398</lpage>
          <pub-id pub-id-type="doi">10.1007/s12193-014-0169-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Provoost</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Ruwaard</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Riper</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Embodied conversational agents in clinical psychology: a scoping review</article-title>
          <source>J Med Internet Res</source>
          <year>2017</year>
          <month>05</month>
          <day>09</day>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>e151</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2017/5/e151/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.6553</pub-id>
          <pub-id pub-id-type="medline">28487267</pub-id>
          <pub-id pub-id-type="pii">v19i5e151</pub-id>
          <pub-id pub-id-type="pmcid">PMC5442350</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gerten</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fast</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Pelachaud</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>André</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chollet</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Karpouzis</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Pelé</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Creating Rapport with Virtual Agents</article-title>
          <source>Intelligent Virtual Agents. IVA 2007. Lecture Notes in Computer Science, vol 4722</source>
          <year>2007</year>
          <publisher-loc>Berlin, Heidelberg</publisher-loc>
          <publisher-name>Springer Publishing Company</publisher-name>
          <fpage>125</fpage>
          <lpage>138</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Okhmatovskaia</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lamothe</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Marsella</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Morales</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>van der Werf</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>LP</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Young</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aylett</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ballin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Olivier</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Virtual Rapport</article-title>
          <source>Intelligent Virtual Agents. IVA 2006. Lecture Notes in Computer Science, vol 4133</source>
          <year>2006</year>
          <publisher-loc>Berlin, Heidelberg</publisher-loc>
          <publisher-name>Springer Publishing Company</publisher-name>
          <fpage>14</fpage>
          <lpage>27</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Watt</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Prendinger</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lester</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ishizuka</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Agreeable People Like Agreeable Virtual Humans</article-title>
          <source>Intelligent Virtual Agents. IVA 2008. Lecture Notes in Computer Science, vol 5208</source>
          <year>2008</year>
          <publisher-loc>Berlin, Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>253</fpage>
          <lpage>261</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Watt</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Does the contingency of agents' nonverbal feedback affect users' social anxiety?</article-title>
          <year>2008</year>
          <conf-name>7th international joint conference on Autonomous agents and multiagent systems</conf-name>
          <conf-date>May 12-16, 2008</conf-date>
          <conf-loc>Estoril, Portugal</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McQuiggan</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Lester</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Modeling and evaluating empathy in embodied companion agents</article-title>
          <source>International Journal of Human-Computer Studies</source>
          <year>2007</year>
          <month>4</month>
          <volume>65</volume>
          <issue>4</issue>
          <fpage>348</fpage>
          <lpage>360</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2006.11.015</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boukricha</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wachsmuth</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Empathy-based emotional alignment for a virtual human: a three-step approach</article-title>
          <source>Künstl Intell</source>
          <year>2011</year>
          <month>5</month>
          <day>19</day>
          <volume>25</volume>
          <issue>3</issue>
          <fpage>195</fpage>
          <lpage>204</lpage>
          <pub-id pub-id-type="doi">10.1007/s13218-011-0109-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boukricha</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wachsmuth</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hofstätter</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Grammer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Pleasure-arousal-dominance driven facial expression simulation</article-title>
          <year>2009</year>
          <conf-name>3rd International Conference on Affective Computing and Intelligent Interaction and Workshops</conf-name>
          <conf-date>September 10-12, 2009</conf-date>
          <conf-loc>Amsterdam, Netherlands</conf-loc>
          <pub-id pub-id-type="doi">10.1109/acii.2009.5349579</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boukricha</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Becker</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wachsmuth</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Simulating empathy for the virtual human max</article-title>
          <year>2007</year>
          <conf-name>2nd Workshop on Emotion and Computing - Current Research and Future Impact</conf-name>
          <conf-date>2007</conf-date>
          <conf-loc>Osnabrück, Germany</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pub.uni-bielefeld.de/record/2276595"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pelachaud</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Modelling multimodal expression of emotion in a virtual agent</article-title>
          <source>Philos Trans R Soc Lond B Biol Sci</source>
          <year>2009</year>
          <month>12</month>
          <day>12</day>
          <volume>364</volume>
          <issue>1535</issue>
          <fpage>3539</fpage>
          <lpage>48</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/19884148"/>
          </comment>
          <pub-id pub-id-type="doi">10.1098/rstb.2009.0186</pub-id>
          <pub-id pub-id-type="medline">19884148</pub-id>
          <pub-id pub-id-type="pii">364/1535/3539</pub-id>
          <pub-id pub-id-type="pmcid">PMC2781894</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pelachaud</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Studies on gesture expressivity for a virtual agent</article-title>
          <source>Speech Communication</source>
          <year>2009</year>
          <month>7</month>
          <volume>51</volume>
          <issue>7</issue>
          <fpage>630</fpage>
          <lpage>639</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.specom.2008.04.009"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.specom.2008.04.009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prendinger</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ishizuka</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The empathic companion: a character-based interface that addresses users' affective states</article-title>
          <source>Applied Artificial Intelligence</source>
          <year>2005</year>
          <month>03</month>
          <day>09</day>
          <volume>19</volume>
          <issue>3-4</issue>
          <fpage>267</fpage>
          <lpage>285</lpage>
          <pub-id pub-id-type="doi">10.1080/08839510590910174</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Von Der Pütten</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Krämer</surname>
              <given-names>NC</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Who's there? Can a Virtual Agent Really Elicit Social Presence?</article-title>
          <year>2009</year>
          <conf-name>12th Annual International Workshop on Presence</conf-name>
          <conf-date>November 11-13, 2009</conf-date>
          <conf-loc>Los Angeles, CA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>von der Pütten</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Reipen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wiedmann</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kopp</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krämer</surname>
              <given-names>NC</given-names>
            </name>
          </person-group>
          <article-title>The Impact of Different Embodied Agent-Feedback on Users' Behavior</article-title>
          <year>2009</year>
          <conf-name>9th International Conference on Intelligent Virtual Agents</conf-name>
          <conf-date>September 14-16, 2009</conf-date>
          <conf-loc>Amsterdam, Netherlands</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-642-04380-2_86</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Rapport and facial expression</article-title>
          <year>2009</year>
          <conf-name>3rd International Conference on Affective Computing and Intelligent Interaction and Workshops</conf-name>
          <conf-date>September 10-12, 2009</conf-date>
          <conf-loc>Amsterdam, Netherlands</conf-loc>
          <pub-id pub-id-type="doi">10.1109/acii.2009.5349514</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Giorgino</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Health dialog systems for patients and consumers</article-title>
          <source>J Biomed Inform</source>
          <year>2006</year>
          <month>10</month>
          <volume>39</volume>
          <issue>5</issue>
          <fpage>556</fpage>
          <lpage>71</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(05)00141-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2005.12.004</pub-id>
          <pub-id pub-id-type="medline">16464643</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(05)00141-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gruber</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Establishing the computer-patient working alliance in automated health behavior change interventions</article-title>
          <source>Patient Educ Couns</source>
          <year>2005</year>
          <month>10</month>
          <volume>59</volume>
          <issue>1</issue>
          <fpage>21</fpage>
          <lpage>30</lpage>
          <pub-id pub-id-type="doi">10.1016/j.pec.2004.09.008</pub-id>
          <pub-id pub-id-type="medline">16198215</pub-id>
          <pub-id pub-id-type="pii">S0738-3991(04)00307-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cassell</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Negotiated collusion: Modeling social language and its relationship effects in intelligent agents</article-title>
          <source>User Model User-Adap Inter</source>
          <year>2003</year>
          <volume>13</volume>
          <fpage>89</fpage>
          <lpage>132</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1024026532471</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Klüwer</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>"I Like Your Shirt"-Dialogue Acts for Enabling Social Talk in Conversational Agents</article-title>
          <year>2011</year>
          <conf-name>International Workshop on Intelligent Virtual Agents</conf-name>
          <conf-date>September 15-17, 2011</conf-date>
          <conf-loc>Reykjavik, Iceland</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-642-23974-8_2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schulman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sidner</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>An Intelligent Conversational Agent for Promoting Long-Term Health Behavior Change Using Motivational Interviewing</article-title>
          <year>2011</year>
          <conf-name>AAAI Spring Symposium Series</conf-name>
          <conf-date>March 21-23, 2011</conf-date>
          <conf-loc>Palo Alto, CA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>TW</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Jack</surname>
              <given-names>BW</given-names>
            </name>
            <name name-style="western">
              <surname>Paasche-Orlow</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Pfeifer</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>O'Donnell</surname>
              <given-names>Julie</given-names>
            </name>
          </person-group>
          <article-title>Response to a relational agent by hospital patients with depressive symptoms</article-title>
          <source>Interact Comput</source>
          <year>2010</year>
          <month>07</month>
          <day>01</day>
          <volume>22</volume>
          <issue>4</issue>
          <fpage>289</fpage>
          <lpage>298</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/20628581"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.intcom.2009.12.001</pub-id>
          <pub-id pub-id-type="medline">20628581</pub-id>
          <pub-id pub-id-type="pmcid">PMC2901553</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>WL</given-names>
            </name>
            <name name-style="western">
              <surname>LaBore</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chui</surname>
              <given-names>YC</given-names>
            </name>
          </person-group>
          <article-title>A pedagogical agent for psychosocial intervention on a handheld computer</article-title>
          <year>2004</year>
          <conf-name>AAAI Fall Symposium on Dialogue Systems for Health Communication</conf-name>
          <conf-date>October 22-24, 2004</conf-date>
          <conf-loc>Arlington, VA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kimmel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Branas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ivins</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Weaver</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Modeling emotion and behavior in animated personas to facilitate human behavior change: the case of the HEART-SENSE game</article-title>
          <source>Health Care Manag Sci</source>
          <year>2001</year>
          <month>09</month>
          <volume>4</volume>
          <issue>3</issue>
          <fpage>213</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1023/a:1011448916375</pub-id>
          <pub-id pub-id-type="medline">11519847</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Friederichs</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bolman</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Oenema</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Guyaux</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lechner</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Motivational interviewing in a Web-based physical activity intervention with an avatar: randomized controlled trial</article-title>
          <source>J Med Internet Res</source>
          <year>2014</year>
          <volume>16</volume>
          <issue>2</issue>
          <fpage>e48</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.jmir.org/2014/2/e48/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.2974</pub-id>
          <pub-id pub-id-type="medline">24550153</pub-id>
          <pub-id pub-id-type="pii">v16i2e48</pub-id>
          <pub-id pub-id-type="pmcid">PMC3936285</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Llorach</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Blat</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Say Hi to Eliza. An Embodied Conversational Agent on the Web</article-title>
          <year>2017</year>
          <conf-name>17th International Conference on Intelligent Virtual Agents (IVA)</conf-name>
          <conf-date>August 27-30, 2017</conf-date>
          <conf-loc>Stockholm, Sweden</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-319-67401-8_34</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ramanarayanan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Tilsen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Proctor</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Töger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Goldstein</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nayak</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Narayanan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Analysis of speech production real-time MRI</article-title>
          <source>Computer Speech &#38; Language</source>
          <year>2018</year>
          <month>11</month>
          <volume>52</volume>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.csl.2018.04.002"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.csl.2018.04.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schroeder</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rowan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Toledo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Paradiso</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Czerwinski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mark</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Linehan</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Pocket Skills: A Conversational Mobile Web App To Support Dialectical Behavioral Therapy</article-title>
          <year>2018</year>
          <conf-name>Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 21-26, 2018</conf-date>
          <conf-loc>Montreal, Quebec, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3173574.3173972</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>FD</given-names>
            </name>
          </person-group>
          <article-title>Perceived usefulness, perceived ease of use, and user acceptance of information technology</article-title>
          <source>MIS Quarterly</source>
          <year>1989</year>
          <month>09</month>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>319</fpage>
          <lpage>340</lpage>
          <pub-id pub-id-type="doi">10.2307/249008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>FD</given-names>
            </name>
            <name name-style="western">
              <surname>Bagozzi</surname>
              <given-names>RP</given-names>
            </name>
            <name name-style="western">
              <surname>Warshaw</surname>
              <given-names>PR</given-names>
            </name>
          </person-group>
          <article-title>User Acceptance of Computer Technology: A Comparison of Two Theoretical Models</article-title>
          <source>Management Science</source>
          <year>1989</year>
          <month>08</month>
          <volume>35</volume>
          <issue>8</issue>
          <fpage>982</fpage>
          <lpage>1003</lpage>
          <pub-id pub-id-type="doi">10.1287/mnsc.35.8.982</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Polceanu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lisetti</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Time to Go ONLINE! A Modular Framework for Building Internet-based Socially Interactive Agents</article-title>
          <year>2019</year>
          <conf-name>19th ACM International Conference on Intelligent Virtual Agents</conf-name>
          <conf-date>July 2-5, 2019</conf-date>
          <conf-loc>Paris, France</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3308532.3329452</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hester</surname>
              <given-names>RK</given-names>
            </name>
            <name name-style="western">
              <surname>Squires</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Delaney</surname>
              <given-names>HD</given-names>
            </name>
          </person-group>
          <article-title>The Drinker's Check-up: 12-month outcomes of a controlled clinical trial of a stand-alone software program for problem drinkers</article-title>
          <source>J Subst Abuse Treat</source>
          <year>2005</year>
          <month>03</month>
          <volume>28</volume>
          <issue>2</issue>
          <fpage>159</fpage>
          <lpage>69</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsat.2004.12.002</pub-id>
          <pub-id pub-id-type="medline">15780546</pub-id>
          <pub-id pub-id-type="pii">S0740-5472(04)00156-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kilian Wells</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Moonie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pharr</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Association between alcohol screening and brief intervention during routine check-ups and alcohol consumption among adults living in California</article-title>
          <source>Arch Psychiatr Nurs</source>
          <year>2018</year>
          <month>12</month>
          <volume>32</volume>
          <issue>6</issue>
          <fpage>872</fpage>
          <lpage>877</lpage>
          <pub-id pub-id-type="doi">10.1016/j.apnu.2018.07.001</pub-id>
          <pub-id pub-id-type="medline">30454631</pub-id>
          <pub-id pub-id-type="pii">S0883-9417(17)30369-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Stringer</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>EH</given-names>
            </name>
          </person-group>
          <article-title>Stigma as a barrier to substance abuse treatment among those with unmet need: an analysis of parenthood and marital status</article-title>
          <source>Journal of Family Issues</source>
          <year>2015</year>
          <month>04</month>
          <day>24</day>
          <volume>39</volume>
          <issue>1</issue>
          <fpage>3</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.1177/0192513x15581659</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Quigley</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gerkey</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Conley</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Faust</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Foote</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Leibs</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Berger</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wheeler</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ROS: an open-source Robot Operating System</article-title>
          <year>2009</year>
          <conf-name>ICRA workshop on open source software</conf-name>
          <conf-date>May 12-17, 2009</conf-date>
          <conf-loc>Kobe, Japan</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Toms</surname>
              <given-names>EG</given-names>
            </name>
          </person-group>
          <article-title>What is user engagement? A conceptual framework for defining user engagement with technology</article-title>
          <source>J. Am. Soc. Inf. Sci</source>
          <year>2008</year>
          <month>04</month>
          <volume>59</volume>
          <issue>6</issue>
          <fpage>938</fpage>
          <lpage>955</lpage>
          <pub-id pub-id-type="doi">10.1002/asi.20801</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heerink</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kröse</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Evers</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Wielinga</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Assessing Acceptance of Assistive Social Agent Technology by Older Adults: the Almere Model</article-title>
          <source>Int J of Soc Robotics</source>
          <year>2010</year>
          <month>09</month>
          <day>04</day>
          <volume>2</volume>
          <issue>4</issue>
          <fpage>361</fpage>
          <lpage>375</lpage>
          <pub-id pub-id-type="doi">10.1007/s12369-010-0068-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bartneck</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kulić</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Croft</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zoghbi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Measurement Instruments for the Anthropomorphism, Animacy, Likeability, Perceived Intelligence, and Perceived Safety of Robots</article-title>
          <source>Int J of Soc Robotics</source>
          <year>2008</year>
          <month>11</month>
          <day>20</day>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>71</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1007/s12369-008-0001-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lucas</surname>
              <given-names>GM</given-names>
            </name>
            <name name-style="western">
              <surname>Rizzo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stratou</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Boberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers</article-title>
          <source>Front. Robot. AI</source>
          <year>2017</year>
          <month>10</month>
          <day>12</day>
          <volume>4</volume>
          <fpage>53</fpage>
          <lpage>79</lpage>
          <pub-id pub-id-type="doi">10.3389/frobt.2017.00051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lucas</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gratch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>It’s only a computer: Virtual humans increase willingness to disclose</article-title>
          <source>Computers in Human Behavior</source>
          <year>2014</year>
          <month>08</month>
          <volume>37</volume>
          <fpage>94</fpage>
          <lpage>100</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.chb.2014.04.043"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.chb.2014.04.043</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boustani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lunn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Polceanu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lisetti</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Increasing consumer engagement with computer-delivered behavioral interventions: Incorporating verbal reflections and non-verbal gestures to a digital health agent</article-title>
          <year>2019</year>
          <conf-name>53rd Annual Convention of the Association of Behavioral and Cognitive Therapies (ABCT)</conf-name>
          <conf-date>November 21-24, 2019</conf-date>
          <conf-loc>Atlanta, GA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Buche</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Singer</surname>
              <given-names>PM</given-names>
            </name>
          </person-group>
          <article-title>Factors impacting the development of a diverse behavioral health workforce</article-title>
          <source>University of Michigan School of Public Health Behavioral Health Workforce Research Center</source>
          <year>2017</year>
          <access-date>2021-08-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mha.ohio.gov/Portals/0/assets/HealthProfessionals/Training%20and%20Workforce%20Development/CareerPathways/Factors_Impacting_%20Behavioral_Health_Workforce-Diversity.pdf?ver=2019-03-13-102735-210">https://mha.ohio.gov/Portals/0/assets/HealthProfessionals/Training%20and%20Workforce%20Development/CareerPathways/Factors_Impacting_%20Behavioral_Health_Workforce-Diversity.pdf?ver=2019-03-13-102735-210</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>FF</given-names>
            </name>
            <name name-style="western">
              <surname>West</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wilk</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Narrow</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hales</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Mental health practitioners and trainees</article-title>
          <source>Medicine</source>
          <year>2002</year>
          <fpage>327</fpage>
          <lpage>368</lpage>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
