<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v24i3e29506</article-id>
      <article-id pub-id-type="pmid">35254278</article-id>
      <article-id pub-id-type="doi">10.2196/29506</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>How Can Research on Artificial Empathy Be Enhanced by Applying Deepfakes?</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bidmon</surname>
            <given-names>Sonja</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hashim</surname>
            <given-names>Anis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Hsuan-Chia</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9198-0697</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Rahmanti</surname>
            <given-names>Annisa Ristya</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9478-6267</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Huang</surname>
            <given-names>Chih-Wei</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2551-6199</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Yu-Chuan Jack</given-names>
          </name>
          <degrees>MD, PhD, FACMI</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Graduate Institute of Biomedical Informatics</institution>
            <institution>College of Medical Science and Technology</institution>
            <institution>Taipei Medical University</institution>
            <addr-line>No 172-1, Sec 2 Keelung Rd</addr-line>
            <addr-line>Taipei, 106</addr-line>
            <country>Taiwan</country>
            <fax>886 2 6638 0233</fax>
            <phone>886 966 546 813</phone>
            <email>jack@tmu.edu.tw</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff6" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6497-4232</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Graduate Institute of Biomedical Informatics</institution>
        <institution>College of Medical Science and Technology</institution>
        <institution>Taipei Medical University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>International Center for Health Information Technology</institution>
        <institution>Taipei Medical University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Research Center of Big Data and Meta-analysis</institution>
        <institution>Wan Fang Hospital</institution>
        <institution>Taipei Medical University</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Clinical Big Data Research Center</institution>
        <institution>Taipei Medical University Hospital</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Health Policy Management</institution>
        <institution>Faculty of Medicine, Public Health, and Nursing</institution>
        <institution>Universitas Gadjah Mada</institution>
        <addr-line>Yogyakarta</addr-line>
        <country>Indonesia</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Department of Dermatology</institution>
        <institution>Wanfang Hospital</institution>
        <addr-line>Taipei</addr-line>
        <country>Taiwan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Yu-Chuan Jack Li <email>jack@tmu.edu.tw</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>3</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>4</day>
        <month>3</month>
        <year>2022</year>
      </pub-date>
      <volume>24</volume>
      <issue>3</issue>
      <elocation-id>e29506</elocation-id>
      <history>
        <date date-type="received">
          <day>13</day>
          <month>4</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>11</day>
          <month>8</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>6</day>
          <month>12</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>28</day>
          <month>12</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Hsuan-Chia Yang, Annisa Ristya Rahmanti, Chih-Wei Huang, Yu-Chuan Jack Li. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 04.03.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2022/3/e29506" xlink:type="simple"/>
      <abstract>
        <p>We propose the idea of using an open data set of doctor-patient interactions to develop artificial empathy based on facial emotion recognition. Facial emotion recognition allows a doctor to analyze patients' emotions, so that they can reach out to their patients through empathic care. However, face recognition data sets are often difficult to acquire; many researchers struggle with small samples of face recognition data sets. Further, sharing medical images or videos has not been possible, as this approach may violate patient privacy. The use of deepfake technology is a promising approach to deidentifying video recordings of patients’ clinical encounters. Such technology can revolutionize the implementation of facial emotion recognition by replacing a patient's face in an image or video with an unrecognizable face—one with a facial expression that is similar to that of the original. This technology will further enhance the potential use of artificial empathy in helping doctors provide empathic care to achieve good doctor-patient therapeutic relationships, and this may result in better patient satisfaction and adherence to treatment.</p>
      </abstract>
      <kwd-group>
        <kwd>artificial empathy</kwd>
        <kwd>deepfakes</kwd>
        <kwd>doctor-patient relationship</kwd>
        <kwd>face emotion recognition</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>facial recognition</kwd>
        <kwd>facial emotion recognition</kwd>
        <kwd>medical images</kwd>
        <kwd>patient</kwd>
        <kwd>physician</kwd>
        <kwd>therapy</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Good doctor-patient communication is one of the key requirements of building a successful, therapeutic doctor-patient relationship [<xref ref-type="bibr" rid="ref1">1</xref>]. This type of communication enables physicians to provide better-quality care that may impact patients’ health. Studies on good doctor-patient communication have demonstrated a strong positive correlation between physician communication skills and patient satisfaction, which is likely associated with patients’ adherence to treatment; their experience of care; and, consequently, improved clinical outcomes [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref5">5</xref>].</p>
      <p>We acknowledge the importance of good doctor-patient communication; doctors must understand patients’ perspectives through verbal conversation and nonverbal behaviors (eg, posture, gesture, eye contact, facial expression, etc) [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. Establishing communication involving nonverbal messages is very important in building a good doctor-patient relationship because such communication conveys more expressive and meaningful messages than those conveyed in a verbal conversation [<xref ref-type="bibr" rid="ref8">8</xref>]. One research study indicates that nonverbal messages contribute to up to 90% of messages delivered in human interactions [<xref ref-type="bibr" rid="ref6">6</xref>]. Another study also estimates that more than half of outpatient clinic patients believe that establishing positive nonverbal behaviors indicates that a doctor is more attentive toward their patient and thus results in better patient satisfaction and adherence to treatment [<xref ref-type="bibr" rid="ref8">8</xref>].</p>
      <p>Although several studies have reported that human nonverbal behaviors are significantly associated with patient satisfaction and compliance to a treatment plan, physicians are often clueless about nonverbal messages [<xref ref-type="bibr" rid="ref6">6</xref>]. Doctors should be more aware of their nonverbal behaviors because patients are cognizant of them. Doctors also need to recognize and evaluate patients’ nonverbal behaviors and their own nonverbal behaviors toward patients.</p>
      <p>Artificial intelligence (AI) offers great potential for exploring nonverbal communication in doctor-patient encounters [<xref ref-type="bibr" rid="ref9">9</xref>]. For example, AI may help a doctor become more empathic by analyzing human facial expressions through emotion recognition. Once an emotionally intelligent AI identifies an emotion, it can guide a doctor to express more empathy based on each patient’s unique emotional needs [<xref ref-type="bibr" rid="ref10">10</xref>].</p>
      <p>Empathy refers to the ability to understand or feel what another person is experiencing, and showing empathy may lead to better behavioral outcomes [<xref ref-type="bibr" rid="ref9">9</xref>]. Empathy can be learned, and the use of AI technology introduces a promising approach to incorporating artificial empathy in the doctor-patient therapeutic relationship [<xref ref-type="bibr" rid="ref11">11</xref>]. However, human emotions are very complex. An emotionally intelligent AI should learn a range of emotions (ie, those that patients experience) from facial expressions, voices, and physiological signals to empathize with human emotions [<xref ref-type="bibr" rid="ref12">12</xref>]. These emotions can be captured by using various modalities, such as video, audio, text, and physiological signals [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
      <p>Among all forms of human communication channels, facial expressions are recognized as the most essential and influential [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. The human face can express various thoughts, emotions, and behaviors [<xref ref-type="bibr" rid="ref15">15</xref>]. It can convey important aspects in human interpersonal communication and nonverbal expressions in social interactions [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Compared to the amount of information that can be conveyed via emotion recognition technology, facial expressions convey 55% of the emotional expression transmitted in multimodal human interactions, whereas verbal information, text communication, and communication via physiological signals only convey 20%, 15%, and 10% of the total information in interactions, respectively [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
      <p>Many researchers have been studying facial expressions by using automatic facial emotion recognition (FER) to gain a better understanding of the human emotions linked with empathy [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref24">24</xref>]. They have proposed various machine learning algorithms, such as support vector machines, Bayesian belief networks, and neural network models, for recognizing and describing emotions based on observed facial expressions recorded on images or videos [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. Although mounting literature has been introduced on machine learning and deep learning for automatically extracting emotions from the human face, developing a highly accurate FER system requires a lot of training data and a high-quality computational system [<xref ref-type="bibr" rid="ref21">21</xref>]. In addition, the data set must include diverse facial views in terms of angles, frame rates, races, and genders, among others [<xref ref-type="bibr" rid="ref21">21</xref>].</p>
      <p>Many public data sets are available for FER [<xref ref-type="bibr" rid="ref25">25</xref>]. However, most public data sets are not sufficient for supporting doctor-patient interactions. Creating our own medical data sets is also not possible, since this process is expensive and time consuming [<xref ref-type="bibr" rid="ref26">26</xref>]. Moreover, researchers often struggle with acquiring sufficient data for training a face recognition model due to privacy concerns. Data sharing and the pooling of medical images or videos are not even possible, as these approaches may violate patient privacy. Herein, we present our study on the emerging AI technology known as <italic>deepfakes</italic>—a technology that enables face deidentification for recorded videos of patients’ clinical encounters. This technology can revolutionize FER by replacing patients’ faces in images or videos with an unrecognizable face, thereby anonymizing patients. This could protect patients’ privacy when it comes to clinical encounter videos and allow medical video data sharing to become more feasible. Moreover, using an open clinical encounter video data set can also promote more in-depth research within the academic community. Thus, deepfake technology will further enhance the clinical application of artificial empathy for medical application purposes.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Human FER</title>
        <p>Human FER plays a significant role in understanding people's nonverbal ways of communicating with others [<xref ref-type="bibr" rid="ref19">19</xref>]. It has attracted the interest of scientific populations in various fields due to its superiority among other forms of emotion recognition [<xref ref-type="bibr" rid="ref22">22</xref>]. As it is not only limited to human-computer interactions or human-robot interactions, facial expression analysis has become a popular research topic in various health care areas, such as the diagnosis or assessment of cognitive impairment (eg, autism spectrum disorders in children), depression monitoring, pain monitoring in Parkinson Disease, and clinical communication in doctor-patient consultations [<xref ref-type="bibr" rid="ref27">27</xref>].</p>
        <p>The main objective of FER is to accurately classify various facial expressions according to a person’s emotional state [<xref ref-type="bibr" rid="ref21">21</xref>]. The classical FER approach is usually divided into the following three major stages: (1) facial feature detection, (2) feature extraction, and (3) emotion recognition [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. However, traditional FER has been reported to be unable to extract facial expressions in an uncontrolled environment with diverse facial views [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. On the other hand, a recent study using a deep learning–based FER approach has successfully achieved superior accuracy over that of traditional FER [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>].</p>
      </sec>
      <sec>
        <title>Deepfake Technology</title>
        <p>The rapid growth of computer vision and deep learning technology has driven the recently emerged phenomena of deepfakes (<italic>deep learning</italic> and <italic>fake</italic>), which can automatically forge images and videos that humans cannot easily recognize [<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. In addition, deepfake techniques offer the possibility of generating unrecognizable images of a person’s face and altering or swapping a person’s face in existing images and videos with another face that exhibits the same expressions as the original face [<xref ref-type="bibr" rid="ref29">29</xref>]. Various deepfake attempts have been used for negative purposes, such as creating controversial content related to celebrities, politicians, companies, and even individuals to damage their reputation [<xref ref-type="bibr" rid="ref30">30</xref>]. Although the harmful effects of deepfake technology have raised public concerns, there are also advantages to using this technology. For example, it can provide privacy protection in some critical medical applications, such as face deidentification for patients [<xref ref-type="bibr" rid="ref32">32</xref>]. Further, although deepfake technology can easily manipulate the low-level semantics of visual and audio features, a recent study suggested that it might be difficult for deepfake technology to forge the high-level semantic features of human emotions [<xref ref-type="bibr" rid="ref31">31</xref>].</p>
        <p>Deepfake technology is mainly developed by using deep learning—an AI-based method that can be used to train deep networks [<xref ref-type="bibr" rid="ref29">29</xref>]. The popular approach to implementing deepfake techniques is based on the generative adversarial network (GAN) model [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. There are several types and examples of deepfakes, such as photo deepfakes, audio deepfakes, video deepfakes, and audio-video deepfakes.</p>
      </sec>
      <sec>
        <title>Data Set</title>
        <p>To simulate how deepfake technology enables face deidentification for recorded videos of doctor-patient clinical encounters, we recruited 348 adult patients and 4 doctors from Taipei Municipal Wanfang Hospital and Taipei Medical University Hospital from March to December 2019. After excluding video data from 21 patients due to video damage, we collected video data from 327 patients. The data set focused on the interactions between doctors and patients in dermatology outpatient clinics. The subjects in the data set are all from the Taiwanese population.</p>
      </sec>
      <sec>
        <title>The FER System in the Deepfake Model Setup</title>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref> illustrates the workflow of the FER system before and after proposing deepfake technology. First, we created synchronized recordings by using 2 cameras to capture doctor-patient interactions in the dermatology outpatient clinic. We assumed that the face was the most relevant and accessible channel for nonverbal communication in health care [<xref ref-type="bibr" rid="ref6">6</xref>]. Therefore, we then used a facial expression recognition system developed by the Industrial Technology Research Institute to detect emotions and analyze the emotional changes of the doctors and patients across time. This facial expression recognition system has been deployed using training data from 28,710 Asian face images and has an accuracy of 95% for the extended Cohn-Kanade data set [<xref ref-type="bibr" rid="ref35">35</xref>].</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>The facial emotion recognition system workflow. ITRI: Industrial Technology Research Institute.</p>
          </caption>
          <graphic xlink:href="jmir_v24i3e29506_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We identified facial expressions by using the main points of an individual’s face (eg, eyes, eyebrows, the tip of the nose, lip corners, etc) to track facial movement. This allowed us to observe the emotional experiences of the doctors and patients when they expressed the following seven facial expressions: anger, disgust, fear, neutral, happiness, sadness, and surprise. The system then provided a summary of the emotional changes of both the doctors and the patients with a temporal resolution of up to 1 second. Additionally, our model managed to filter out any irrelevant face targets (ie, faces other than those of the doctors and patients). Finally, the summary results of the doctor and patient emotion analyses were used as a reference data set to develop artificial empathy. The system then created recommendations, so that doctors could provide an immediate response based on patients’ situations.</p>
        <p>It should be noted, however, that our artificial empathy reference data training set was built by using limited face recognition data sets. Therefore, we tried to improve the model by proposing the use of open data from a clinical encounter video manipulated by deepfake technology, which can enable medical data sharing without violating patient privacy. Furthermore, these open data allowed us to connect with real-world clinical encounter video data sets, so that we could use different model facial expression recognition systems to analyze patients’ and doctors’ emotional experiences (<xref rid="figure1" ref-type="fig">Figure 1</xref>).</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>Our study was approved by Taipei Medical University (TMU)-Joint Institutional Review Board (TMU-JIRB No: N201810020).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>The clinical encounter video—the source of our face recognition data set—consists of video data from 327 patients—208 female patients and 119 male patients (age: mean 51, SD 19.06 years). The average consultation time on the recorded video was 4.61 (SD 3.04) minutes; the longest duration of a consultation was 25.55 minutes, and the shortest was 0.33 minutes. Our artificial empathy algorithm was developed by using FER algorithms. This algorithm learned a range of patient emotions by analyzing expressions, so that doctors could provide an immediate response based on patients’ emotional experiences. In general, this FER system achieved a mean detection rate of &#62;80% on real-world data.</p>
      <p>Our face recognition data set for artificial empathy was solely based on basic emotions. The system evaluation reported expressions of anger, happiness, disgust, and sadness, which were more likely to be expressed by the doctors than by the patients (<italic>P</italic>&#60;.001). Moreover, patients also tended to more commonly express neutral emotions and surprise when compared to doctors (<italic>P</italic>&#60;.001). The overall emotions of the doctors were dominated by emotions of sadness (expressions: 8580/17,397, 49.3%), happiness (expressions: 7541/17,397, 43.3%), anger (expressions: 629/17,397, 3.6%), surprise (expressions: 436/17,397, 2.5%), and disgust (expressions: 201/17,397, 1.2%), whereas the emotions of patients consisted of happiness (expressions: 5766/12,606, 45.7%), sadness (expressions: 5773/12,606, 45.8%), surprise (expressions: 890/12,606, 7.1%), and anger (expressions: 126/12,606, 0.9%). <xref rid="figure2" ref-type="fig">Figure 2</xref> illustrates the emotional expressions of both doctors and patients. The system used the results of the emotion analysis to remind the doctors to change their behaviors according to patients’ situations, so that the patients felt like the doctors understood their emotions and situations.</p>
      <p>The original face recognition data set consists of personal data (ie, patients’ faces). However, we can only release the results of the emotional expression analysis as a reference for the development of artificial empathy. As noted previously, our approach only involved using a small amount of training data (only Asian face images). Therefore, to improve model performance, we need to anonymize the clinical interaction video by performing face deidentification. Face deidentification allows us to share our face recognition data set as open data for clinical research. To enable face image data sharing, a researcher can perform traditional face deidentification techniques, such as masking an image by covering a patient’s face region with a colored box (<xref rid="figure3" ref-type="fig">Figure 3</xref>).</p>
      <p>Of note, however, as our research aims to develop artificial empathy to support good doctor-patient relationships, the masking method cannot be performed, as it is very difficult to validate masked images with the results of an emotion expression analysis. Deepfake technology offers a method for swapping a patient's original face with another from an open-source face data set to generate an unrecognizable image with similar expressions and attributes to those of the original face image. This face swapping method can be adopted for use with the face recognition reference data set for our artificial empathy algorithm to avoid violating patient privacy and ethical concerns. We adopted video deepfake technology based on face swapping (<xref rid="figure3" ref-type="fig">Figure 3</xref>), which was proposed in the first order motion model for image animation [<xref ref-type="bibr" rid="ref36">36</xref>]. This approach involved adopting a novel deep learning framework for image animation known as <italic>Monkey-Net</italic> and modifying it by using a set of self-learned key points combined with local affine transformations [<xref ref-type="bibr" rid="ref36">36</xref>]. This framework enables a dense motion transfer network to generate a video in which the source image is animated according to a given driving video sequence with complex motions [<xref ref-type="bibr" rid="ref36">36</xref>]. Unlike the original GAN model, which relied on costly ground-truth pretrained models that resulted in the poor generation quality of image or video outputs, the first order motion model for image animation can handle high-resolution data sets with profile images and can thus become a reference benchmark model for our face recognition data set.</p>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Screenshots of the recorded video simulation of the doctor-patient relationship in the dermatology outpatient clinic.</p>
        </caption>
        <graphic xlink:href="jmir_v24i3e29506_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure3" position="float">
        <label>Figure 3</label>
        <caption>
          <p>Comparison between traditional face deidentification and face swapping by using deepfake technology on an image of a patient's face.</p>
        </caption>
        <graphic xlink:href="jmir_v24i3e29506_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>Our FER study revealed how doctors more commonly express emotions like anger, happiness, disgust, and sadness than patients. Because nonverbal messages like facial expressions contribute the most to the messages delivered in human interactions, doctors need to be more careful when expressing their emotions during clinical interactions. For example, doctors should never become accustomed to expressing anger, disgust, or other negative emotions that represent poor communication skills, as this may ruin treatment goals and result in frustration for both patients and health care practitioners [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
        <p>Positive emotions (eg, happiness) represent good communication skills, as they may help people understand how another person feels and what they think and allow people to understand each other better [<xref ref-type="bibr" rid="ref37">37</xref>]. Furthermore, positive emotions can help build patients' trust in their doctors [<xref ref-type="bibr" rid="ref38">38</xref>]. Trust from a patient’s perspective refers to the acceptance of a vulnerable situation in which patients believe that doctors will provide adequate and fair medical care to help them based on their needs [<xref ref-type="bibr" rid="ref39">39</xref>]. When patients trust their doctors, they are more likely to share valid and reliable information related to their condition, acknowledge health problems more readily, comprehend medical information efficiently, and comply with treatment plans accordingly [<xref ref-type="bibr" rid="ref39">39</xref>]. They also tend to seek preventive care earlier and return for follow-up care, which may prevent further disease complications [<xref ref-type="bibr" rid="ref39">39</xref>].</p>
        <p>In addition to physicians’ medical knowledge and clinical skills, patients’ perceptions of physicians’ ability to provide adequate information, actively listen, and empathize are believed to be associated with patient satisfaction and trust [<xref ref-type="bibr" rid="ref3">3</xref>]. A physician's capability to exhibit effective communication skills and provide empathic care is beneficial for patients in terms of fostering good doctor-patient relationships and for the physicians themselves, as these factors can increase job satisfaction and performance and lower the risk of stress and physical burnout among physicians [<xref ref-type="bibr" rid="ref40">40</xref>]. Empathic care may also reduce the rate of medical errors and help to avoid conflict with patients [<xref ref-type="bibr" rid="ref38">38</xref>].</p>
        <p>We believe that our FER system and face recognition data set can serve as a decision support system that can guide doctors when a patient requires special attention for achieving therapeutic goals. For example, if doctors express a negative facial expression (eg, anger, disgust, and sadness), the system will remind them to change their facial expressions. Moreover, if a patient also expresses a negative facial expression, the system will suggest that the doctor should use a different approach to accommodate the patient’s emotional situation. Based on our results, the major shortcoming that we need to address is that FER technology relies on both the quality and the quantity of the training data [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. We believe that in the future, we can improve the system’s precision and accuracy by collecting more data from more subjects with various sociodemographic backgrounds. This is only possible if we adopt deepfake technology (eg, GANs), which can learn the facial features of a human face on images and videos and replace it with another person's face [<xref ref-type="bibr" rid="ref41">41</xref>]. Thus, deepfake technology can replace a patient’s face image and create fake face images with similar facial expressions in videos. With the use of deepfake technology, the recorded video database of outpatient doctor-patient interactions will become more accessible. Applying deepfakes to deidentify FER data sets may benefit the development of artificial empathy, as this approach may not violate the privacy and security of interpersonal situations.</p>
        <p>Similar to our study, a recent study reported using deepfake technology to generate open-source, high-quality medical video data sets of Parkinson disease examination videos to deidentify subjects [<xref ref-type="bibr" rid="ref32">32</xref>]. This study also applied the face swapping technique and real-time multi-person system to detect human motion key points based on open-source videos from the Deep Fake Detection data set [<xref ref-type="bibr" rid="ref32">32</xref>]. Meanwhile, our approach involved using a self-supervised formulation consisting of self-learned key points combined with local affine transformations [<xref ref-type="bibr" rid="ref36">36</xref>]. We believe that this self-learned model could preserve the represented emotional states of people in the original face recognition data set.</p>
        <p>Our study has some limitations. First, our approach only involved using a single information modality—video deepfakes—which could have resulted in inaccurate emotion classification. In the future, we can combine both video and audio deepfakes to better represent the emotional states of a target person. Second, moral and ethical concerns need to be considered when using deepfake technology for the deidentification of medical data sets. However, our study highlighted the positive ways of using deepfakes for privacy protection when using face recognition data sets in medical settings. Thus, instead of raising an ethical problem, this study will help prevent the use of deepfakes for malicious purposes and encourage their use in medical applications.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>We propose using an open data set of clinical encounter videos as a reference data training set to develop artificial empathy based on an FER system, given that FER technologies rely on extensive data training. Yet, due to privacy concerns, it has always been difficult for researchers to acquire a face recognition data set. Therefore, we suggest the adoption of deepfakes. Deepfake technology can deidentify faces in images or videos and manipulate them so that the proper target face becomes unrecognizable, thereby preventing the violation of patient privacy. Such technology can also generate the same facial expressions as those in the original image or video. Therefore, this technology might promote medical video data sharing, improve the implementation of FER systems in clinical settings, and protect sensitive data. Furthermore, deepfake technology will further enhance the potential use of artificial empathy in helping doctors provide empathic care based on patients’ emotional experiences to achieve a good doctor-patient therapeutic relationship.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">FER</term>
          <def>
            <p>facial emotion recognition</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">GAN</term>
          <def>
            <p>generative adversarial network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research is funded by the Ministry of Science and Technology (grants MOST 110-2320-B-038-029-MY3, 110-2221-E-038-002 -MY2, and 110-2622-E-038 -003-CC1).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ha</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Longnecker</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Doctor-patient communication: a review</article-title>
          <source>Ochsner J</source>
          <year>2010</year>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>38</fpage>
          <lpage>43</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21603354"/>
          </comment>
          <pub-id pub-id-type="medline">21603354</pub-id>
          <pub-id pub-id-type="pmcid">PMC3096184</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zachariae</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pedersen</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Jensen</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Ehrnrooth</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Rossen</surname>
              <given-names>PB</given-names>
            </name>
            <name name-style="western">
              <surname>von der Maase</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Association of perceived physician communication style with patient satisfaction, distress, cancer-related self-efficacy, and perceived control over the disease</article-title>
          <source>Br J Cancer</source>
          <year>2003</year>
          <month>03</month>
          <day>10</day>
          <volume>88</volume>
          <issue>5</issue>
          <fpage>658</fpage>
          <lpage>665</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/12618870"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/sj.bjc.6600798</pub-id>
          <pub-id pub-id-type="medline">12618870</pub-id>
          <pub-id pub-id-type="pii">6600798</pub-id>
          <pub-id pub-id-type="pmcid">PMC2376357</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sullivan</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Stein</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Savetsky</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Samet</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>The doctor-patient relationship and HIV-infected patients' satisfaction with primary care physicians</article-title>
          <source>J Gen Intern Med</source>
          <year>2000</year>
          <month>07</month>
          <volume>15</volume>
          <issue>7</issue>
          <fpage>462</fpage>
          <lpage>469</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/2p8dw9fk"/>
          </comment>
          <pub-id pub-id-type="doi">10.1046/j.1525-1497.2000.03359.x</pub-id>
          <pub-id pub-id-type="medline">10940132</pub-id>
          <pub-id pub-id-type="pii">jgi03359</pub-id>
          <pub-id pub-id-type="pmcid">PMC1495486</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Renzi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Abeni</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Picardi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Agostini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Melchi</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Pasquini</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Puddu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Braga</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Factors associated with patient satisfaction with care among dermatological outpatients</article-title>
          <source>Br J Dermatol</source>
          <year>2001</year>
          <month>10</month>
          <volume>145</volume>
          <issue>4</issue>
          <fpage>617</fpage>
          <lpage>623</lpage>
          <pub-id pub-id-type="doi">10.1046/j.1365-2133.2001.04445.x</pub-id>
          <pub-id pub-id-type="medline">11703289</pub-id>
          <pub-id pub-id-type="pii">4445</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cánovas</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Carrascosa</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>García</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fernández</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Calvo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Monsalve</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Soriano</surname>
              <given-names>JF</given-names>
            </name>
            <collab>Empathy Study Group</collab>
          </person-group>
          <article-title>Impact of empathy in the patient-doctor relationship on chronic pain relief and quality of life: A prospective study in Spanish pain clinics</article-title>
          <source>Pain Med</source>
          <year>2018</year>
          <month>07</month>
          <day>01</day>
          <volume>19</volume>
          <issue>7</issue>
          <fpage>1304</fpage>
          <lpage>1314</lpage>
          <pub-id pub-id-type="doi">10.1093/pm/pnx160</pub-id>
          <pub-id pub-id-type="medline">29016846</pub-id>
          <pub-id pub-id-type="pii">3964520</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ranjan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kumari</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chakrawarty</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>How can doctors improve their communication skills?</article-title>
          <source>J Clin Diagn Res</source>
          <year>2015</year>
          <month>03</month>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>JE01</fpage>
          <lpage>JE04</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25954636"/>
          </comment>
          <pub-id pub-id-type="doi">10.7860/JCDR/2015/12072.5712</pub-id>
          <pub-id pub-id-type="medline">25954636</pub-id>
          <pub-id pub-id-type="pmcid">PMC4413084</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Butow</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hoque</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Using artificial intelligence to analyse and teach communication in healthcare</article-title>
          <source>Breast</source>
          <year>2020</year>
          <month>04</month>
          <volume>50</volume>
          <fpage>49</fpage>
          <lpage>55</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0960-9776(20)30009-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.breast.2020.01.008</pub-id>
          <pub-id pub-id-type="medline">32007704</pub-id>
          <pub-id pub-id-type="pii">S0960-9776(20)30009-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7375542</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>FH</given-names>
            </name>
            <name name-style="western">
              <surname>Hanif</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tabassum</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qidwai</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Nanji</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Patient attitudes towards physician nonverbal behaviors during consultancy: Result from a developing country</article-title>
          <source>ISRN Family Med</source>
          <year>2014</year>
          <month>02</month>
          <day>04</day>
          <volume>2014</volume>
          <fpage>473654</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/24977140"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2014/473654</pub-id>
          <pub-id pub-id-type="medline">24977140</pub-id>
          <pub-id pub-id-type="pmcid">PMC4041264</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cuff</surname>
              <given-names>BMP</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Howat</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>Empathy: A review of the concept</article-title>
          <source>Emot Rev</source>
          <year>2014</year>
          <month>12</month>
          <day>01</day>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>144</fpage>
          <lpage>153</lpage>
          <pub-id pub-id-type="doi">10.1177/1754073914558466</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aminololama-Shakeri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>López</surname>
              <given-names>JE</given-names>
            </name>
          </person-group>
          <article-title>The doctor-patient relationship with artificial intelligence</article-title>
          <source>AJR Am J Roentgenol</source>
          <year>2019</year>
          <month>02</month>
          <volume>212</volume>
          <issue>2</issue>
          <fpage>308</fpage>
          <lpage>310</lpage>
          <pub-id pub-id-type="doi">10.2214/AJR.18.20509</pub-id>
          <pub-id pub-id-type="medline">30540210</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>YCJ</given-names>
            </name>
          </person-group>
          <article-title>How can artificial intelligence make medicine more preemptive?</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>08</month>
          <day>11</day>
          <volume>22</volume>
          <issue>8</issue>
          <fpage>e17211</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/8/e17211/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17211</pub-id>
          <pub-id pub-id-type="medline">32780024</pub-id>
          <pub-id pub-id-type="pii">v22i8e17211</pub-id>
          <pub-id pub-id-type="pmcid">PMC7448175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sebe</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Gevers</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>TS</given-names>
            </name>
          </person-group>
          <article-title>Multimodal approaches for emotion recognition: A survey</article-title>
          <year>2005</year>
          <month>01</month>
          <day>17</day>
          <conf-name>Electronic Imaging 2005</conf-name>
          <conf-date>January 16-20 2005</conf-date>
          <conf-loc>San Jose, California, United States</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.600746</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bänziger</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Grandjean</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Emotion recognition from expressions in face, voice, and body: the Multimodal Emotion Recognition Test (MERT)</article-title>
          <source>Emotion</source>
          <year>2009</year>
          <month>10</month>
          <volume>9</volume>
          <issue>5</issue>
          <fpage>691</fpage>
          <lpage>704</lpage>
          <pub-id pub-id-type="doi">10.1037/a0017088</pub-id>
          <pub-id pub-id-type="medline">19803591</pub-id>
          <pub-id pub-id-type="pii">2009-17981-010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lazzeri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mazzei</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Greco</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rotesi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lanatà</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>De Rossi</surname>
              <given-names>DE</given-names>
            </name>
          </person-group>
          <article-title>Can a humanoid face be expressive? A psychophysiological investigation</article-title>
          <source>Front Bioeng Biotechnol</source>
          <year>2015</year>
          <month>05</month>
          <day>26</day>
          <volume>3</volume>
          <fpage>64</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fbioe.2015.00064"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fbioe.2015.00064</pub-id>
          <pub-id pub-id-type="medline">26075199</pub-id>
          <pub-id pub-id-type="pmcid">PMC4443734</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frank</surname>
              <given-names>MG</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Smelser</surname>
              <given-names>NJ</given-names>
            </name>
            <name name-style="western">
              <surname>Baltes</surname>
              <given-names>PB</given-names>
            </name>
          </person-group>
          <article-title>Facial expressions</article-title>
          <source>International Encyclopedia of the Social &#38; Behavioral Sciences</source>
          <year>2001</year>
          <publisher-loc>Amsterdam, Netherlands</publisher-loc>
          <publisher-name>Elsevier</publisher-name>
          <fpage>5230</fpage>
          <lpage>5234</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mancini</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Biolcati</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Agnoli</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Andrei</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Trombini</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Recognition of facial emotional expressions among Italian pre-adolescents, and their affective reactions</article-title>
          <source>Front Psychol</source>
          <year>2018</year>
          <month>08</month>
          <day>03</day>
          <volume>9</volume>
          <fpage>1303</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2018.01303"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2018.01303</pub-id>
          <pub-id pub-id-type="medline">30123150</pub-id>
          <pub-id pub-id-type="pmcid">PMC6085998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jack</surname>
              <given-names>RE</given-names>
            </name>
            <name name-style="western">
              <surname>Schyns</surname>
              <given-names>PG</given-names>
            </name>
          </person-group>
          <article-title>The human face as a dynamic tool for social communication</article-title>
          <source>Curr Biol</source>
          <year>2015</year>
          <month>07</month>
          <day>20</day>
          <volume>25</volume>
          <issue>14</issue>
          <fpage>R621</fpage>
          <lpage>R634</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0960-9822(15)00655-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cub.2015.05.052</pub-id>
          <pub-id pub-id-type="medline">26196493</pub-id>
          <pub-id pub-id-type="pii">S0960-9822(15)00655-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frith</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Role of facial expressions in social interactions</article-title>
          <source>Philos Trans R Soc Lond B Biol Sci</source>
          <year>2009</year>
          <month>12</month>
          <day>12</day>
          <volume>364</volume>
          <issue>1535</issue>
          <fpage>3453</fpage>
          <lpage>3458</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/19884140"/>
          </comment>
          <pub-id pub-id-type="doi">10.1098/rstb.2009.0142</pub-id>
          <pub-id pub-id-type="medline">19884140</pub-id>
          <pub-id pub-id-type="pii">364/1535/3453</pub-id>
          <pub-id pub-id-type="pmcid">PMC2781887</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saxena</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khanna</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Emotion recognition and detection methods: A comprehensive survey</article-title>
          <source>Journal of Artificial Intelligence and Systems</source>
          <year>2020</year>
          <volume>2</volume>
          <fpage>53</fpage>
          <lpage>79</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://iecscience.org/uploads/jpapers/202003/dnQToaqdF8IRjhE62pfIovCkDJ2jXAcZdK6KHRzM.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.33969/ais.2020.21005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mehendale</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Facial emotion recognition using convolutional neural networks (FERC)</article-title>
          <source>SN Appl Sci</source>
          <year>2020</year>
          <month>02</month>
          <day>18</day>
          <volume>2</volume>
          <issue>446</issue>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1007/s42452-020-2234-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s42452-020-2234-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akhand</surname>
              <given-names>MAH</given-names>
            </name>
            <name name-style="western">
              <surname>Roy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Siddique</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kamal</surname>
              <given-names>MAS</given-names>
            </name>
            <name name-style="western">
              <surname>Shimamura</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Facial emotion recognition using transfer learning in the deep CNN</article-title>
          <source>Electronics (Basel)</source>
          <year>2021</year>
          <month>04</month>
          <day>27</day>
          <volume>10</volume>
          <issue>9</issue>
          <fpage>1036</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mdpi-res.com/d_attachment/electronics/electronics-10-01036/article_deploy/electronics-10-01036-v2.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/electronics10091036</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Facial expression emotion recognition model integrating philosophy and machine learning theory</article-title>
          <source>Front Psychol</source>
          <year>2021</year>
          <month>09</month>
          <day>27</day>
          <volume>12</volume>
          <fpage>759485</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2021.759485"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2021.759485</pub-id>
          <pub-id pub-id-type="medline">34646223</pub-id>
          <pub-id pub-id-type="pmcid">PMC8503687</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Uddin</surname>
              <given-names>MZ</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Deligiannidis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Arabnia</surname>
              <given-names>HR</given-names>
            </name>
          </person-group>
          <article-title>Chapter 26 - A local feature-based facial expression recognition system from depth video</article-title>
          <source>Emerging Trends in Image Processing, Computer Vision and Pattern Recognition</source>
          <year>2015</year>
          <publisher-loc>Burlington, Massachusetts</publisher-loc>
          <publisher-name>Morgan Kaufmann</publisher-name>
          <fpage>407</fpage>
          <lpage>419</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>John</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abhishek</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Ajayan</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sanoop</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>VR</given-names>
            </name>
          </person-group>
          <article-title>Real-time facial emotion recognition system with improved preprocessing and feature extraction</article-title>
          <year>2020</year>
          <month>10</month>
          <day>06</day>
          <conf-name>2020 Third International Conference on Smart Systems and Inventive Technology (ICSSIT)</conf-name>
          <conf-date>August 20-22, 2020</conf-date>
          <conf-loc>Tirunelveli, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icssit48917.2020.9214207</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Minaee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Minaei</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abdolrashidi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep-emotion: Facial expression recognition using attentional convolutional network</article-title>
          <source>Sensors (Basel)</source>
          <year>2021</year>
          <month>04</month>
          <day>27</day>
          <volume>21</volume>
          <issue>9</issue>
          <fpage>3046</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s21093046"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s21093046</pub-id>
          <pub-id pub-id-type="medline">33925371</pub-id>
          <pub-id pub-id-type="pii">s21093046</pub-id>
          <pub-id pub-id-type="pmcid">PMC8123912</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>HW</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>VD</given-names>
            </name>
            <name name-style="western">
              <surname>Vonikakis</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for emotion recognition on small datasets using transfer learning</article-title>
          <year>2015</year>
          <month>11</month>
          <conf-name>ICMI '15: International Conference on Multimodal Interaction</conf-name>
          <conf-date>November 9-13, 2015</conf-date>
          <conf-loc>Seattle, Washington, USA</conf-loc>
          <fpage>443</fpage>
          <lpage>449</lpage>
          <pub-id pub-id-type="doi">10.1145/2818346.2830593</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Carcagnì</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mazzeo</surname>
              <given-names>PL</given-names>
            </name>
            <name name-style="western">
              <surname>Spagnolo</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cazzato</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Distante</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Analysis of facial information for healthcare applications: A survey on computer vision-based approaches</article-title>
          <source>Information (Basel)</source>
          <year>2020</year>
          <month>02</month>
          <day>26</day>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>128</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mdpi-res.com/d_attachment/information/information-11-00128/article_deploy/information-11-00128-v2.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/info11030128</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Surcinelli</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Andrei</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Montebarocci</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Grandi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Emotion recognition of facial expressions presented in profile</article-title>
          <source>Psychol Rep</source>
          <year>2021</year>
          <month>05</month>
          <day>26</day>
          <fpage>332941211018403</fpage>
          <pub-id pub-id-type="doi">10.1177/00332941211018403</pub-id>
          <pub-id pub-id-type="medline">34039106</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>TT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>QVH</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Huynh-The</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nahavandi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>TT</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for deepfakes creation and detection: A survey</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on February 6, 2022
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/1909.11573.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guarnera</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Giudice</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Battiato</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>DeepFake detection by analyzing convolutional traces</article-title>
          <year>2020</year>
          <month>07</month>
          <day>28</day>
          <conf-name>2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</conf-name>
          <conf-date>June 14-19, 2020</conf-date>
          <conf-loc>Seattle, Washington, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvprw50498.2020.00341</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosler</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Salvi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Murray</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Antonacci</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Bestagini</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tubaro</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stamm</surname>
              <given-names>MC</given-names>
            </name>
          </person-group>
          <article-title>Do deepfakes feel emotions? A semantic approach to detecting deepfakes via emotional inconsistencies</article-title>
          <year>2021</year>
          <month>09</month>
          <day>01</day>
          <conf-name>2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</conf-name>
          <conf-date>June 19-25, 2021</conf-date>
          <conf-loc>Nashville, Tennessee, USA</conf-loc>
          <fpage>1013</fpage>
          <lpage>1022</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sui</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Deepfakes for medical video de-identification: Privacy protection and diagnostic information preservation</article-title>
          <year>2020</year>
          <month>02</month>
          <conf-name>AIES '20: AAAI/ACM Conference on AI, Ethics, and Society</conf-name>
          <conf-date>February 7-9, 2020</conf-date>
          <conf-loc>New York, New York, USA</conf-loc>
          <fpage>414</fpage>
          <lpage>420</lpage>
          <pub-id pub-id-type="doi">10.1145/3375627.3375849</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence, deepfakes and a future of ectypes</article-title>
          <source>Philos Technol</source>
          <year>2018</year>
          <month>8</month>
          <day>1</day>
          <volume>31</volume>
          <issue>3</issue>
          <fpage>317</fpage>
          <lpage>321</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-018-0325-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kietzmann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>LW</given-names>
            </name>
            <name name-style="western">
              <surname>McCarthy</surname>
              <given-names>IP</given-names>
            </name>
            <name name-style="western">
              <surname>Kietzmann</surname>
              <given-names>TC</given-names>
            </name>
          </person-group>
          <article-title>Deepfakes: Trick or treat?</article-title>
          <source>Bus Horiz</source>
          <year>2020</year>
          <volume>63</volume>
          <issue>2</issue>
          <fpage>135</fpage>
          <lpage>146</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bushor.2019.11.006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhizhong</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Meiying</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yiyu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jiahua</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ziyun</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Minghong</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yuxian</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Smart Monitoring Re-Upgraded&#8212;Introducing Facial Emotion Recognition [in Chinese]</article-title>
          <source>Computer and Communication</source>
          <year>2018</year>
          <month>10</month>
          <day>25</day>
          <issue>175</issue>
          <fpage>29</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.29917/CCLTJ</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siarohin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lathuilière</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tulyakov</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ricci</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sebe</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>First order motion model for image animation</article-title>
          <year>2019</year>
          <month>12</month>
          <conf-name>33rd International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 8-14, 2019</conf-date>
          <conf-loc>Vancouver, Canada</conf-loc>
          <fpage>7137</fpage>
          <lpage>7147</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Markides</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The importance of good communication between patient and health professionals</article-title>
          <source>J Pediatr Hematol Oncol</source>
          <year>2011</year>
          <month>10</month>
          <volume>33 Suppl 2</volume>
          <fpage>S123</fpage>
          <lpage>S125</lpage>
          <pub-id pub-id-type="doi">10.1097/MPH.0b013e318230e1e5</pub-id>
          <pub-id pub-id-type="medline">21952568</pub-id>
          <pub-id pub-id-type="pii">00043426-201110001-00011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kerasidou</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and the ongoing need for empathy, compassion and trust in healthcare</article-title>
          <source>Bull World Health Organ</source>
          <year>2020</year>
          <month>04</month>
          <day>01</day>
          <volume>98</volume>
          <issue>4</issue>
          <fpage>245</fpage>
          <lpage>250</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/32284647"/>
          </comment>
          <pub-id pub-id-type="doi">10.2471/BLT.19.237198</pub-id>
          <pub-id pub-id-type="medline">32284647</pub-id>
          <pub-id pub-id-type="pii">BLT.19.237198</pub-id>
          <pub-id pub-id-type="pmcid">PMC7133472</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thom</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Pawlson</surname>
              <given-names>LG</given-names>
            </name>
          </person-group>
          <article-title>Measuring patients' trust in physicians when assessing quality of care</article-title>
          <source>Health Aff (Millwood)</source>
          <year>2004</year>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>124</fpage>
          <lpage>132</lpage>
          <pub-id pub-id-type="doi">10.1377/hlthaff.23.4.124</pub-id>
          <pub-id pub-id-type="medline">15318572</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bogiatzaki</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Frengidou</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Savakis</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Trigoni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Galanis</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Anagnostopoulos</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Empathy and burnout of healthcare professionals in public hospitals of Greece</article-title>
          <source>Int J Caring Sci</source>
          <year>2019</year>
          <volume>12</volume>
          <issue>2</issue>
          <fpage>611</fpage>
          <lpage>626</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.internationaljournalofcaringsciences.org/docs/4_boyiatzaki_original_12_2.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>YU</given-names>
            </name>
            <name name-style="western">
              <surname>Yoo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>YH</given-names>
            </name>
            <name name-style="western">
              <surname>Shim</surname>
              <given-names>WH</given-names>
            </name>
          </person-group>
          <article-title>De-identification of facial features in magnetic resonance images: Software development using deep learning technology</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>12</month>
          <day>10</day>
          <volume>22</volume>
          <issue>12</issue>
          <fpage>e22739</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/12/e22739/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/22739</pub-id>
          <pub-id pub-id-type="medline">33208302</pub-id>
          <pub-id pub-id-type="pii">v22i12e22739</pub-id>
          <pub-id pub-id-type="pmcid">PMC7759440</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
