<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v22i12e22739</article-id>
      <article-id pub-id-type="pmid">33208302</article-id>
      <article-id pub-id-type="doi">10.2196/22739</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>De-Identification of Facial Features in Magnetic Resonance Images: Software Development Using Deep Learning Technology</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Kukafka</surname>
            <given-names>Rita</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Marcus</surname>
            <given-names>Daniel</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Phillips</surname>
            <given-names>Andelka</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>N B</surname>
            <given-names>Prakash</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Jeong</surname>
            <given-names>Yeon Uk</given-names>
          </name>
          <degrees>BSc, PharmD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8807-9347</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Yoo</surname>
            <given-names>Soyoung</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2953-508X</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Young-Hak</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3610-486X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Shim</surname>
            <given-names>Woo Hyun</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Medical Science</institution>
            <institution>Asan Medical Institute of Convergence Science and Technology, Asan Medical Center</institution>
            <institution>University of Ulsan College of Medicine</institution>
            <addr-line>88, Olympic-ro 43-Gil, Songpa-gu</addr-line>
            <addr-line>Seoul, 05505</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 3010 2775</phone>
            <email>swh@amc.seoul.kr</email>
          </address>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7251-2916</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Medical Science</institution>
        <institution>Asan Medical Institute of Convergence Science and Technology, Asan Medical Center</institution>
        <institution>University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Human Research Protection Center</institution>
        <institution>Asan Institute of Life Sciences, Asan Medical Center</institution>
        <institution>University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Division of Cardiology</institution>
        <institution>Department of Internal Medicine, Asan Medical Center</institution>
        <institution>University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Information Medicine</institution>
        <institution>Asan Medical Center</institution>
        <institution>University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Radiology</institution>
        <institution>Asan Medical Center</institution>
        <institution>University of Ulsan College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Woo Hyun Shim <email>swh@amc.seoul.kr</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>12</month>
        <year>2020</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>10</day>
        <month>12</month>
        <year>2020</year>
      </pub-date>
      <volume>22</volume>
      <issue>12</issue>
      <elocation-id>e22739</elocation-id>
      <history>
        <date date-type="received">
          <day>22</day>
          <month>7</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>19</day>
          <month>8</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>9</day>
          <month>9</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>12</day>
          <month>11</month>
          <year>2020</year>
        </date>
      </history>
      <copyright-statement>©Yeon Uk Jeong, Soyoung Yoo, Young-Hak Kim, Woo Hyun Shim. Originally published in the Journal of Medical Internet Research (http://www.jmir.org), 10.12.2020.</copyright-statement>
      <copyright-year>2020</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on http://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="http://www.jmir.org/2020/12/e22739/" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>High-resolution medical images that include facial regions can be used to recognize the subject’s face when reconstructing 3-dimensional (3D)-rendered images from 2-dimensional (2D) sequential images, which might constitute a risk of infringement of personal information when sharing data. According to the Health Insurance Portability and Accountability Act (HIPAA) privacy rules, full-face photographic images and any comparable image are direct identifiers and considered as protected health information. Moreover, the General Data Protection Regulation (GDPR) categorizes facial images as biometric data and stipulates that special restrictions should be placed on the processing of biometric data.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to develop software that can remove the header information from Digital Imaging and Communications in Medicine (DICOM) format files and facial features (eyes, nose, and ears) at the 2D sliced-image level to anonymize personal information in medical images.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A total of 240 cranial magnetic resonance (MR) images were used to train the deep learning model (144, 48, and 48 for the training, validation, and test sets, respectively, from the Alzheimer's Disease Neuroimaging Initiative [ADNI] database). To overcome the small sample size problem, we used a data augmentation technique to create 576 images per epoch. We used attention-gated U-net for the basic structure of our deep learning model. To validate the performance of the software, we adapted an external test set comprising 100 cranial MR images from the Open Access Series of Imaging Studies (OASIS) database.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The facial features (eyes, nose, and ears) were successfully detected and anonymized in both test sets (48 from ADNI and 100 from OASIS). Each result was manually validated in both the 2D image plane and the 3D-rendered images. Furthermore, the ADNI test set was verified using Microsoft Azure's face recognition artificial intelligence service. By adding a user interface, we developed and distributed (via GitHub) software named “Deface program” for medical images as an open-source project.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>We developed deep learning–based software for the anonymization of MR images that distorts the eyes, nose, and ears to prevent facial identification of the subject in reconstructed 3D images. It could be used to share medical big data for secondary research while making both data providers and recipients compliant with the relevant privacy regulations.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>de-identification</kwd>
        <kwd>privacy protection</kwd>
        <kwd>personal information protection</kwd>
        <kwd>medical image</kwd>
        <kwd>deep learning</kwd>
        <kwd>facial feature detection</kwd>
        <kwd>HIPAA</kwd>
        <kwd>GDPR</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>It is becoming important to handle and share big data in the health care field, and accordingly, there is a big trend to share and protect individual patient data for secondary research [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. To utilize big data, data anonymization is necessary so as not to violate laws for personal privacy such as those stipulated by the Health Insurance Portability and Accountability Act (HIPAA) in the United States and General Data Protection Regulation (GDPR) in Europe [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. There is a trade-off between data usability and privacy protection. Nevertheless, sufficient administrative and technical measures for previously collected information in accordance with personal information protection regulations are necessary when using the information secondarily without consent.</p>
      <p>High-resolution magnetic resonance (MR) images of the head risk exposing a subject’s face, which can be regarded at the level of photography by facial reconstruction [<xref ref-type="bibr" rid="ref6">6</xref>]. According to HIPAA's privacy rules, full-face photographic images and any comparable images are considered to be protected health information (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Budin et al [<xref ref-type="bibr" rid="ref7">7</xref>] tested human observer recognition of 3-dimensional (3D)-rendered MR images and reported that the likelihood of correctly matching a 3D-rendered face image with a portrait of that person is higher than random guessing. Additionally, anyone can reproduce the 3D facial image from head MR images through 3D volume rendering using freeware. Therefore, it is necessary to anonymize medical images that include the face.</p>
      <p>Facial image anonymization is not fully conducted in public medical image repositories, while some public databases even provide the original images. For example, the Alzheimer's Disease Neuroimaging Initiative (ADNI) [<xref ref-type="bibr" rid="ref8">8</xref>] and Open Access Series of Imaging Studies (OASIS) [<xref ref-type="bibr" rid="ref9">9</xref>] usually anonymize only metadata, while the original MR images are shared in a nonanonymized form. Anonymizing only the metadata from the medical image is not sufficient to prevent identification from the remaining medical images after removing the metadata, and existing anonymizing software is rarely used to prevent the possibility of recognition due to concerns over the deterioration of the brain image quality [<xref ref-type="bibr" rid="ref10">10</xref>].</p>
      <p>Previous approaches to anonymizing faces in medical images usually remove the entire facial region using a voxel classifier and mask the brain to preserve the brain image using a skull stripping technique or a convex hull [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. However, since using a voxel classifier and skull stripping can be affected by variation in the characteristics of the MR images, they can produce unexpected results from heterogeneous MR image data [<xref ref-type="bibr" rid="ref13">13</xref>]. In addition, the solution of cutting off the face has the limitation of information loss concerning the eye orbits, nasal cavity, and other underlying structures [<xref ref-type="bibr" rid="ref6">6</xref>]. In anonymization work for medical image sharing, consistent processing of heterogeneous data and minimizing data loss will help researchers using secondary data.</p>
      <p>The aim of this study was to develop software that can selectively distort the eyes, nose, and ears, which are the main factors for identifying a face, and make a robust anonymization algorithm that can be used on various MR images.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Defacing Process Overview</title>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref> schematically illustrates our Deface program development process (<xref rid="figure1" ref-type="fig">Figure 1</xref>A) and an application example (<xref rid="figure1" ref-type="fig">Figure 1</xref>B). We created a deep learning model that learns the labels of the eyes, nose, and ears. The training set consisted of 3D cranial MR images and manually marked regions corresponding to each MR image. We implemented data augmentation to increase the diversity of the training data. The deep learning model was developed based on a 3D convolutional neural network. The trained model, called a “facial feature detector,” can detect the eyes, nose, and ears in a 3D MR image. After the regions of the facial features have been obtained from a nonanonymous 3D MR image through the facial feature detector, the regions are anonymized according to each characteristic.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Process of (A) developing the facial feature detector, which is a deep learning model that can detect the eyes, nose, and ears in 3-dimensional (3D) magnetic resonance (MR) images, and (B) distorting the facial features in nonanonymized cranial MR images.</p>
          </caption>
          <graphic xlink:href="jmir_v22i12e22739_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Image Acquisition</title>
        <p>The Neuroimaging Informatics Technology Initiative (NIFTI) and Digital Imaging and Communications in Medicine (DICOM) formats of MR imaging (MRI) files were collected from the ADNI database (Magnetization Prepared RApid Gradient Echo [MPRAGE] scans; voxel size: 1.0 x 1.0 x 1.2 mm; in-plane resolution: 1.0 x 1.0 mm<sup>2</sup>; interslice spacing: 1.2 mm; field of view [FOV]: 240 x 256 x 160 mm). A total of 240 NIFTI format files were used in the creation of the deep learning model: 144, 48, and 48 for the training, validation, and test sets, respectively.</p>
        <p>Other NIFTI-format MRI files were collected from the OASIS-3 database for use as the external test set. The 100 MR images differed in orientation, resolution, and intensity from those in the ADNI data (MPRAGE scans; voxel size: 1.0 x 1.0 x 1.0 mm; FOV: 176 x 256 x 256 mm).</p>
      </sec>
      <sec>
        <title>Labeling</title>
        <p>In general, supervised learning requires pairs consisting of the input object and the desired output value. In this study, the input object is a 3D cranial MR image, and the output values are regions containing the eyes, nose, or ears (the facial features). We manually drew labels that were the same as the desired output values in all of the ADNI and 20 OASIS-3 images using the AFNI program [<xref ref-type="bibr" rid="ref14">14</xref>]. In <xref rid="figure1" ref-type="fig">Figure 1</xref>A, the manually drawn labels show the eyes (red) and nose (green), which are marked as spherical shapes at the corresponding positions, and the ears (blue), which are marked as the auricle regions. Each center point of the eyes and nose area was labeled in the form of a sphere. Since ears have different sizes and shapes for each person, only the auricle of the ear was segmented and labeled.</p>
      </sec>
      <sec>
        <title>Data Augmentation</title>
        <p>Three image augmentations were performed per 1 image in the training set. The augmented images were randomly transformed and then used for model training. As a result, 576 images per epoch were trained. Data augmentation was performed by filtering Gaussian noise, rotating from –15° to +15° around each axis in the image, randomly flipping each axis, randomly transposing between the axes, shifting each axis from 0 to 0.10, shearing each axis from 0 to 0.20, and resizing the image from 0.90 to 1.10 times the original size. After executing 1 image augmentation per original image, the validation set was validated for a total of 96 images per epoch.</p>
      </sec>
      <sec>
        <title>The Deep Learning Algorithm</title>
        <p>The deep learning model was trained with the manually labeled data. We created a deep learning model that can generate labels similar to manually drawn labels on the regions of the eyes, nose, and ears from cranial MR image input. The basic structure of our deep learning model is attention-gated U-net [<xref ref-type="bibr" rid="ref15">15</xref>]. The detailed structure of our model can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
      </sec>
      <sec>
        <title>Metric and Loss Function</title>
        <p>In machine learning, the “loss” or “error” variable is set to achieve the goals through the training of the model. In addition, the “metric” variable indicates how much we have achieved the goals through the model training. A machine learning model has metrics to indicate the achievement rate and is trained to reduce loss.</p>
        <p>In this study, the metric to determine whether the model can make labels similar to the manually drawn labels is the Dice coefficient, which is double the area of overlap divided by the total number of pixels or voxels in both images: It returns 1 if the predicted regions of the model exactly match the correct answers from the labels and 0 if the regions do not overlap. When the region of the label is <italic>Y</italic> and the region predicted by the trained model is <italic>X</italic>, the Dice coefficient can be represented by:</p>
        <p>
          <graphic xlink:href="jmir_v22i12e22739_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </p>
        <p>This can also be expressed as:</p>
        <p>
          <graphic xlink:href="jmir_v22i12e22739_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </p>
        <p>where <italic>TP</italic> is the number of true positives, <italic>FP</italic> is the number of false positives, and <italic>FN</italic> is the number of false negatives.</p>
        <p>The loss function in our model was: 1 – the Dice coefficient + 0.1 × categorical cross-entropy. Categorical cross-entropy is the loss function mainly used in multiclass classification, and it induces our model to learn to distinguish whether a specific pixel is from the eye, nose, ear, or another area. This model computes the loss function between the correct answer labels and the predictive labels and is trained in the direction of loss reduction (toward zero).</p>
        <p>The model calculated Dice coefficients for 96 images in the validation set for each epoch. After 5 epochs at the highest metric score, learning was stopped when there was no further improvement.</p>
      </sec>
      <sec>
        <title>Image Processing</title>
        <p>Here, we describe the process of image anonymization based on the output of the facial feature detector. The deep learning model was trained by identifying the eyes, nose, and ears (5 regions), after which the program proceeded with the image anonymization process.</p>
        <p>Identification of the eyes, nose, and ears was automatically conducted on different images according to each feature by the deep learning algorithm. The detection region for the eyes is a spherical area covering the eyeball and the skin around the eye. The process of anonymizing the surface of the eye consists of 2 steps. First, based on the detection regions for the eyes, 2 boxes capable of covering the periocular area (the skin around the eyes) are formed. Second, the contour of the face surface was obtained within the range of the boxes, and a range of ±2 voxels along each axis from that surface was modified to the same intensity value. The nose was processed by removing the image and setting the intensity of the voxels to 0 in the area where the binding box for the detected region was doubled to each side. The detection region for the ears is the protruding part called the auricle. For the anonymization of the ears, random values were assigned to each voxel of the detection regions of the ears, and those values were generated in the noise range of the air in the MR image.</p>
        <p>In the case of the medical images in DICOM format, it is necessary to anonymize the personal information in the header, and so we carried this out on the 20 DICOM headers using the Deface program (the DICOM headers are listed in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). The list was selected based on the HIPAA safe harbor provision [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>In the 23rd epoch, the average Dice coefficient of the validation set was the highest at 0.821. In the 28th epoch, the training of the model was stopped because the Dice score of the validation set did not improve. The average Dice score of 576 images trained over 23 epochs was 0.801. The average Dice scores using the test sets comprising 48 ADNI and 20 OASIS-3 images were 0.859 and 0.794, respectively. The Deface program was applied to the ADNI data, but anonymization was performed on the OASIS-3 data without any additional manipulation.</p>
      <p><xref rid="figure1" ref-type="fig">Figure 1</xref>B shows the process of distorting a sample nonanonymized cranial MR image. Three axial views of the cross-sectional MR image were obtained from a representative image in the ADNI test set. The first is the nonanonymized cranial MR image, the second is an MR image with the detection regions (the labels of the eyes, nose, and ears predicted by the facial feature detector) as output for the facial feature detector, and the third is the final anonymized image based on the detection regions (red marks denoting the eyes, green marks denoting the nose, and blue marks denoting the ears). It took 177.91 seconds to save the detection region pictures and distorted MR images as NIFTI format files from 48 images of the ADNI test set. The image was distorted according to the characteristics of each facial feature. The 3D box space containing the entire volume of the nose was removed. The eyes were covered with similar brightness intensity on the surface. For the ears, the detection regions were replaced by space with noise.</p>
      <p>We applied the Deface program to 48 ADNI images and 100 OASIS-3 images as the test sets and then confirmed the accuracy of distorting the facial features in the 3D reconstructions of the face (<xref rid="figure2" ref-type="fig">Figure 2</xref> shows the 3D volume-rendered images). Since face reconstruction is in violation of the OASIS data use terms, OASIS data were not included in the figure. A sample image was selected from the ADNI test sets, and we compared the images before and after anonymization. As shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, the facial features clearly identifiable in the 3D images beforehand are distorted after processing: The auricle and nose have disappeared, and the eyes appear blurry.</p>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>3-dimensional (3D) volume rendering of magnetic resonance images (MRI), showing the raw and distorted images from the Alzheimer's Disease Neuroimaging Initiative (ADNI).</p>
        </caption>
        <graphic xlink:href="jmir_v22i12e22739_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>The de-identification performance of the Deface program was validated using Microsoft Azure’s facial recognition artificial intelligence service (Face detection_01 model) [<xref ref-type="bibr" rid="ref17">17</xref>]. We found that all 48 reconstructed face images from the ADNI test set were de-identified. Although 46 unmodified images were recognized as faces and location information of face landmarks was derived, the faces in all 48 defaced images were not recognized. The other 2 unmodified images failed the face recognition process because they were noisy or parts of the face were cropped. The result of the face detection service for 1 representative image of the ADNI test set can be found in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>In this study, we developed a program that can recognize the eyes, nose, and ears in MR images by applying artificial intelligence, after which they were blurred. We implemented the facial feature detector based on the 3D U-net deep learning model to automatically detect the eyes, nose, and ears. The reason for the development of this anonymization program is that 3D facial reconstruction of high-resolution MRI can show an individual’s similarity to a facial photograph [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>], which contravenes the rules for protecting personal information required by regulating bodies such as HIPAA. Anonymization is required for the sharing of medical image data so as not to infringe on the personal information rules. However, distorting images is disadvantageous for secondary research due to the loss of information, but too little distorting leads to the possibility of recognition [<xref ref-type="bibr" rid="ref10">10</xref>]. We attempted to anonymize the face while minimizing the loss of information by modifying only the surface of the eyes, nose, and ears in cranial MR images. In addition, in the case of the DICOM format, a function to remove text including personal information that can be obtained from the header was added. We released the source code to GitHub [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref>A shows the process of developing the facial feature detector. The cranial MR images and manually marked facial features (eyes, nose, and ears) were used as the training set. We drew different labels for each facial feature for the manually drawn labels. Although the eyes and nose can be specified in a range of only the central location information, the shape of the ear varies relatively widely among people. Furthermore, because the ears are adjacent to the brain, images of the brain can be obscured during the image distortion. Therefore, only the segmented regions of the auricle were used as labels so that the program did not select regions other than the ear.</p>
        <p>Although the training data for the deep learning model comprised 144 images from ADNI, we introduced data augmentation to achieve robust performance in other MRI standards (<xref rid="figure1" ref-type="fig">Figure 1</xref>A). The training set was augmented via various techniques so that the facial feature detector could show robust performance even with unknown data. We evaluated OASIS-3 data in which the adjustment, orientation, FOV, resolution, and intensity histograms were completely different from the ADNI data in the training set. We confirmed that the facial features were distorted in 100 OASIS-3 images by the MRI viewer. Labels were manually drawn on 20 OASIS-3 images, and our facial feature detector worked well, with an average Dice coefficient of 0.794. This has the potential to assist in the construction of anonymous big data with different MRI standards collected from multiple institutions.</p>
        <p>We applied different processes to blur each facial feature location. The eyes are close to the frontal lobe, so they were distorted only along the surface. The intensity of the pixels was converted to a value similar to the surface of the skin to make it appear on the surface when 3D rendering. Since the nose is usually the most protuberant part of the face, the area that covers the entire range of the nose was deleted to make it impossible to infer the original shape of the nose. The 3D box space containing the entire volume of the nose was removed to prevent recognition via the nose shape. The ears were only segmented by the facial feature detector, so only the corresponding regions were distorted to preserve the brain image. If regions such as the shape of the ears are simply removed, the shape of the ears may be revealed by the noise from air in the MR image. We reduced the possibility of recognition by replacing the ear regions with generated random values within the air noise range of the input MR image.</p>
        <p>3D facial reconstruction of high-resolution MRI can be generated by a freeware MRI viewer [<xref ref-type="bibr" rid="ref19">19</xref>]. Moreover, the faces of patients in MR images from publicly available data can be revealed (<xref rid="figure2" ref-type="fig">Figure 2</xref>). As the OASIS-3 images are smoother than the ADNI images, they can be reconstructed with a clearer face image in the case of high-resolution MRI. However, we showed that the face could be distorted in the 3D-rendered image after applying our Deface program. Since the image was preserved except for the user-designated facial features, researchers can obtain the necessary information from MRI images without revealing the patient’s identity.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>Previous studies have applied techniques to remove the entire face regions, and the evaluation of anonymization was via direct human observation of face landmarks [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. In another technique, the Human Connectome Project [<xref ref-type="bibr" rid="ref20">20</xref>], a public repository of MRI images, performed distortion by modifying a certain thickness of the facial surface [<xref ref-type="bibr" rid="ref13">13</xref>]. We distorted the ears in addition to the face surface, with options to blur the eyes, nose, and ears separately, as may be required when conducting secondary research. The images with eyes, nose, and ears anonymized were verified by applying a face recognition tool. Furthermore, while previous studies have applied algorithms to process single MRI datasets, our Deface program was tested on 2 different MRI datasets to improve compatibility.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Among the facial features, wrinkles or the mouth can be identifiers but were not considered in this study. To train the deep learning model, we needed to manually draw labels that mark facial features. We are planning to construct a training dataset that takes into account additional facial features for further study. Once labeled training data comprising any desired facial feature have been constructed, our facial feature detector can evolve through deep learning.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Patients’ faces can be reconstructed from high-resolution cranial MR images at the photograph level, so there is a risk of infringing the personal information rules prescribed by HIPAA and GDPR when sharing data. Hence, we suggested a method to perceive the facial features in MR images via deep learning technology to specifically blur certain facial features. Users can create anonymization regions that blur the desired parts of the patient’s face (eyes, nose, or ears), which helps provide data for secondary research without violating relevant personal information regulations.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Information Protection Regulations.</p>
        <media xlink:href="jmir_v22i12e22739_app1.docx" xlink:title="DOCX File , 21 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Deep learning model structure.</p>
        <media xlink:href="jmir_v22i12e22739_app2.docx" xlink:title="DOCX File , 127 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>DICOM header with personal information.</p>
        <media xlink:href="jmir_v22i12e22739_app3.docx" xlink:title="DOCX File , 14 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Face recognition test.</p>
        <media xlink:href="jmir_v22i12e22739_app4.docx" xlink:title="DOCX File , 249 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">2D</term>
          <def>
            <p>two-dimensional</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">3D</term>
          <def>
            <p>three-dimensional</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ADNI</term>
          <def>
            <p>Alzheimer's Disease Neuroimaging Initiative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DICOM</term>
          <def>
            <p>Digital Imaging and Communications in Medicine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">FOV</term>
          <def>
            <p>field of view</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">GDPR</term>
          <def>
            <p>General Data Protection Regulation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">HIPAA</term>
          <def>
            <p>Health Insurance Portability and Accountability Act</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">MPRAGE</term>
          <def>
            <p>Magnetization Prepared RApid Gradient Echo</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MR</term>
          <def>
            <p>magnetic resonance</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">MRI</term>
          <def>
            <p>magnetic resonance imaging</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">NIFTI</term>
          <def>
            <p>Neuroimaging Informatics Technology Initiative</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">OASIS</term>
          <def>
            <p>Open Access Series of Imaging Studies</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was supported by an Institute for Information &#38; Communications Technology Promotion (IITP) grant funded by the Korean government (MSIT) (2018-0-00861), Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Education (2017R1D1A1B03030713), and a grant from the Korea Health Technology R&#38;D Project through the Korea Health Industry Development Institute (KHIDI), funded by the Ministry of Health &#38; Welfare, Republic of Korea (grant number: HI18C2383). We thank the Biomedical Computing core facility at the ConveRgence mEDIcine research cenTer (CREDIT), Asan Medical Center for their technical support and instrumentation funded by Asan Institute for Life Sciences (2018-776).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Raghupathi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Raghupathi</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Big data analytics in healthcare: promise and potential</article-title>
          <source>Health Inf Sci Syst</source>
          <year>2014</year>
          <volume>2</volume>
          <fpage>3</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25825667"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/2047-2501-2-3</pub-id>
          <pub-id pub-id-type="medline">25825667</pub-id>
          <pub-id pub-id-type="pii">14</pub-id>
          <pub-id pub-id-type="pmcid">PMC4341817</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vallance</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chalmers</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Secure use of individual patient data from clinical trials</article-title>
          <source>The Lancet</source>
          <year>2013</year>
          <month>09</month>
          <day>28</day>
          <volume>382</volume>
          <issue>9898</issue>
          <fpage>1073</fpage>
          <lpage>1074</lpage>
          <pub-id pub-id-type="doi">10.1016/S0140-6736(13)62001-2</pub-id>
          <pub-id pub-id-type="medline">24075034</pub-id>
          <pub-id pub-id-type="pii">S0140-6736(13)62001-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>El Emam</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rodgers</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Malin</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Anonymising and sharing individual patient data</article-title>
          <source>BMJ</source>
          <year>2015</year>
          <month>03</month>
          <day>20</day>
          <volume>350</volume>
          <fpage>h1139</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25794882"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.h1139</pub-id>
          <pub-id pub-id-type="medline">25794882</pub-id>
          <pub-id pub-id-type="pmcid">PMC4707567</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chevrier</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Foufi</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Gaudet-Blavignac</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Robert</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lovis</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Use and Understanding of Anonymization and De-Identification in the Biomedical Literature: Scoping Review</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>05</month>
          <day>31</day>
          <volume>21</volume>
          <issue>5</issue>
          <fpage>e13484</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/5/e13484/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/13484</pub-id>
          <pub-id pub-id-type="medline">31152528</pub-id>
          <pub-id pub-id-type="pii">v21i5e13484</pub-id>
          <pub-id pub-id-type="pmcid">PMC6658290</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kayaalp</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Patient Privacy in the Era of Big Data</article-title>
          <source>Balkan Med J</source>
          <year>2018</year>
          <month>01</month>
          <day>20</day>
          <volume>35</volume>
          <issue>1</issue>
          <fpage>8</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.4274/balkanmedj.2017.0966"/>
          </comment>
          <pub-id pub-id-type="doi">10.4274/balkanmedj.2017.0966</pub-id>
          <pub-id pub-id-type="medline">28903886</pub-id>
          <pub-id pub-id-type="pmcid">PMC5820452</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prior</surname>
              <given-names>FW</given-names>
            </name>
            <name name-style="western">
              <surname>Brunsden</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hildebolt</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Nolan</surname>
              <given-names>TS</given-names>
            </name>
            <name name-style="western">
              <surname>Pringle</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vaishnavi</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Larson-Prior</surname>
              <given-names>LJ</given-names>
            </name>
          </person-group>
          <article-title>Facial recognition from volume-rendered magnetic resonance imaging data</article-title>
          <source>IEEE Trans Inf Technol Biomed</source>
          <year>2009</year>
          <month>01</month>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>5</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1109/TITB.2008.2003335</pub-id>
          <pub-id pub-id-type="medline">19129018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Budin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bullitt</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Preventing facial recognition when rendering MR images of the head in three dimensions</article-title>
          <source>Med Image Anal</source>
          <year>2008</year>
          <month>06</month>
          <volume>12</volume>
          <issue>3</issue>
          <fpage>229</fpage>
          <lpage>39</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/18069044"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2007.10.008</pub-id>
          <pub-id pub-id-type="medline">18069044</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(07)00102-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC2504704</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <source>Alzheimer's disease neuroimaging initiative</source>
          <year>2020</year>
          <access-date>2019-06-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://adni.loni.usc.edu/">http://adni.loni.usc.edu/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <source>OASIS: Open access series of imaging studies</source>
          <year>2020</year>
          <access-date>2019-08-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.oasis-brains.org/">https://www.oasis-brains.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schwarz</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>Kremers</surname>
              <given-names>WK</given-names>
            </name>
            <name name-style="western">
              <surname>Therneau</surname>
              <given-names>TM</given-names>
            </name>
            <name name-style="western">
              <surname>Sharp</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Gunter</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Vemuri</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Arani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Spychalla</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kantarci</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Knopman</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Petersen</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Jack</surname>
              <given-names>CR</given-names>
            </name>
          </person-group>
          <article-title>Identification of Anonymous MRI Research Participants with Face-Recognition Software</article-title>
          <source>N Engl J Med</source>
          <year>2019</year>
          <month>10</month>
          <day>24</day>
          <volume>381</volume>
          <issue>17</issue>
          <fpage>1684</fpage>
          <lpage>1686</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31644852"/>
          </comment>
          <pub-id pub-id-type="doi">10.1056/NEJMc1908881</pub-id>
          <pub-id pub-id-type="medline">31644852</pub-id>
          <pub-id pub-id-type="pmcid">PMC7091256</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schimke</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kuehler</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hale</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Preserving privacy in structural neuroimages</article-title>
          <source>Data and Applications Security and Privacy XXV</source>
          <year>2011</year>
          <publisher-loc>Heidelberg, Germany</publisher-loc>
          <publisher-name>Springer Publishing Company</publisher-name>
          <fpage>301</fpage>
          <lpage>8</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bischoff-Grethe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ozyurt</surname>
              <given-names>IB</given-names>
            </name>
            <name name-style="western">
              <surname>Busa</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Quinn</surname>
              <given-names>BT</given-names>
            </name>
            <name name-style="western">
              <surname>Fennema-Notestine</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bondi</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Jernigan</surname>
              <given-names>TL</given-names>
            </name>
            <name name-style="western">
              <surname>Dale</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>GG</given-names>
            </name>
            <name name-style="western">
              <surname>Fischl</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A technique for the deidentification of structural brain MR images</article-title>
          <source>Hum Brain Mapp</source>
          <year>2007</year>
          <month>09</month>
          <volume>28</volume>
          <issue>9</issue>
          <fpage>892</fpage>
          <lpage>903</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/17295313"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/hbm.20312</pub-id>
          <pub-id pub-id-type="medline">17295313</pub-id>
          <pub-id pub-id-type="pmcid">PMC2408762</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Milchenko</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Marcus</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Obscuring surface anatomy in volumetric imaging data</article-title>
          <source>Neuroinformatics</source>
          <year>2013</year>
          <month>01</month>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>65</fpage>
          <lpage>75</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22968671"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s12021-012-9160-3</pub-id>
          <pub-id pub-id-type="medline">22968671</pub-id>
          <pub-id pub-id-type="pmcid">PMC3538950</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cox</surname>
              <given-names>RW</given-names>
            </name>
          </person-group>
          <article-title>AFNI: software for analysis and visualization of functional magnetic resonance neuroimages</article-title>
          <source>Comput Biomed Res</source>
          <year>1996</year>
          <month>06</month>
          <volume>29</volume>
          <issue>3</issue>
          <fpage>162</fpage>
          <lpage>73</lpage>
          <pub-id pub-id-type="doi">10.1006/cbmr.1996.0014</pub-id>
          <pub-id pub-id-type="medline">8812068</pub-id>
          <pub-id pub-id-type="pii">S0010480996900142</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schlemper</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Oktay</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Schaap</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Heinrich</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kainz</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Glocker</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Rueckert</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Attention gated networks: Learning to leverage salient regions in medical images</article-title>
          <source>Med Image Anal</source>
          <year>2019</year>
          <month>04</month>
          <volume>53</volume>
          <fpage>197</fpage>
          <lpage>207</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1361-8415(18)30613-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.media.2019.01.012</pub-id>
          <pub-id pub-id-type="medline">30802813</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(18)30613-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <article-title>Title 45, Subtitle A, Subchapter C, Part 164, Subpart E, §164.514</article-title>
          <source>Electronic code of federal regulations</source>
          <year>2020</year>
          <access-date>2020-11-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ecfr.gov/cgi-bin/text-idx?SID=20e0360351a51dd55ee6e80cc9aae47c&#38;node=se45.1.164_1514&#38;rgn=div8">https://www.ecfr.gov/cgi-bin/text-idx?SID=20e0360351a51dd55ee6e80cc9aae47c&#38;node=se45.1.164_1514&#38;rgn=div8</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <article-title>Facial recognition AI service</article-title>
          <source>Microsoft Azure</source>
          <year>2020</year>
          <access-date>2020-11-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://azure.microsoft.com/en-us/services/cognitive-services/face/#demo">https://azure.microsoft.com/en-us/services/cognitive-services/face/#demo</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <article-title>yeonuk-Jeong / De-facer: De-identifier reconstructable facial information in Medical image (CT, MRI)</article-title>
          <source>GitHub</source>
          <year>2020</year>
          <access-date>2020-11-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/yeonuk-Jeong/Defacer">https://github.com/yeonuk-Jeong/Defacer</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Debus</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Floca</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ingrisch</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kompan</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Maier-Hein</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Abdollahi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nolden</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>MITK-ModelFit: A generic open-source framework for model fits and their exploration in medical imaging - design, implementation and application on the example of DCE-MRI</article-title>
          <source>BMC Bioinformatics</source>
          <year>2019</year>
          <month>01</month>
          <day>16</day>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>31</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-018-2588-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12859-018-2588-1</pub-id>
          <pub-id pub-id-type="medline">30651067</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12859-018-2588-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC6335810</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Van Essen</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Barch</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Behrens</surname>
              <given-names>TEJ</given-names>
            </name>
            <name name-style="western">
              <surname>Yacoub</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ugurbil</surname>
              <given-names>K</given-names>
            </name>
            <collab>WU-Minn HCP Consortium</collab>
          </person-group>
          <article-title>The WU-Minn Human Connectome Project: an overview</article-title>
          <source>Neuroimage</source>
          <year>2013</year>
          <month>10</month>
          <day>15</day>
          <volume>80</volume>
          <fpage>62</fpage>
          <lpage>79</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/23684880"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.05.041</pub-id>
          <pub-id pub-id-type="medline">23684880</pub-id>
          <pub-id pub-id-type="pii">S1053-8119(13)00535-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC3724347</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
