<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e66873</article-id><article-id pub-id-type="doi">10.2196/66873</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Artificial Intelligence&#x2013;Enabled Facial Privacy Protection for Ocular Diagnosis: Development and Validation Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Tan</surname><given-names>Haizhu</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Chen</surname><given-names>Hongyu</given-names></name><degrees>BD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wang</surname><given-names>Zhenmao</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" 
rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>He</surname><given-names>Mingguang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wei</surname><given-names>Chiyu</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sun</surname><given-names>Lei</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Xueqin</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shi</surname><given-names>Danli</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Huang</surname><given-names>Chengcheng</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Guo</surname><given-names>Anping</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff8">8</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Preventive Medicine, Shantou University Medical College</institution><addr-line>22 Xinling Rd</addr-line><addr-line>Shantou</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Optoelectronic Information Science and Engineering, Physical and Materials Science College, Guangzhou University</institution><addr-line>Guangzhou</addr-line><country>China</country></aff><aff id="aff3"><institution>Han&#x2019;s Laser Technology Industry Group Co., 
Ltd</institution><addr-line>Shenzhen</addr-line><country>China</country></aff><aff id="aff4"><institution>Joint Shantou International Eye Center of Shantou University and The Chinese University of Hong Kong</institution><addr-line>Shantou</addr-line><country>China (Hong Kong)</country></aff><aff id="aff5"><institution>The Hong Kong Polytechnic University, Kowloon</institution><addr-line>Hong Kong</addr-line><country>China (Hong Kong)</country></aff><aff id="aff6"><institution>Department of Ophthalmology, the Fourth Affiliated Hospital of Harbin Medical University</institution><addr-line>Harbin</addr-line><country>China</country></aff><aff id="aff7"><institution>University of Science and Technology of China</institution><addr-line>Hefei</addr-line><country>China</country></aff><aff id="aff8"><institution>Department of Pharmacy, First Affiliated Hospital of University of Science and Technology of China, Division of Life Sciences and Medicine, University of Science and Technology of China</institution><addr-line>Hefei</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Cardoso</surname><given-names>Taiane de Azevedo</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Abdollahi</surname><given-names>Mirsaeed</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Wu</surname><given-names>Wenjun</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Haizhu Tan, PhD, Department of Preventive Medicine, Shantou University Medical College, 22 Xinling Rd, Shantou, 515031, China, 86 13318055534; <email>linnanqia@126.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date 
pub-type="epub"><day>9</day><month>7</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e66873</elocation-id><history><date date-type="received"><day>25</day><month>09</month><year>2024</year></date><date date-type="rev-recd"><day>02</day><month>04</month><year>2025</year></date><date date-type="accepted"><day>10</day><month>04</month><year>2025</year></date></history><copyright-statement>&#x00A9; Haizhu Tan, Hongyu Chen, Zhenmao Wang, Mingguang He, Chiyu Wei, Lei Sun, Xueqin Wang, Danli Shi, Chengcheng Huang, Anping Guo. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 9.7.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e66873"/><related-article related-article-type="correction-forward" ext-link-type="doi" xlink:href="10.2196/84928" xlink:title="This is a corrected version. 
See correction statement in" xlink:type="simple">https://www.jmir.org/2025/1/e84928</related-article><abstract><sec><title>Background</title><p>Facial biometric data, while valuable for clinical applications, poses substantial privacy and security risks.</p></sec><sec><title>Objective</title><p>This paper aims to address the privacy and security concerns related to facial biometric data and support auxiliary diagnoses, in pursuit of which we developed Digital FaceDefender, an artificial intelligence&#x2013;driven privacy safeguard solution.</p></sec><sec sec-type="methods"><title>Methods</title><p>We constructed a diverse set of digitally synthesized Asian face avatars representing both sexes, spanning ages 5 to 85 years in 10-year increments, using 70,000 facial images and 13,061 Asian face images. Landmark data were separately extracted from both patient and avatar images. Affine transformations ensured spatial alignment, followed by color correction and Gaussian blur to enhance fusion quality. For auxiliary diagnosis, we established 95% CIs for pixel distances within the eye region on a cohort of 1163 individuals, serving as diagnostic benchmarks. Reidentification risk was assessed using the ArcFace model, applied to 2500 images reconstructed via Detailed Expression Capture and Animation (DECA). Finally, Cohen Kappa analyses (n=114) was applied to assess agreement between diagnostic benchmarks and ophthalmologists&#x2019; evaluations.</p></sec><sec sec-type="results"><title>Results</title><p>Compared to the DM method, Digital FaceDefender significantly reduced facial similarity scores (FDface vs raw images: 0.31; FLAME_FDface vs raw images: 0.09) and achieved zero Rank-1 accuracy in Pose #2-#3 and Pose #2-#5, with near-zero accuracy in Pose #4 (0.02) and Pose #5 (0.04), confirming lower reidentification risk. 
Cohen Kappa analysis demonstrated moderate agreement between our benchmarks and ophthalmologists&#x2019; assessments for the left eye (&#x03BA;=0.37) and right eye (&#x03BA;=0.45; both <italic>P</italic>&#x003C;.001), validating diagnostic reliability of the benchmarks. Furthermore, the user-friendly Digital FaceDefender platform has been established and is readily accessible for use.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>In summary, Digital FaceDefender effectively balances privacy protection and diagnostic use.</p></sec></abstract><kwd-group><kwd>facial biometric data</kwd><kwd>Digital FaceDefender</kwd><kwd>privacy protection</kwd><kwd>auxiliary diagnosis</kwd><kwd>artificial intelligence</kwd><kwd>ocular disease</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>With the rapid advancement of artificial intelligence (AI) applications in medical imaging, a vast array of newly generated medical images now encompass a wide range of personal information, including nonbiometric, physiological, behavioral biometric, and soft biometric identifiers [<xref ref-type="bibr" rid="ref1">1</xref>]. These images undergo digitization, storage, transmission, and retrieval by healthcare organizations for various purposes [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. However, this digitalization process has brought about significant concerns regarding security and multifaceted privacy, spanning these identifiers and more. 
In response to these concerns, the primary objective of our study is to develop a model that effectively extracts critical features for auxiliary diagnosis in eye hospitals while simultaneously ensuring patient privacy protection.</p><p>In their seminal work published in Nature Medicine, Yang et al [<xref ref-type="bibr" rid="ref4">4</xref>] introduced the concept of a &#x201C;Digital Mask&#x201D; (DM) designed to protect patient privacy while preserving disease-relevant features critical for diagnosis. While this innovation represents a significant step forward in privacy preservation, Meeus et al [<xref ref-type="bibr" rid="ref5">5</xref>] subsequently raised concerns about the reidentification risks associated with the DM. Specifically, their study extracted frames from facial videos provided by Yang et al [<xref ref-type="bibr" rid="ref4">4</xref>], with one frame serving as a reference image and another used to compute the mask. Facial regions, excluding the eyes, were masked while maintaining facial contours, thus creating the &#x201C;DMface.&#x201D; The Faces Learned with an Articulated Model and Expressions (FLAME) model was then applied to generate a novel facial mask, termed &#x201C;FLAME_DMface,&#x201D; by integrating the Skinned Multi-Person Linear Model body model [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. Meeus et al [<xref ref-type="bibr" rid="ref5">5</xref>] evaluated the reidentification risks by comparing FLAME_DMface with the corresponding reference images using the Additive Angular Margin Loss-based ArcFace model. 
Their findings revealed potential reidentification vulnerabilities [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>Yang et al [<xref ref-type="bibr" rid="ref9">9</xref>] responded by emphasizing the secure maintenance of the original clinical examination videos, arguing that reidentification attacks using the FLAME model would be rendered irrelevant if the original videos were kept inaccessible. While we acknowledge the efforts of Yang et al [<xref ref-type="bibr" rid="ref9">9</xref>] to address patient privacy concerns and mitigate identification risks, we also concur with the findings by Meeus et al [<xref ref-type="bibr" rid="ref5">5</xref>] that the reidentification risks associated with DM persist following FLAME processing. The retention of facial contours, including key regions such as the face, nose, eyes, and mouth, underscores the necessity for refining deidentification techniques to further reduce the risk of unauthorized reidentification.</p><p>In clinical environments, the balance between maintaining patient privacy and enabling auxiliary diagnosis is of paramount importance, particularly in specialized fields like ocular disease. In such cases, partial exposure of the eyes and periocular region may be necessary, but DM&#x2019;s focus on extracting disease-relevant features from a limited eye region&#x2014;comprising only the upper and lower eyelids and the iris&#x2014;may be insufficient for comprehensive disease characterization, as shown in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p><p>Furthermore, previous research, such as that by Neumann et al [<xref ref-type="bibr" rid="ref10">10</xref>], has highlighted the negative impact of extensive facial coverings in medical settings. 
Such coverings may hinder clinician-patient communication, diminish empathy [<xref ref-type="bibr" rid="ref11">11</xref>], and reduce diagnostic accuracy [<xref ref-type="bibr" rid="ref12">12</xref>] by obscuring essential facial cues [<xref ref-type="bibr" rid="ref13">13</xref>] related to age, sex, and expression. These challenges became particularly evident during the COVID-19 pandemic and among Muslim women wearing veils [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. As illustrated in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, DM may exacerbate these issues due to its extensive coverage, which is visually comparable to a white plaster cast.</p><p>In our study, we first developed an AI-driven method&#x2014;Digital FaceDefender&#x2014;and assessed the reidentification risks and the mean similarity between images generated using our proposed method and the DM approach. In addition, we developed and validated an auxiliary diagnostic benchmark. We anticipate that this innovative approach will enhance the diagnostic capabilities of medical professionals, providing essential support for early-stage patient evaluation while ensuring robust patient privacy protection.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>In this study, we introduced Digital FaceDefender, a method designed to enhance privacy protection while supporting auxiliary diagnosis of ocular diseases in eye hospitals. <xref ref-type="fig" rid="figure1">Figure 1</xref> shows the detailed workflow, with the region of interest (ROI) clearly marked.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>The workflow of Digital FaceDefender. 
ROI: region of interest.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e66873_fig01.png"/></fig></sec><sec id="s2-2"><title>Ethical Considerations</title><p>The research protocol was reviewed and approved by the Institutional Review Board/Ethics Committee of the Fourth Affiliated Hospital of Harbin Medical University (approval 2023-Ethics Review-54; see <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). All participants provided informed consent in accordance with the principles outlined in the Declaration of Helsinki.</p><p>To protect privacy, all face images presented in the paper were generated using our image fusion method or masked rather than displaying original face images from our private database.</p></sec><sec id="s2-3"><title>Dataset</title><p>In this retrospective study, we collected face images from 3 sources: 2 publicly available datasets (Flickr-Faces-HQ [FFHQ] and CASIA-FaceV5) and 1 private dataset from the Fourth Affiliated Hospital of Harbin Medical University. The images from the latter dataset were randomly selected.</p><p>Based on their extensive clinical diagnostic experience, two board-certified ophthalmologists (one Chief Physician and one Associate Chief Physician), both licensed by the Ministry of Health of the People&#x2019;s Republic of China, independently evaluated a random sample of facial images from 1136 healthy individuals in our private database. These ophthalmologists, affiliated with the Joint Shantou International Eye Center and the Department of Ophthalmology at the Fourth Affiliated Hospital of Harbin Medical University, assessed the images using a standardized diagnostic protocol. Only individuals exhibiting clear ocular abnormalities, including esotropia, exotropia, vertical strabismus, and ptosis, were further assessed and included in this study. 
To ensure diagnostic consistency, the evaluations were performed independently, and inter-rater agreement was quantified using the Cohen Kappa coefficient.</p></sec><sec id="s2-4"><title>Image Preprocessing</title><p>Several preprocessing techniques were used to ensure the consistency and quality of patient images. Initially, facial alignment was performed by rotating the original images based on the line connecting the irises, ensuring the face was centered and horizontally oriented. All images were resized to 1024&#x00D7;1024 RGB (red, green, and blue) color space, the standard required for generating digitally synthesized avatars. If the original image size was smaller than this resolution, a white background was added; if larger, the image was cropped to maintain central facial positioning. Given that patient images are captured under variable conditions, including differences in lighting and background noise, a standard RGB color correction method [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>] was used to adjust brightness, ensuring consistency with the digitally synthesized avatars&#x2019; faces.</p></sec><sec id="s2-5"><title>Creation of Digitally Synthesized Avatars</title><p>The FFHQ dataset, consisting of 70,000 high-quality PNG images with a resolution of 1024&#x00D7;1024 pixels, was used to train the style-based generative adversarial network architecture (StyleGAN2) [<xref ref-type="bibr" rid="ref19">19</xref>] model, which is known for generating realistic digitally synthesized avatars. 
However, this dataset is biased towards Caucasian faces (69% Caucasian, 4% Black, and 27% other), raising concerns regarding generalizability to more diverse populations [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>To address this issue, we used the generator_yellow-stylegan2-config-f.pkl file, which was trained on 13,016 Asian facial images from the SeePrettyFace website [<xref ref-type="bibr" rid="ref22">22</xref>]. We generated male and female digitally synthesized avatars with varying age progressions, ranging from 5 to 85 years old in increments of 10 years, to simulate realistic age-related facial changes.</p></sec><sec id="s2-6"><title>Generation and Comparison of FDface and DMface Fusion Images</title><p>Accurate generation of lesion positions and morphologies is critical for medical image generation [<xref ref-type="bibr" rid="ref23">23</xref>]. While generative adversarial networks (GANs) excel at capturing global features, they often struggle with detailed pathological structures, as suggested by Kazeminia et al [<xref ref-type="bibr" rid="ref24">24</xref>] and Han et al [<xref ref-type="bibr" rid="ref25">25</xref>]. To mitigate this, we used Google&#x2019;s MediaPipe library [<xref ref-type="bibr" rid="ref26">26</xref>] for facial landmark detection. The Face Mesh detector identifies 3D coordinates for 468 facial landmarks (see <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p><p>Affine transformations preserve collinearity and parallelism [<xref ref-type="bibr" rid="ref26">26</xref>], making them suitable for adjusting discrepancies in the eye regions between patient and avatar images. We applied the Open Computer Vision Library (OpenCV) warpAffine() function [<xref ref-type="bibr" rid="ref27">27</xref>] to adjust the eye regions while preserving the original proportions and positions. 
These transformed images, referred to as preliminary fusion images (see <xref ref-type="fig" rid="figure1">Figure 1</xref>), were then used for the fusion process.</p><p>During image fusion, disparities in skin tone, brightness, and unnatural fusion boundaries can arise, potentially affecting diagnostic accuracy. To rectify this, we applied RGB color correction [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>] to harmonize patient images with avatar facial features. In addition, a Gaussian blur [<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>] was applied to the fused image edges to smooth boundary transitions [<xref ref-type="bibr" rid="ref31">31</xref>] and create a more natural appearance. <xref ref-type="fig" rid="figure2">Figure 2</xref> shows the detailed process by which the final high-quality fusion images (referred to as FDface) are generated through the fusion of raw RGB facial images and digitally synthesized avatar facial images. The workflow begins with the extraction of landmarks from both the raw images and the digitally synthesized avatars, followed by the image fusion step. This is followed by color correction and background adjustment to ensure consistency and improve visual appeal. Finally, Gaussian filtering is applied to smooth the image, further enhancing its quality and resulting in the final high-quality fused facial images. Furthermore, we generated both DMface and FDface images by applying the DM technique and Digital FaceDefender to the same original images for comparative analysis.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Workflow for the image fusion process. 
RGB: red, green, and blue.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e66873_fig02.png"/></fig></sec><sec id="s2-7"><title>Reidentification Risk Assessment</title><p>To assess the effectiveness of Digital FaceDefender in reducing reidentification risks while preserving essential facial features necessary for accurate diagnoses, we conducted a comparative analysis against DM using the Detailed Expression Capture and Animation (DECA) [<xref ref-type="bibr" rid="ref8">8</xref>] framework. Specifically, we used the FLAME model to reconstruct facial images, generating DMface and FDface representations from the CASIA-FaceV5 dataset, which comprises 2500 facial images from 500 individuals, each captured in five different poses. These reconstructed images were labeled as FLAME_DMface and FLAME_FDface, respectively.</p><p>To evaluate the reduction in reidentification risk associated with Digital FaceDefender, we used ArcFace (IR-SE50 model) to compute facial similarity scores and Rank-1 accuracy. Specifically, we separately quantified the mean similarity scores between FDface and FLAME_FDface images and their corresponding original facial images. These similarity scores indicate the extent to which the generated images resemble the original faces. 
In addition, we used Rank-1 accuracy to assess whether FDface and FLAME_FDface images could be reidentified as their original counterparts using ArcFace.</p></sec><sec id="s2-8"><title>Statistical Analysis for Diagnostic Benchmark Establishment</title><p>To determine the sample size required for constructing the diagnostic benchmark, we first conducted a pilot study with <inline-formula><mml:math id="ieqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>50</mml:mn></mml:mrow></mml:mstyle></mml:math></inline-formula> to estimate the SD (<inline-formula><mml:math id="ieqn2"><mml:msub><mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>) of the difference in pixel distances along the x-axis between the left and right eyes. 
The margin of error was then calculated by using the formula (<inline-formula><mml:math id="ieqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mfrac><mml:mi>&#x03B1;</mml:mi><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mfrac><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:msqrt><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:msqrt></mml:mfrac><mml:mo>=</mml:mo><mml:mn>1.96</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mfrac><mml:mn>3.31</mml:mn><mml:msqrt><mml:mn>50</mml:mn></mml:msqrt></mml:mfrac><mml:mo>=</mml:mo><mml:mn>0.92</mml:mn></mml:mrow></mml:mstyle></mml:math></inline-formula> , where <inline-formula><mml:math id="ieqn4"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mfrac><mml:mi>&#x03B1;</mml:mi><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1.96</mml:mn></mml:mrow></mml:mstyle></mml:math></inline-formula>  correspond to a 95% confidence level). 
Based on these estimates, the final required sample size (n) was determined using the standard sample size formula:</p><disp-formula id="E2"><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mfrac><mml:mi>&#x03B1;</mml:mi><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mtext>pilot</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mi>E</mml:mi></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:mn>1.96</mml:mn><mml:mo>&#x22C5;</mml:mo><mml:mn>3.63</mml:mn></mml:mrow><mml:mn>0.92</mml:mn></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo>=</mml:mo><mml:mn>59.81</mml:mn></mml:mrow></mml:mstyle></mml:math></disp-formula><p>Finally, a total of 1136 healthy individuals were included in our study, exceeding the estimated sample size (n=60) and ensuring sufficient statistical power for establishing the diagnostic benchmark.</p><p>Pixel distances (|A|, |B|, |CD|, |EF|, |GH|, and |IJ|) along the x- and y-axes for both eyes were calculated between the iris center (o and o&#x2019;) and reference points (a, b, c, d, e, f, g, h, i, j) in 1136 healthy individuals (see <xref ref-type="fig" rid="figure3">Figures 3II</xref>,<xref ref-type="fig" rid="figure3">3</xref>). The difference in pixel distances along the x-axis between the left and right eyes was denoted as |A-B|. Outlier detection was performed using the IQR method, excluding data points outside 1.5 times the IQR from the lower and upper quartiles. After outlier removal, 95% CIs for the pixel distances within the eye region were calculated. 
For variables that followed an approximately normal distribution (assessed by visual inspection and normality tests), we used the normal distribution method to calculate CIs. For variables that deviated from normality, we applied the bootstrap method to obtain more robust CI estimates. These diagnostic benchmarks were validated on an independent dataset of 114 individuals not included in the original sample. Inter-rater consistency between the diagnostic benchmarks and ophthalmologists&#x2019; assessments was evaluated using Cohen Kappa. The significance level was set to .05. Statistical analysis was performed using RStudio (version 1.1.463; Posit).</p><p>The established diagnostic benchmarks were then used to identify ocular abnormalities, such as esotropia and exotropia (see <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Measurement and comparison of pixel distances along the x-axis between the left and right eyes.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e66873_fig03.png"/></fig></sec><sec id="s2-9"><title>Digital FaceDefender Platform</title><p>We developed the Digital FaceDefender platform to support ophthalmologists and researchers in their work, offering a user-friendly interface for privacy protection and auxiliary diagnostic assistance.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Image Processing</title><p>Figure S1 in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref> illustrates certain challenges due to low resolution (640&#x00D7;480), nonideal shooting angles, and intricate backgrounds, such as a blue curtain. These factors could hinder accurate representation of critical eye regions. 
As a representative example, we enhanced Figure S1A to a high-resolution version (2436&#x00D7;1944) shown in Figure S1B, both in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>. Additional corrections included aligning the pupils horizontally in Figure S1C in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref> and refining the cropping to emphasize the central facial region more clearly in Figure S1D in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>.</p></sec><sec id="s3-2"><title>Creation of Digitally Synthesized Avatars</title><p>As depicted in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>, we generated 2 sets of digitally synthesized avatars representing Asian males and females aged from 5 to 85 years at 10-year intervals. These avatars are used as templates in subsequent image fusion processes to ensure the preservation of critical facial features essential for medical diagnoses. Furthermore, a set of digitally synthesized avatars representing Caucasian individuals was also created.</p></sec><sec id="s3-3"><title>Generation and Comparison of FDface and DMface Fusion Images</title><p>Using Google&#x2019;s MediaPipe library, we focus on 52 key landmarks within the eye and periocular regions, including the eyelids, eye sockets, irises, and brow arches, for both patients and avatars. These landmarks are used to define closed ROIs for subsequent image fusion. OpenCV is then applied to adjust the eye regions, resulting in transformed images, referred to as preliminary fusion images (see <xref ref-type="fig" rid="figure1">Figure 1</xref>), which are subsequently used in the fusion process.</p><p><xref ref-type="fig" rid="figure4">Figure 4</xref> presents the final fused images generated by both the DM method and our proposed Digital FaceDefender approach. 
The DMface images produced using the DM method are shown in <xref ref-type="fig" rid="figure4">Figure 4C</xref> and <xref ref-type="fig" rid="figure4">D</xref>, while the FDface images created by Digital FaceDefender are depicted in <xref ref-type="fig" rid="figure4">Figure 4E</xref> and <xref ref-type="fig" rid="figure4">F</xref>. The difference in fused images generated by DM and Digital FaceDefender highlights that Digital FaceDefender not only meets the privacy protection requirements but also preserves essential features related to ocular diseases. The resulting natural-looking fused images enhance clinician-patient empathy, which is crucial for accurate diagnosis and personalized treatment. Furthermore, the final fused images generated using the various ethnic digitally synthesized avatars, as presented in <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>, demonstrate the generalizability of the proposed Digital FaceDefender methodology.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Comparison of privacy protection and reidentification risk: DM versus Digital FaceDefender (the 2 facial images are sourced from Yang et al [<xref ref-type="bibr" rid="ref4">4</xref>]). DM: Digital Mask; FLAME: Faces Learned with an Articulated Model and Expressions.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e66873_fig04.png"/></fig></sec><sec id="s3-4"><title>Reidentification Risk Assessment</title><p><xref ref-type="fig" rid="figure4">Figure 4</xref> illustrates the difference in reidentification risk between the DM method and the Digital FaceDefender approach. 
When applying the FLAME reconstruction technique to both DMface and FDface images, the FLAME_DMface images (<xref ref-type="fig" rid="figure4">Figure 4G</xref> and <xref ref-type="fig" rid="figure4">H</xref>) exhibit a higher reidentification risk compared to FLAME_FDface images (<xref ref-type="fig" rid="figure4">Figure 4I</xref> and <xref ref-type="fig" rid="figure4">J</xref>). This increased risk stems from the preservation of key facial contours, including the face shape, nose, eyes, and mouth, which enables effective reconstruction through FLAME in the DM method.</p><p>As shown in <xref ref-type="table" rid="table1">Table 1</xref> and <xref ref-type="supplementary-material" rid="app8">Multimedia Appendices 8</xref>,<xref ref-type="supplementary-material" rid="app9">9</xref> and <xref ref-type="supplementary-material" rid="app9">9</xref>, the mean similarity score between FDface and raw facial images is 0.31. While this value suggests that FDface retains some identifiable facial features, potentially beneficial for auxiliary diagnosis, it remains below the reidentification threshold established by leading facial recognition systems. For example, Microsoft&#x2019;s Face API reports that similarity scores above 0.5 indicate a potential match [<xref ref-type="bibr" rid="ref32">32</xref>]. 
The significantly lower mean similarity score of 0.09 for FLAME_FDface versus raw facial images suggests that reconstructing the original face from FDface images is highly challenging, confirming that Digital FaceDefender significantly reduces reidentification risk while enhancing privacy protection.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Evaluating the mean similarity between FDface and FLAME_FDface images and raw facial images.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Comparison</td><td align="left" valign="bottom">Mean similarity</td></tr></thead><tbody><tr><td align="left" valign="top">FDface versus raw facial images</td><td align="left" valign="top">0.31</td></tr><tr><td align="left" valign="top">FLAME_FDface versus raw facial images</td><td align="left" valign="top">0.09</td></tr></tbody></table></table-wrap><p>Further validation comes from Rank-1 accuracy analysis across different facial poses (see <xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref>). Notably, for Pose #2 and Pose #3 (see <xref ref-type="table" rid="table2">Table 2</xref>) and Poses #2-#5 (see <xref ref-type="table" rid="table3">Table 3</xref>), the Rank-1 accuracy is 0, indicating that no successful reidentifications occurred in these conditions. 
Even for Pose #4 (0.02) and Pose #5 (0.04), the Rank-1 accuracy remains near zero, confirming that Digital FaceDefender effectively mitigates reidentification risks across various facial angles.</p><p>These results underscore the robustness of our proposed Digital FaceDefender in balancing privacy protection with retention of diagnostic features.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Evaluating the Rank-1 accuracy between FDface images and raw facial images with different poses.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Pose</td><td align="left" valign="bottom">Rank-1 accuracy</td></tr></thead><tbody><tr><td align="left" valign="top">Pose #2</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">Pose #3</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">Pose #4</td><td align="left" valign="top">0.02</td></tr><tr><td align="left" valign="top">Pose #5</td><td align="left" valign="top">0.04</td></tr></tbody></table></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Evaluating the Rank-1 accuracy between FLAME_FDface images and raw facial images with different poses.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Pose #</td><td align="left" valign="bottom">Rank-1 accuracy</td></tr></thead><tbody><tr><td align="left" valign="top">Pose #2</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">Pose #3</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">Pose #4</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">Pose #5</td><td align="left" valign="top">0</td></tr></tbody></table></table-wrap></sec><sec id="s3-5"><title>Diagnostic Benchmark Establishment</title><p>Using the developed diagnostic benchmarks, we evaluated their agreement with 
ophthalmologists&#x2019; assessments for ocular disease detection (see <xref ref-type="fig" rid="figure5">Figure 5</xref>). As shown in <xref ref-type="table" rid="table4">Table 4</xref>, Cohen Kappa analysis demonstrated a fair agreement for the left eye (&#x03BA;=0.37) and a moderate agreement for the right eye (&#x03BA;=0.445). Both agreements were statistically significant (<italic>P</italic>&#x003C;.001), indicating the reliability of the benchmarks. However, factors such as head posture and image quality may influence diagnostic accuracy.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Clinical diagnoses of ocular diseases based on fused images.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jmir_v27i1e66873_fig05.png"/></fig><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Cohen Kappa analysis in classifying right and left eye strabismus.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom"/><td align="left" valign="bottom">Kappa</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Left eye</td><td align="left" valign="top">0.37</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Right eye</td><td align="left" valign="top">0.45</td><td align="left" valign="top">&#x003C;.001</td></tr></tbody></table></table-wrap></sec><sec id="s3-6"><title>Digital FaceDefender Platform</title><p>The Digital FaceDefender platform is available as a website [<xref ref-type="bibr" rid="ref33">33</xref>]. 
<xref ref-type="supplementary-material" rid="app10">Multimedia Appendix 10</xref>, along with the accompanying <xref ref-type="supplementary-material" rid="app11">Multimedia Appendix 11</xref>, demonstrates the effectiveness of the Digital FaceDefender Platform in protecting patients&#x2019; facial privacy while simultaneously supporting the diagnosis of ocular diseases. In addition, the Digital FaceDefender platform utilizes the developed diagnostic benchmarks to identify raw images with suboptimal camera angles, providing text prompts to flag them as poor-quality images.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>This study introduces Digital FaceDefender to safeguard patients&#x2019; privacy and achieve auxiliary diagnosis. Collectively, the comparison of DMface images, FDface images, FLAME_DMface images, and FLAME_FDface images, along with the evaluation metrics including Rank-1 accuracy, mean similarity, and Cohen Kappa, demonstrates that Digital FaceDefender effectively facilitates auxiliary diagnosis while concurrently safeguarding patient privacy.</p><p>Quantitative analysis, including mean similarity scores and Rank-1 accuracy, demonstrates that Digital FaceDefender substantially reduces reidentification risks while effectively preserving critical diagnostic features. In contrast, existing anonymization methods exhibit certain limitations. For example, Shawn Shan et al [<xref ref-type="bibr" rid="ref34">34</xref>] introduces subtle perturbations to facial images to prevent unauthorized facial recognition. However, these perturbations can be removed using super-resolution models, adversarial training, or denoising techniques, thereby restoring identifiable facial features and significantly weakening privacy protection. Similarly, FaceObfuscato [<xref ref-type="bibr" rid="ref35">35</xref>] disrupts reidentification attacks, making the method resistant to adversarial optimization techniques. 
However, it remains vulnerable to non&#x2013;gradient-based and adaptive attacks that leverage auxiliary information or brute-force reconstruction. In addition, its transformations may distort critical eye-region details, potentially degrading performance in ocular auxiliary diagnosis. Face Deidentification [<xref ref-type="bibr" rid="ref36">36</xref>] uses GAN-based face synthesis to generate realistic, high-resolution anonymized faces. While this method enhances image fidelity, it can be susceptible to GAN inversion attacks. In contrast, Digital FaceDefender achieves lower reidentification and similarity scores, demonstrating superior robustness in privacy protection. Unlike diffusion-based anonymization methods, such as the approach proposed by Kung et al [<xref ref-type="bibr" rid="ref37">37</xref>], which suffer from uncontrollable anonymization levels due to the nonlinear latent space, Digital FaceDefender enables precise control over the strength of anonymization, ensuring that key ocular features remain intact for auxiliary diagnosis. Furthermore, diffusion-based anonymization can degrade expression recognition accuracy, whereas Digital FaceDefender generates visually natural and diagnostically meaningful fused images. Compared to DeepPrivacy [<xref ref-type="bibr" rid="ref38">38</xref>],which uses a StyleGAN-like architecture to synthesize high-resolution, photorealistic faces. However, it remains vulnerable to adversarial attacks. Digital FaceDefender preserves clinically relevant facial features while minimizing reidentification risks.</p><p>However, there are several limitations to this study. First, while Google&#x2019;s MediaPipe can detect 468 facial landmarks, this number may be insufficient to accurately represent the ROI, particularly since only 16 landmarks are assigned to the eyelids and 5 to the iris for each eye. Second, MediaPipe faces challenges in accurately localizing the ROI, especially when image quality is compromised. 
Third, aligning pixel distances in the ROI on the digitally synthesized avatar&#x2019;s face with the actual ROI size on the patient&#x2019;s face remains problematic. A possible solution could involve using a horizontal ruler during photography as a reference for scaling. Fourth, the quality of image fusion is influenced by factors such as camera equipment, shooting angles, and environmental conditions, highlighting the need for standardized imaging protocols. Fifth, enhancing the accuracy of fusion boundaries will require larger and more diverse training datasets, which remain scarce, particularly for rare medical conditions. Finally, we explore some novel advanced techniques, like Hugging Face using Stable Diffusion (V3.5), developed by Hugging Face [<xref ref-type="bibr" rid="ref35">35</xref>], to generate digitally synthesized avatars from various ethnic backgrounds. However, the results produced by Stable Diffusion did not surpass those generated by StyleGAN2 in terms of fusion quality (see <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>). The reason for this could be the significantly higher resolution of virtual avatars produced by Stable Diffusion compared to the raw facial images, which may have introduced challenges in achieving seamless fusion effects (see <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>).</p><p>In conclusion, unlike other privacy protection technologies, Digital FaceDefender demonstrates dual efficacy: it preserves privacy while facilitating auxiliary diagnoses. Although our preliminary findings are promising, further refinement of the image fusion process is essential to enhance the realism and accuracy of the resultant images. In addition, improving the automatic matching between the virtual avatar&#x2019;s face and the patient&#x2019;s face has the potential to reduce the workload of health care professionals. 
Future research should focus on these areas to advance the utility and applicability of Digital FaceDefender in clinical practice.</p></sec></body><back><ack><p>HC, ZW, and MH are co-first authors on this work.</p><p>We appreciate Lifeng Weng for his efforts in masking some raw facial images to ensure privacy protection.</p><p>Our research was partially supported by National Natural Science Foundation of China (grants 12231017 and 72171216), and Innovative development funds of Anhui Province Federation of Social Sciences (2022CX 081).</p></ack><notes><sec><title>Data Availability</title><p>The data supporting this study&#x2019;s findings are divided into two groups: publicly available data and restricted data. The publicly available data were obtained from open-source datasets. The restricted data, which involve identifiable information from individuals, are subject to licensing agreements that allow usage solely for analytical purposes and cannot be shared publicly. All code used in this study is available at GitHub [<xref ref-type="bibr" rid="ref39">39</xref>].</p><p>Users interested in exploring the Digital FaceDefender platform [<xref ref-type="bibr" rid="ref33">33</xref>] can request trial access by contacting the administrators via email at linnanqia@126.com.</p></sec></notes><fn-group><fn fn-type="con"><p>HT contributed to writing &#x2013; review &#x0026; editing, writing &#x2013; original draft, visualization, supervision, resources, project administration, methodology, investigation, formal analysis, data curation, and conceptualization. HC contributed to writing &#x2013; original draft, visualization, validation, software, methodology, investigation, and formal analysis. ZW contributed to writing &#x2013; original draft, validation, software, methodology, investigation, data curation, and conceptualization. MH contributed to writing &#x2013; review &#x0026; editing, validation, and conceptualization. 
CW contributed to writing &#x2013; original draft, software, and methodology. LS contributed to writing &#x2013; original draft, validation, and data curation. XW contributed to writing &#x2013; review &#x0026; editing and funding acquisition. DS contributed to writing &#x2013; review &#x0026; editing and conceptualization. CH contributed to writing &#x2013; original draft and methodology. AG contributed to writing &#x2013; original draft and funding acquisition.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">DECA</term><def><p>Detailed Expression Capture and Animation</p></def></def-item><def-item><term id="abb3">DM</term><def><p>Digital Mask</p></def></def-item><def-item><term id="abb4">FFHQ</term><def><p>Flickr-Faces-HQ</p></def></def-item><def-item><term id="abb5">FLAME</term><def><p>Faces Learned with an Articulated Model and Expressions</p></def></def-item><def-item><term id="abb6">GAN</term><def><p>generative adversarial network</p></def></def-item><def-item><term id="abb7">RGB</term><def><p>red, green, and blue</p></def></def-item><def-item><term id="abb8">ROI</term><def><p>region of interest</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mason</surname><given-names>J</given-names> </name><name name-style="western"><surname>Dave</surname><given-names>R</given-names> </name><name name-style="western"><surname>Chatterjee</surname><given-names>P</given-names> </name><name name-style="western"><surname>Graham-Allen</surname><given-names>I</given-names> </name><name name-style="western"><surname>Esterline</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Roy</surname><given-names>K</given-names> </name></person-group><article-title>An investigation of biometric authentication in the healthcare environment</article-title><source>Array</source><year>2020</year><month>12</month><volume>8</volume><fpage>100042</fpage><pub-id pub-id-type="doi">10.1016/j.array.2020.100042</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alhammad</surname><given-names>N</given-names> </name><name name-style="western"><surname>Alajlani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Abd-Alrazaq</surname><given-names>A</given-names> </name><name name-style="western"><surname>Epiphaniou</surname><given-names>G</given-names> </name><name name-style="western"><surname>Arvanitis</surname><given-names>T</given-names> </name></person-group><article-title>Patients&#x2019; perspectives on the data confidentiality, privacy, and security of mHealth apps: systematic review</article-title><source>J Med Internet Res</source><year>2024</year><month>05</month><day>31</day><volume>26</volume><fpage>e50715</fpage><pub-id pub-id-type="doi">10.2196/50715</pub-id><pub-id pub-id-type="medline">38820572</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nigam</surname><given-names>D</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>SN</given-names> </name><name name-style="western"><surname>Raj Vincent</surname><given-names>PMD</given-names> </name><name name-style="western"><surname>Srinivasan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Arunmozhi</surname><given-names>S</given-names> </name></person-group><article-title>Biometric authentication for intelligent and privacy-preserving 
healthcare systems</article-title><source>J Healthc Eng</source><year>2022</year><volume>2022</volume><fpage>1789996</fpage><pub-id pub-id-type="doi">10.1155/2022/1789996</pub-id><pub-id pub-id-type="medline">35368929</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lyu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>R</given-names> </name><etal/></person-group><article-title>A digital mask to safeguard patient privacy</article-title><source>Nat Med</source><year>2022</year><month>09</month><volume>28</volume><issue>9</issue><fpage>1883</fpage><lpage>1892</lpage><pub-id pub-id-type="doi">10.1038/s41591-022-01966-1</pub-id><pub-id pub-id-type="medline">36109638</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meeus</surname><given-names>M</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>S</given-names> </name><name name-style="western"><surname>de Montjoye</surname><given-names>YA</given-names> </name></person-group><article-title>Concerns about using a digital mask to safeguard patient privacy</article-title><source>Nat Med</source><year>2023</year><month>07</month><volume>29</volume><issue>7</issue><fpage>1658</fpage><lpage>1659</lpage><pub-id pub-id-type="doi">10.1038/s41591-023-02439-9</pub-id><pub-id pub-id-type="medline">37464037</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bolkart</surname><given-names>T</given-names> 
</name><name name-style="western"><surname>Black</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Romero</surname><given-names>J</given-names> </name></person-group><article-title>Learning a model of facial shape and expression from 4D scans</article-title><source>ACM Trans Graph</source><year>2017</year><month>12</month><day>31</day><volume>36</volume><issue>6</issue><fpage>1</fpage><lpage>17</lpage><pub-id pub-id-type="doi">10.1145/3130800.3130813</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Loper</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mahmood</surname><given-names>N</given-names> </name><name name-style="western"><surname>Romero</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pons-Moll</surname><given-names>G</given-names> </name><name name-style="western"><surname>Black</surname><given-names>MJ</given-names> </name></person-group><article-title>SMPL: a skinned multi-person linear model</article-title><source>Seminal Graphics Papers: Pushing the Boundaries</source><year>2023</year><volume>2</volume><fpage>851</fpage><lpage>866</lpage></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Deng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xue</surname><given-names>N</given-names> </name><name name-style="western"><surname>Zafeiriou</surname><given-names>S</given-names> </name></person-group><article-title>ArcFace: additive angular margin loss for deep face recognition</article-title><conf-name>2019 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 15-20, 2019</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2019.00482</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lyu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>R</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Dai</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>H</given-names> </name></person-group><article-title>Reply to: concerns about using a digital mask to safeguard patient privacy</article-title><source>Nat Med</source><year>2023</year><month>07</month><volume>29</volume><issue>7</issue><fpage>1660</fpage><lpage>1661</lpage><pub-id pub-id-type="doi">10.1038/s41591-023-02435-z</pub-id><pub-id pub-id-type="medline">37464038</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Neumann</surname><given-names>M</given-names> </name><name name-style="western"><surname>Moore</surname><given-names>ST</given-names> </name><name name-style="western"><surname>Baum</surname><given-names>LM</given-names> </name><etal/></person-group><article-title>Politicizing masks? 
Examining the volume and content of local news coverage of face coverings in the US through the COVID-19 pandemic</article-title><source>Polit Commun</source><year>2024</year><month>01</month><day>2</day><volume>41</volume><issue>1</issue><fpage>66</fpage><lpage>106</lpage><pub-id pub-id-type="doi">10.1080/10584609.2023.2239181</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCrackin</surname><given-names>SD</given-names> </name><name name-style="western"><surname>Capozzi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Mayrand</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ristic</surname><given-names>J</given-names> </name></person-group><article-title>Face masks impair basic emotion recognition: group effects and individual variability</article-title><source>Soc Psychol</source><year>2022</year><volume>54</volume><pub-id pub-id-type="doi">10.31234/osf.io/2whmp</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kumari</surname><given-names>P</given-names> </name><name name-style="western"><surname>Seeja</surname><given-names>KR</given-names> </name></person-group><article-title>A novel periocular biometrics solution for authentication during COVID-19 pandemic situation</article-title><source>J Ambient Intell Humaniz Comput</source><year>2021</year><volume>12</volume><issue>11</issue><fpage>10321</fpage><lpage>10337</lpage><pub-id pub-id-type="doi">10.1007/s12652-020-02814-1</pub-id><pub-id pub-id-type="medline">33425055</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>R</given-names> 
</name><name name-style="western"><surname>Ross</surname><given-names>A</given-names> </name></person-group><article-title>Periocular biometrics and its relevance to partially masked faces: a survey</article-title><source>Comput Vis Image Underst</source><year>2023</year><month>01</month><volume>226</volume><fpage>103583</fpage><pub-id pub-id-type="doi">10.1016/j.cviu.2022.103583</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Binka</surname><given-names>M</given-names> </name><name name-style="western"><surname>Adu</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Jeong</surname><given-names>D</given-names> </name><etal/></person-group><article-title>The impact of mask mandates on face mask use during the COVID-19 pandemic: longitudinal survey study</article-title><source>JMIR Public Health Surveill</source><year>2023</year><month>01</month><day>11</day><volume>9</volume><fpage>e42616</fpage><pub-id pub-id-type="doi">10.2196/42616</pub-id><pub-id pub-id-type="medline">36446134</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kret</surname><given-names>ME</given-names> </name><name name-style="western"><surname>de Gelder</surname><given-names>B</given-names> </name></person-group><article-title>Islamic headdress influences how emotion is recognized from the eyes</article-title><source>Front Psychol</source><year>2012</year><volume>3</volume><fpage>110</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2012.00110</pub-id><pub-id pub-id-type="medline">22557983</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kret</surname><given-names>ME</given-names> 
</name><name name-style="western"><surname>Maitner</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>AH</given-names> </name></person-group><article-title>Interpreting emotions from women with covered faces: a comparison between a Middle Eastern and Western-European sample</article-title><source>Front Psychol</source><year>2021</year><volume>12</volume><fpage>620632</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2021.620632</pub-id><pub-id pub-id-type="medline">34025499</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><article-title>Switching Eds: face swapping with Python, dlib, and OpenCV</article-title><source>GitHub</source><access-date>2023-10-15</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://matthewearl.github.io/2015/07/28/switching-eds-with-python/">https://matthewearl.github.io/2015/07/28/switching-eds-with-python/</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="web"><article-title>Color balance</article-title><source>Wikipedia</source><access-date>2023-10-01</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://en.wikipedia.org/wiki/Color_balance">https://en.wikipedia.org/wiki/Color_balance</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Tero</surname><given-names>K</given-names> </name></person-group><article-title>Flickr faces HQ (FFHQ) 70K from stylegan</article-title><source>GitHub</source><access-date>2023-11-12</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/NVlabs/ffhq-dataset">https://github.com/NVlabs/ffhq-dataset</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name 
name-style="western"><surname>Karras</surname><given-names>T</given-names> </name><name name-style="western"><surname>Laine</surname><given-names>S</given-names> </name><name name-style="western"><surname>Aila</surname><given-names>T</given-names> </name></person-group><article-title>A style-based generator architecture for generative adversarial networks</article-title><conf-name>2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name><conf-date>Jun 15-20, 2019</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2019.00453</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maluleke</surname><given-names>VH</given-names> </name><name name-style="western"><surname>Thakkar</surname><given-names>N</given-names> </name><name name-style="western"><surname>Brooks</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Studying bias in GANS through the lens of race</article-title><source>European Conference on Computer Vision</source><fpage>344</fpage><lpage>360</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-19778-9_20</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="web"><source>SeePrettyFace</source><access-date>2025-06-11</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://seeprettyface.com/">http://seeprettyface.com/</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wen</surname><given-names>C</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Pan</surname><given-names>W</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>Genome-wide association studies of brain imaging data via weighted distance correlation</article-title><source>Bioinformatics</source><year>2020</year><month>12</month><day>8</day><volume>36</volume><issue>19</issue><fpage>4942</fpage><lpage>4950</lpage><pub-id pub-id-type="doi">10.1093/bioinformatics/btaa612</pub-id><pub-id pub-id-type="medline">32619001</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yi</surname><given-names>X</given-names> </name><name name-style="western"><surname>Walia</surname><given-names>E</given-names> </name><name name-style="western"><surname>Babyn</surname><given-names>P</given-names> </name></person-group><article-title>Generative adversarial network in medical imaging: a review</article-title><source>Med Image Anal</source><year>2019</year><month>12</month><volume>58</volume><fpage>101552</fpage><pub-id pub-id-type="doi">10.1016/j.media.2019.101552</pub-id><pub-id pub-id-type="medline">31521965</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Shin</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Tenenholtz</surname><given-names>NA</given-names> </name><name name-style="western"><surname>Rogers</surname><given-names>JK</given-names> </name><etal/></person-group><article-title>Medical image synthesis for data augmentation and anonymization using generative adversarial networks</article-title><source>Simulation and Synthesis in Medical Imaging: Third International Workshop, SASHIMI 2018, Held in Conjunction with MICCAI 
2018</source><year>2018</year><publisher-name>Springer</publisher-name><fpage>1</fpage><lpage>11</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-00536-8_1</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="web"><article-title>Affine transformation</article-title><source>Wolfram Mathworld</source><access-date>2024-01-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://mathworld.wolfram.com/AffineTransformation.html">https://mathworld.wolfram.com/AffineTransformation.html</ext-link></comment></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="web"><article-title>WarpAffine</article-title><source>Nvidia</source><access-date>2024-01-13</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/image_processing/warp.html">https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/image_processing/warp.html</ext-link></comment></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="web"><article-title>Image filters: Gaussian blur</article-title><source>Medium</source><access-date>2025-06-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://aryamansharda.medium.com/image-filters-gaussian-blur-eb36db6781b1">https://aryamansharda.medium.com/image-filters-gaussian-blur-eb36db6781b1</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Graham</surname><given-names>PA</given-names> </name></person-group><article-title>Epidemiology of strabismus</article-title><source>Br J Ophthalmol</source><year>1974</year><month>03</month><volume>58</volume><issue>3</issue><fpage>224</fpage><lpage>231</lpage><pub-id pub-id-type="doi">10.1136/bjo.58.3.224</pub-id><pub-id pub-id-type="medline">4834596</pub-id></nlm-citation></ref><ref 
id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Popkin</surname><given-names>T</given-names> </name><name name-style="western"><surname>Cavallaro</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hands</surname><given-names>D</given-names> </name></person-group><article-title>Accurate and efficient method for smoothly space-variant Gaussian blurring</article-title><source>IEEE Trans Image Process</source><year>2010</year><month>05</month><volume>19</volume><issue>5</issue><fpage>1362</fpage><lpage>1370</lpage><pub-id pub-id-type="doi">10.1109/TIP.2010.2041400</pub-id><pub-id pub-id-type="medline">20106740</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Tai</surname><given-names>YW</given-names> </name><name name-style="western"><surname>Jia</surname><given-names>J</given-names> </name><name name-style="western"><surname>Tang</surname><given-names>CK</given-names> </name></person-group><article-title>Local color transfer via probabilistic segmentation by expectation-maximization</article-title><conf-name>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR&#x2019;05)</conf-name><conf-date>Jun 20-25, 2005</conf-date><pub-id pub-id-type="doi">10.1109/CVPR.2005.215</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>R</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Li</surname><given-names>H</given-names> </name></person-group><article-title>Multi-organ segmentation via co-training weight-averaged models from few-organ datasets</article-title><conf-name>Medical Image Computing and Computer Assisted Intervention&#x2013;MICCAI 2020: 23rd International Conference</conf-name><conf-date>Oct 4-8, 2020</conf-date></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="web"><source>Digital FaceDefender</source><access-date>2025-06-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://218.16.242.201:5460">http://218.16.242.201:5460</ext-link></comment></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Shan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wenger</surname><given-names>E</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name></person-group><article-title>Fawkes: protecting privacy against unauthorized deep learning models</article-title><source>Proceedings of the 29th USENIX Conference on Security Symposium (SEC&#x2019;20)</source><year>2020</year><publisher-name>USENIX Association</publisher-name><fpage>1589</fpage><lpage>1604</lpage></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Jin</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>FaceObfuscator: defending deep learning-based privacy attacks with gradient descent-resistant features in face recognition</article-title><conf-name>33rd USENIX Security Symposium (USENIX Security 
24)</conf-name><conf-date>Aug 14-16, 2024</conf-date></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meden</surname><given-names>B</given-names> </name><name name-style="western"><surname>Gonzalez-Hernandez</surname><given-names>M</given-names> </name><name name-style="western"><surname>Peer</surname><given-names>P</given-names> </name><name name-style="western"><surname>&#x0160;truc</surname><given-names>V</given-names> </name></person-group><article-title>Face deidentification with controllable privacy protection</article-title><source>Image Vis Comput</source><year>2023</year><month>06</month><volume>134</volume><fpage>104678</fpage><pub-id pub-id-type="doi">10.1016/j.imavis.2023.104678</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Kung</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Varanka</surname><given-names>T</given-names> </name><name name-style="western"><surname>Saha</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sim</surname><given-names>T</given-names> </name><name name-style="western"><surname>Sebe</surname><given-names>N</given-names> </name></person-group><article-title>Face anonymization made simple</article-title><source>arXiv</source><comment>Preprint posted online on  Nov 1, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2411.00762</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hukkel&#x00E5;s</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mester</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>Lindseth</surname><given-names>F</given-names> </name></person-group><article-title>DeepPrivacy: a generative adversarial network for face anonymization</article-title><conf-name>International symposium on visual computing</conf-name><conf-date>Oct 7-9, 2019</conf-date><pub-id pub-id-type="doi">10.1007/978-3-030-33720-9_44</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="web"><article-title>Digital Face Defender</article-title><source>GitHub</source><access-date>2025-06-11</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/GreatFishbig/Digital-Face-Defender">https://github.com/GreatFishbig/Digital-Face-Defender</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Images from &#x201C;A digital mask to safeguard patient privacy.&#x201D;</p><media xlink:href="jmir_v27i1e66873_app1.png" xlink:title="PNG File, 150 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Ethical approval.</p><media xlink:href="jmir_v27i1e66873_app2.pdf" xlink:title="PDF File, 483 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>The landmarks of face and periocular region.</p><media xlink:href="jmir_v27i1e66873_app3.docx" xlink:title="DOCX File, 535 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>The detection of blepharoptosis in the right eye and left eye.</p><media xlink:href="jmir_v27i1e66873_app4.docx" xlink:title="DOCX File, 437 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Image preprocessing for low-quality images.</p><media xlink:href="jmir_v27i1e66873_app5.docx" xlink:title="DOCX File, 1355 KB"/></supplementary-material><supplementary-material id="app6"><label>Multimedia Appendix 
6</label><p>Digitally synthesized avatars of Asian males and females representing various age groups.</p><media xlink:href="jmir_v27i1e66873_app6.docx" xlink:title="DOCX File, 4815 KB"/></supplementary-material><supplementary-material id="app7"><label>Multimedia Appendix 7</label><p>The final fused images with various ethnic digital avatars.</p><media xlink:href="jmir_v27i1e66873_app7.png" xlink:title="PNG File, 551 KB"/></supplementary-material><supplementary-material id="app8"><label>Multimedia Appendix 8</label><p>Comparison of similarity rates between FDface and original facial images.</p><media xlink:href="jmir_v27i1e66873_app8.docx" xlink:title="DOCX File, 97 KB"/></supplementary-material><supplementary-material id="app9"><label>Multimedia Appendix 9</label><p>Comparison of similarity rates between FLAME_FDface and original facial images.</p><media xlink:href="jmir_v27i1e66873_app9.docx" xlink:title="DOCX File, 97 KB"/></supplementary-material><supplementary-material id="app10"><label>Multimedia Appendix 10</label><p>Illustration of the Digital FaceDefender platform.</p><media xlink:href="jmir_v27i1e66873_app10.docx" xlink:title="DOCX File, 564 KB"/></supplementary-material><supplementary-material id="app11"><label>Multimedia Appendix 11</label><p>Implementation video of the Digital FaceDefender platform.</p><media xlink:href="jmir_v27i1e66873_app11.mp4" xlink:title="MP4 File, 811 KB"/></supplementary-material></app-group></back></article>