<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e68538</article-id>
      <article-id pub-id-type="pmid">40424023</article-id>
      <article-id pub-id-type="doi">10.2196/68538</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Exploring the Application Capability of ChatGPT as an Instructor in Skills Education for Dental Medical Students: Randomized Controlled Trial</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Jin</surname>
            <given-names>Qiao</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bajpai</surname>
            <given-names>Dr Manas</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Metcalf</surname>
            <given-names>Mary</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Yin</surname>
            <given-names>Rong</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Huang</surname>
            <given-names>Siyu</given-names>
          </name>
          <degrees>BM, MDS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-1516-7333</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Wen</surname>
            <given-names>Chang</given-names>
          </name>
          <degrees>BM, MNS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0008-7748-9058</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Bai</surname>
            <given-names>Xueying</given-names>
          </name>
          <degrees>BM, MDS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0008-3023-6969</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Sihong</given-names>
          </name>
          <degrees>BM, MDS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-8161-3389</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Wang</surname>
            <given-names>Shuining</given-names>
          </name>
          <degrees>BM, MDS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-1482-0046</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Wang</surname>
            <given-names>Xiaoxuan</given-names>
          </name>
          <degrees>BM, MDS, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3763-2939</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Dong</given-names>
          </name>
          <degrees>BM, MDS, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>State Key Laboratory of Oral &amp; Maxillofacial Reconstruction and Regeneration, Key Laboratory of Oral Biomedicine Ministry of Education, Hubei Key Laboratory of Stomatology</institution>
            <institution>School &amp; Hospital of Stomatology</institution>
            <institution>Wuhan University</institution>
            <addr-line>#237 Luoyu Road, Hongshan District.</addr-line>
            <addr-line>Wuhan, 430079</addr-line>
            <country>China</country>
            <phone>86 2787686212</phone>
            <fax>86 2787646697</fax>
            <email>ydnba@whu.edu.cn</email>
          </address>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8378-9008</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>State Key Laboratory of Oral &amp; Maxillofacial Reconstruction and Regeneration, Key Laboratory of Oral Biomedicine Ministry of Education, Hubei Key Laboratory of Stomatology</institution>
        <institution>School &amp; Hospital of Stomatology</institution>
        <institution>Wuhan University</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Center for Orthodontics and Pediatric Dentistry at Optics Valley Branch</institution>
        <institution>School &amp; Hospital of Stomatology</institution>
        <institution>Wuhan University</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Periodontology</institution>
        <institution>School &amp; Hospital of Stomatology</institution>
        <institution>Wuhan University</institution>
        <addr-line>Wuhan</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Dong Yang <email>ydnba@whu.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>27</day>
        <month>5</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e68538</elocation-id>
      <history>
        <date date-type="received">
          <day>10</day>
          <month>11</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>28</day>
          <month>2</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>16</day>
          <month>3</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>22</day>
          <month>3</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Siyu Huang, Chang Wen, Xueying Bai, Sihong Li, Shuining Wang, Xiaoxuan Wang, Dong Yang. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 27.05.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e68538" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Clinical operative skills training is a critical component of preclinical education for dental students. Although technology-assisted instruction, such as virtual reality and simulators, is increasingly being integrated, direct guidance from instructors remains the cornerstone of skill development. ChatGPT, an advanced conversational artificial intelligence model developed by OpenAI, is gradually being used in medical education.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to compare the effects of ChatGPT-assisted skill learning on performance, cognitive load, self-efficacy, learning motivation, and spatial ability, with the aim of evaluating the potential of ChatGPT in clinical operative skills education.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>In this study, 187 undergraduate dental students recruited from a first-class university in China were randomly divided into a ChatGPT group and a blank control group. Among them, the control group used videos for skill acquisition, and the ChatGPT group used ChatGPT in addition to the videos. After 1 week of intervention, skills were tested using desktop virtual reality, and cognitive load was measured by recording changes in pupil diameter with an eye tracker. In addition, a spatial ability test was administered to analyze the effect of ChatGPT on those with different spatial abilities. Finally, a questionnaire was also used to assess cognitive load and self-efficacy during the learning process.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 192 dental undergraduates from a top-tier Chinese university were initially recruited for the experiment by October 25, 2024. Following eye-tracking calibration procedures, 5 participants were excluded, resulting in 187 eligible students successfully completing the experimental protocol by November 2, 2024. Following a short-term intervention administered through randomized allocation, superior performance (ChatGPT group: mean 73.12, SD 10.06; control group: mean 65.54, SD 12.48; <italic>P</italic>&lt;.001) was observed among participants in the ChatGPT group, along with higher levels of self-efficacy (<italic>P</italic>=.04) and learning motivation (<italic>P</italic>=.02). In addition, cognitive load was lower in the ChatGPT group according to eye-tracking measures (ChatGPT group: mean 0.137, SD 0.036; control group: mean 0.312, SD 0.032; <italic>P</italic>&lt;.001). The analysis of the learning performance of participants with different spatial abilities in the 2 modalities showed that compared to the learners with high spatial abilities (ChatGPT group: mean 76.58, SD 9.23; control group: mean 73.89, SD 11.75; <italic>P</italic>=.22), those with low spatial abilities (ChatGPT group: mean 70.20, SD 10.71; control group: mean 55.41, SD 13.31; <italic>P</italic>&lt;.001) were more positively influenced by ChatGPT.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>ChatGPT has performed outstandingly in assisting dental skill learning, and the study supports the integration of ChatGPT into skills teaching and provides new ideas for modernizing skill teaching.</p>
        </sec>
        <sec sec-type="trial registration">
          <title>Trial Registration</title>
          <p>ClinicalTrials.gov NCT06942130; https://clinicaltrials.gov/study/NCT06942130</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT</kwd>
        <kwd>dental education</kwd>
        <kwd>clinical skills</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>randomized controlled trial</kwd>
        <kwd>virtual reality</kwd>
        <kwd>cognitive load</kwd>
        <kwd>self-efficacy</kwd>
        <kwd>motivation</kwd>
        <kwd>spatial ability</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>ChatGPT, developed by OpenAI, is a powerful artificial intelligence (AI) language model based on the GPT architecture designed to generate human-like text and engage in conversation [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. It uses advanced deep learning techniques to understand various cues and respond with contextually relevant and coherent language [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Since its release, ChatGPT has been well received and multiple applications have been developed that integrate its chatbot capabilities. Many studies have reported on the potential of ChatGPT for passing examinations [<xref ref-type="bibr" rid="ref1">1</xref>], learning anatomy, and understanding emerging trends [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>], making it an important tool in areas such as medical education.</p>
      <p>The core of skills education is to enable students to master clinical skills, such as dental restoration, periodontal treatment, and surgical procedures, through hands-on practice [<xref ref-type="bibr" rid="ref8">8</xref>]. This hands-on approach allows students to apply theoretical knowledge to real cases [<xref ref-type="bibr" rid="ref9">9</xref>]. Before engaging in actual clinical practice, students typically undergo training in simulated environments using traditional dental mannequins [<xref ref-type="bibr" rid="ref10">10</xref>], 3D-printed models [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>], and virtual reality (VR) technologies [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. This training provides a risk-free setting where students can practice repeatedly until they achieve proficiency. Given the varying learning paces and skill levels among students, skills education often involves small-group teaching or one-on-one mentoring. Instructors tailor their guidance to students’ individual needs to ensure that each one attains the necessary clinical competence. However, this approach demands significant instructional resources.</p>
      <p>Research has shown that ChatGPT can assist students in reviewing key concepts, reinforcing theoretical knowledge [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref15">15</xref>], and simulating clinical scenarios to enhance clinical reasoning [<xref ref-type="bibr" rid="ref16">16</xref>]. This raises the question of whether ChatGPT could also serve as an instructor in skills training. We found that the potential of ChatGPT for dental skills education remains unknown.</p>
      <p>In addition, given the limited working space in oral procedures, fine motor skills and high spatial ability are crucial for mastering technical skills [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Instructional practitioners also need to incorporate Cognitive Load Theory [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>] and the Control Value Theory of Achievement Emotions [<xref ref-type="bibr" rid="ref21">21</xref>] into their instructional design; the former emphasizes minimizing the extraneous cognitive load, providing appropriate learning content for a given learner, and sparing sufficient working memory capacity for the germane cognitive load [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. The latter highlights the importance of emotions, which include self-efficacy and motivation, in academic achievements [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>].</p>
      <p>In this randomized controlled trial, we integrated ChatGPT into skill education and subsequently assessed the effectiveness of skill acquisition using high-fidelity desktop VR simulations. The investigation further evaluated the impacts of spatial ability, cognitive load, learning motivation, and self-efficacy. Hence, this study aims to investigate the potential value-added effects of a ChatGPT-integrated pedagogical framework on operative skill training in dental education, thereby providing evidence-based foundations for innovating dental education systems in the artificial intelligence era.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Recruitment</title>
        <p>This study recruited 192 dentistry students (female: n=98, 51.04%; male: n=94, 48.96%) from a first-class university. In China, the dental medicine curriculum during the first 3 years of university focuses on foundational medical sciences and basic clinical medicine, with the aim of progressively developing clinical reasoning skills. In the fourth and fifth years, students transition to systematic instruction encompassing both theoretical knowledge and clinical skills in core dental disciplines. Therefore, participants in the study were in their fourth or fifth years of a 5-year dental education program and had received theoretical knowledge in courses. They ranged in age from 20 to 25 years, with an average age of 22.53 (SD 1.47) years. All participants’ visual acuity was normal or had been corrected to be normal.</p>
      </sec>
      <sec>
        <title>Procedure</title>
        <p>Before the study, participants were calibrated for eye tracking using the 9-point method, and those who did not meet the calibration standards were excluded. Participants who met the inclusion criteria were randomly assigned to a ChatGPT-3.5–assisted learning group and a blank control group using the sealed envelope method to minimize systematic bias. The control group used videos for skill acquisition, while the ChatGPT group supplemented video learning with ChatGPT-3.5 as an additional learning tool. Following the completion of pretest questionnaires assessing theoretical knowledge, spatial ability, motivation, and self-efficacy, participants began a 1-week skill acquisition period. Subsequently, both groups were assessed on their skills using desktop VR (Zhonghui), and eye movement data were collected using an aSee eye-tracking device (EVERLOYAL). Data collection was conducted in a digital classroom with illumination levels maintained between 100 and 130 lux. Participants’ motivation and self-efficacy were also recorded.</p>
      </sec>
      <sec>
        <title>ChatGPT</title>
        <p>Although ChatGPT version 4.0 offers advantages such as enhanced comprehension, reasoning, and accuracy compared with version 3.5, the free version 3.5 (<xref rid="figure1" ref-type="fig">Figure 1</xref>) was used in this experiment due to its cost. Given that a recent study indicates nearly all faculty and students in Chinese universities have previous experience with generative AI tools, this study did not assess participants’ previous exposure to ChatGPT [<xref ref-type="bibr" rid="ref30">30</xref>]. However, standardized operational guidelines were distributed to the ChatGPT group via group messaging platforms before the study to ensure familiarity and proficiency with the technology. During the skill learning period, the ChatGPT group was allowed to use ChatGPT to address doubts that arose during the operation process, verify the correctness of the procedures, and provide additional guidance as needed (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>ChatGPT interface.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Eye-Tracking Equipment and Software for Detecting Cognitive Load</title>
        <p>Eye movements were monitored using the aSee eye-tracker (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>) at a sampling rate of 60 Hz, tracking both eyes. Before the experiment, the participant’s position was calibrated to ensure that the distance between the screens and eyes was approximately 70 cm and participants were told they could move their head freely but not too much during the experiment. aSee eye-tracker was applied to collect and analyze the eye movement data.</p>
      </sec>
      <sec>
        <title>Theoretical Knowledge Test</title>
        <p>A comprehensive knowledge test was formulated to assess participants’ theoretical understanding of the surgery (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). The test comprised 10 multiple-choice questions, and the content validity of these questions had been rigorously examined by experts, ensuring the relevance and appropriateness of the test content, and experts recommended the test be completed in 10 minutes. With each question scored out of 10, the total test score ranges from 0 to 100.</p>
      </sec>
      <sec>
        <title>Operational Test</title>
        <p>At the end of the experiments, participants were asked to complete an operational test in desktop VR within 15 minutes (<xref rid="figure2" ref-type="fig">Figure 2</xref>). Operational test scores are automatically generated by the VR, which avoids the influence of subjective factors on test results.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Virtual reality equipment.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Spatial Ability Test</title>
        <sec>
          <title>The Purdue Spatial Visualization Test</title>
          <p>The Purdue Spatial Visualization Test: Rotations, developed by Roland Guay, was used to gauge the spatial ability of participants. It consists of 30 questions designed to assess participants’ ability to mentally rotate a 3D object. Guay recommended a time limit of 20 minutes to complete the test. The total score is 30 points with 1 point per question. Participants were then divided into groups based on their spatial ability, using the median as a distinction between those with high spatial ability and those with low spatial ability.</p>
        </sec>
      </sec>
      <sec>
        <title>Learning Motivation and Self-Efficacy</title>
        <p>To assess students’ learning motivation and self-efficacy, this study used questionnaires that have been widely investigated in dental, medical, and nursing practice. The questionnaire contains 5 items focusing on the motivation domain and 5 items focusing on self-efficacy. Each item was coded according to a 5-point Likert rating scale (1=“strongly disagree,” 2=“disagree,” 3=“neutral,” 4=“agree,” and 5=“strongly agree”). The corresponding Cronbach α values were 0.76 and 0.73, indicating satisfactory internal consistency. Validity was censored by experts in medical education. All participants were requested to complete the questionnaire both before and after the experiment, with a time allocation of 10 minutes for this task.</p>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>An independent samples <italic>t</italic> test was used to determine the difference in performance and spatial ability, while the Mann-Whitney <italic>U</italic> test was used to compare the scores of self-efficacy and learning motivation. The raw pupil diameter data were extracted to measure cognitive load. For further signal processing, data points labeled as a blink and 50 milliseconds before and after these points were removed since eyelid movement during these periods might distort pupil diameter. MATLAB programming (MathWorks) was used to eliminate the fluctuations in pupil size to obtain a smooth curve of pupil size change over time, as depicted in <xref rid="figure3" ref-type="fig">Figure 3</xref>. In the analysis of pupil diameter variations during learning, this study calculated the median pupil sizes at the beginning of learning which was marked as the baseline, and the overall median pupil sizes during the learning process separately. The preference for the median over the mean was driven by the former’s greater robustness toward noise and outliers.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Smoothed pupil diameter.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>The trial protocol followed the CONSORT-EHEALTH (Consolidated Standards of Reporting Trials of Electronic and Mobile Health Applications and Online Telehealth; version 1.6.1) checklist (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>) [<xref ref-type="bibr" rid="ref31">31</xref>]. Informed consent was obtained from all participants and the study was approved by the ethics committee of the School and Hospital of Stomatology, Wuhan University (WDKQ2024-034) before it was conducted.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>The research results include the distribution of participants, knowledge test scores, operational test scores, and change in pupil diameter, which is used to measure the cognitive load, spatial ability test, learning motivation, and self-efficacy.</p>
      </sec>
      <sec>
        <title>Distribution of Participants</title>
        <p>During the eye-tracking calibration, 5 participants were excluded because they did not pass the calibration. Thus, 94 students were randomly assigned to the ChatGPT group and 93 to the control group. The sex distribution was balanced, with 50.80% of female participants (26.2% in the ChatGPT group and 24.6% in the control group) and 49.20% of male participants (24.06% in the ChatGPT group and 25.13% in the control group), ensuring a representative sample. The detailed experimental procedure is illustrated in <xref rid="figure4" ref-type="fig">Figure 4</xref>.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>CONSORT (Consolidated Standards of Reporting Trials) flow diagram. VR: virtual reality.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Theoretical Knowledge Test and Operational Test Scores</title>
        <p>This paper presents a comparative analysis between the ChatGPT group and the blank control group in terms of theoretical knowledge and operational performance. Shapiro-Wilk tests were used to assess the normality of the data, which showed that the data in both groups followed a normal distribution. Due to the normality, <italic>t</italic> tests were performed to determine significant differences.</p>
        <p>The results (<xref ref-type="table" rid="table1">Table 1</xref>) showed that there was no statistically significant difference in the theoretical knowledge test scores between the 2 groups (<italic>t</italic><sub>185</sub>=0.649, <italic>P</italic>=.52), suggesting that the participants&#8217; knowledge levels before learning operations in the 2 groups were comparable. However, the <italic>t</italic> test for operation performance revealed a statistically significant difference (<italic>t</italic><sub>176.241</sub>=4.569, <italic>P</italic>&lt;.001), indicating that the learning modes exerted an impact on operation learning. Specifically, the ChatGPT group exhibited a significant advantage over the video-only group in the operation training. It is noteworthy that all statistical tests were conducted at the α=.05 significance level.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>The comparisons of 2 tests between ChatGPT and the control group.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="180"/>
            <col width="180"/>
            <col width="280"/>
            <col width="140"/>
            <thead>
              <tr valign="top">
                <td>Test</td>
                <td colspan="2">Knowledge test score, mean (SD)</td>
                <td><italic>t</italic> test (<italic>df</italic>)</td>
                <td><italic>P</italic> value</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT</td>
                <td>Control</td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Theoretical</td>
                <td>41.24 (8.19)</td>
                <td>42.03 (8.34)</td>
                <td>0.649 (185)</td>
                <td>.52</td>
              </tr>
              <tr valign="top">
                <td>Operation</td>
                <td>73.12 (10.06)</td>
                <td>65.54 (12.48)</td>
                <td>4.569 (176.241)</td>
                <td>&lt;.001</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Cognitive Load</title>
        <p>To assess and compare the cognitive load experienced by the participants during the learning, changes in the participants&#8217; pupil diameters were analyzed. Preliminary screening of the data confirmed that they followed a normal distribution. <xref rid="figure5" ref-type="fig">Figure 5</xref> shows the mean increase in pupil diameter from baseline. An independent samples <italic>t</italic> test was performed to determine the statistical significance of the changes in pupil size. The results of the <italic>t</italic> test (<italic>P</italic>&lt;.001), performed at a significance level of α=.05, showed significant differences between the ChatGPT group (mean 0.137, SD 0.036) and the control group (mean 0.312, SD 0.032). This implies that the cognitive load borne by participants in the ChatGPT group was significantly lower than that in the control group.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Comparison between 2 groups in the change of pupil diameter.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Influence of Spatial Ability Based on Different Learning Modes</title>
        <p>In order to categorize the participants according to their spatial abilities, the median was taken as the boundary. <xref ref-type="table" rid="table2">Table 2</xref> provides an insight into the distribution and operational performance of participants with varying spatial abilities. In terms of the theoretical knowledge test, no statistically significant differences were found between the 2 modes, irrespective of whether participants belonged to high (<italic>t</italic><sub>92</sub>=0.689, <italic>P</italic>=.49) or low (<italic>t</italic><sub>91</sub>=0.764, <italic>P</italic>=.45) spatial ability groups. The independent samples <italic>t</italic> test, as presented in <xref ref-type="table" rid="table2">Table 2</xref>, indicated that there was no compelling evidence to support the notion that learning mode had a different impact on high spatial ability learners. However, for low spatial ability learners, the <italic>t</italic> test results indicated a statistically significant impact of the learning mode. With regard to those with low spatial abilities, the operational scores achieved by the ChatGPT group (mean 70.20, SD 10.71) were found to be higher than those attained by the control group (mean 55.41, SD 13.31).</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Distribution and <italic>t</italic> test for operational performance in different learning modes among high and low spatial ability learners.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="280"/>
            <col width="160"/>
            <col width="220"/>
            <col width="0"/>
            <col width="190"/>
            <col width="0"/>
            <col width="120"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Spatial ability and mode</td>
                <td>Participants, n</td>
                <td>Operational performance score, mean (SD)</td>
                <td colspan="2"><italic>t</italic> test (<italic>df</italic>)</td>
                <td colspan="2"><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="5">
                  <bold>High</bold>
                </td>
                <td colspan="2">1.23 (92)</td>
                <td>.22</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT</td>
                <td>43</td>
                <td>76.58 (9.23)</td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Control</td>
                <td>51</td>
                <td>73.89 (11.75)</td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Low</bold>
                </td>
                <td colspan="2">5.94 (91)</td>
                <td>&lt;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>ChatGPT</td>
                <td>51</td>
                <td>70.20 (10.71)</td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Control</td>
                <td>42</td>
                <td>55.41 (13.31)</td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Learning Motivation and Self-Efficacy</title>
        <p>Given that learning motivation and self-efficacy scores were not normally distributed, the Mann-Whitney <italic>U</italic> test was used to analyze the differences between the pretest and posttest scores for ChatGPT and the control group respectively. Furthermore, this study further compared the scores between the 2 groups. <xref rid="figure6" ref-type="fig">Figure 6</xref> demonstrates that, at the outset of the study, there was no evidence to suggest that there were any differences in learning motivation (Z=0.31, <italic>P</italic>=.76) and self-efficacy (Z=0.69, <italic>P</italic>=.48) between the 2 groups before the intervention. Nevertheless, it is notable that the posttest scores for learning motivation and self-efficacy differed from the pretest scores, irrespective of whether the learners were in the ChatGPT or control group. Furthermore, in comparison to the control group, the ChatGPT group exhibited higher posttest scores for both learning motivation (Z=2.32, <italic>P</italic>=.02) and self-efficacy (Z=2.03, <italic>P</italic>=.04).</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Motivation and self-efficacy pretest and posttest scores.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e68538_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The study revealed that incorporating ChatGPT-3.5 as a tutor resulted in markedly elevated performance scores in skill assessments compared with using videos in isolation. This discrepancy was particularly pronounced among students with lower spatial abilities. In addition, the use of ChatGPT-3.5 was shown to reduce cognitive load, enhance self-efficacy, and boost learning motivation. These findings provide robust evidence supporting the use of ChatGPT-3.5 as a valuable tool in skill training and offer insights for the design of educational programs.</p>
        <p>Our investigation builds on previous studies that have identified the potential of ChatGPT in medical education. A recent study demonstrated that ChatGPT exhibited exemplary performance in both the immediate and long-term contexts of orthopedic teaching for undergraduate students [<xref ref-type="bibr" rid="ref1">1</xref>]. In the field of dental education, extant research indicates that ChatGPT demonstrates satisfactory performance across a range of dental assessment types [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. However, these studies have primarily focused on theoretical knowledge. Our findings further reveal that ChatGPT exhibits comparable exciting potential in skills-based education. Compared with traditional dental education, ChatGPT offers personalized learning content and feedback tailored to individual student needs, helping students acquire skills more effectively. In addition, ChatGPT provides real-time feedback and answers to student queries, thereby enhancing the efficiency of the learning process [<xref ref-type="bibr" rid="ref34">34</xref>]. It also alleviates the strain on teaching resources and offers greater flexibility in skill training schedules.</p>
        <p>Changes in pupil diameter during task performance have been used to infer variations in cognitive load. A previous study applied this method to measure cognitive load differences between experts and students while examining dental radiographs [<xref ref-type="bibr" rid="ref35">35</xref>]. In line with this approach, this study used eye-tracking technology to record pupil diameter changes from baseline. The results indicated that participants in the ChatGPT-assisted group exhibited smaller changes in pupil diameter, suggesting a reduction in cognitive load. This decrease can be attributed to ChatGPT’s ability to address challenging points in the skill-learning process. These findings not only validate the efficacy of ChatGPT in reducing cognitive load but also highlight its potential as a pedagogical tool that fosters a smoother and more relaxed learning experience.</p>
        <p>Emerging evidence indicates the varied performance of AI models in spatial relations–related topics. Previous studies evaluating ChatGPT on Geographic Information Systems examinations demonstrated its capacity to achieve passing scores in spatial analysis, spatial statistics, and interpolation tasks [<xref ref-type="bibr" rid="ref36">36</xref>]. Furthermore, generative AI systems including ChatGPT-3.5 have shown nascent potential in executing basic spatial queries [<xref ref-type="bibr" rid="ref37">37</xref>]. However, limitations persist, as evidenced by its suboptimal performance in robot programming scenarios requiring complex 3D spatial reasoning and nuanced understanding of spatial relationships [<xref ref-type="bibr" rid="ref38">38</xref>]. Nevertheless, there is no research on spatial relations–related topics linking ChatGPT and medical skill learning, and this study provides a preliminary exploration of this area.</p>
        <p>Previous research has demonstrated a significant yet modest positive correlation between spatial ability measured by the Purdue Spatial Visualization Test: Rotations and performance in dental anatomy assessments [<xref ref-type="bibr" rid="ref39">39</xref>]. Furthermore, studies using other spatial ability assessments, such as the mental rotations test, visualization of views test, and visualization of rotation test, have consistently shown that students with higher spatial aptitude achieve superior performance in endodontics [<xref ref-type="bibr" rid="ref40">40</xref>], radiology [<xref ref-type="bibr" rid="ref41">41</xref>], anatomy [<xref ref-type="bibr" rid="ref42">42</xref>], and prosthodontics [<xref ref-type="bibr" rid="ref43">43</xref>]. The convergent validity of these findings across diverse spatial ability metrics and dental subdisciplines suggests that spatial ability may serve as a foundational competency in skill acquisition. This study delves into the impact of spatial ability on learning outcomes under different instructional modes, revealing the interaction between spatial ability and teaching methods. The findings indicate that ChatGPT-assisted instruction significantly enhances learning outcomes for learners with lower spatial ability. However, no significant difference was observed between the 2 instructional modes for learners with higher spatial ability. Cognitive load theory provides a framework for understanding these results. Given the limited capacity of working memory to process information simultaneously, learners with lower spatial ability in the control group experienced cognitive load beyond their cognitive resources [<xref ref-type="bibr" rid="ref20">20</xref>]. In contrast, learners with higher spatial ability could activate preconstructed schemas based on 2D images, thus reducing the demand on working memory [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. 
ChatGPT, by offering clear explanations and guidance, helps learners with lower spatial ability better comprehend complex skills or concepts, thereby alleviating cognitive load and improving their learning outcomes.</p>
        <p>Besides, the findings of this study indicate that participants in both learning modes experienced improvements in learning motivation and self-efficacy, with ChatGPT-assisted learning demonstrating a particularly pronounced capacity to facilitate these outcomes. Previous studies also support this view [<xref ref-type="bibr" rid="ref45">45</xref>]. A thematic analysis of interviews with higher education experts indicated that the personalized feedback and support provided by ChatGPT can assist students in setting and achieving goals, reflecting on their progress, and enhancing noncognitive skills such as motivation and self-efficacy [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>].</p>
        <p>As an auxiliary tool in medical education, ChatGPT is currently considered a double-edged sword by many scholars [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. On one hand, it may impede the development of students’ critical thinking and independent learning abilities and potentially encourage academic dishonesty [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. However, when applied to skill training, ChatGPT displays considerable promise. It is capable of providing tailored assistance and feedback in response to the learner’s progress, offering technical guidance and confirmation of procedural steps [<xref ref-type="bibr" rid="ref53">53</xref>]. This aids in the comprehension of complex skills, such as tooth preparation, periodontal scaling, and impacted tooth extraction [<xref ref-type="bibr" rid="ref7">7</xref>]. Furthermore, it diminishes cognitive load during the learning process, enhancing both learning motivation and self-efficacy. Therefore, dental educators must not choke on their knowledge but rather set a new standard for teaching methods and assessment to keep up with the times.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>First, it is important to recognize that ChatGPT has some inherent limitations. Although ChatGPT is trained on big data, the training data may be inherently biased or errors may occur in the training process, which affects the accuracy and reliability of the information provided [<xref ref-type="bibr" rid="ref54">54</xref>]. During the skill learning process, individuals will have their distinctive queries, so it was difficult in this study to verify the accuracy of ChatGPT’s responses. Second, this study was conducted using the free version of ChatGPT-3.5, and more research is needed to explore whether ChatGPT-4.0 would be more advantageous in aiding skill instruction [<xref ref-type="bibr" rid="ref55">55</xref>]. Besides, the study compares ChatGPT with a blank control group based on videos, and further research is required to investigate the detailed strengths and weaknesses of ChatGPT in comparison to other methods.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This study sought to ascertain whether the supplementary use of ChatGPT-3.5 is more effective for mastering basic oral skills operations among dental students compared with videos alone in terms of manipulative performance, cognitive load, spatial ability, and emotions. The study used desktop VR for skill testing and the results showed that students with the assistance of ChatGPT performed better. Eye-tracking technology was used to record the visual behaviors, and the results revealed that participants in the ChatGPT-3.5 group experienced reduced cognitive load. The interaction analysis highlighted learners with low spatial ability derived greater benefits from the ChatGPT than those with high spatial ability. In addition, the questionnaire revealed that learners in the ChatGPT group demonstrated higher levels of self-efficacy and learning motivation. In conclusion, the findings of this study contribute to recognizing the potential of ChatGPT in dental skills education.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>ChatGPT feedback and interactions.</p>
        <media xlink:href="jmir_v27i1e68538_app1.docx" xlink:title="DOCX File , 52 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Reference to aSee A6 eye-tracker.</p>
        <media xlink:href="jmir_v27i1e68538_app2.docx" xlink:title="DOCX File , 16 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Knowledge test.</p>
        <media xlink:href="jmir_v27i1e68538_app3.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>CONSORT eHEALTH checklist (V 1.6.1).</p>
        <media xlink:href="jmir_v27i1e68538_app4.pdf" xlink:title="PDF File  (Adobe PDF File), 50991 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CONSORT-EHEALTH</term>
          <def>
            <p>Consolidated Standards of Reporting Trials of Electronic and Mobile Health Applications and Online Telehealth</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study would not have been possible without the support of Wuhan University. This study was supported by Wuhan University Education Quality Building Project (2024ZG147), Natural Science Foundation of Hubei Province of China (2021CFB466), Medical Backbone Talents Foundation of Wuhan City of China (2020-55), Nursing Research Foundation of Wuhan University (030), Clinical Medicine Education Research Project of Hubei Provincial Health Commission (HBJG-220005), and Nursing study of Stomatology Hospital of Wuhan University.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The datasets generated or analyzed during the study are available from the corresponding author on reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>SY and CW contributed to conceptualization and formal analysis. XY performed data curation. DY, XX, and CW managed funding acquisition. SY, SH, and SN handled investigation. SY, CW, XY, and XX conducted methodology. SY, CW, and DY project administration and writing—review and editing. DY and XX conducted supervision. XY, SH, and SN validation. SY and CW writing—original draft.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ouyang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Integrating ChatGPT in orthopedic education for medical undergraduates: randomized controlled trial</article-title>
          <source>J Med Internet Res</source>
          <year>2024</year>
          <volume>26</volume>
          <fpage>e57037</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2024//e57037/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/57037</pub-id>
          <pub-id pub-id-type="medline">39163598</pub-id>
          <pub-id pub-id-type="pii">v26i1e57037</pub-id>
          <pub-id pub-id-type="pmcid">PMC11372336</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bagde</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Dhopte</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alam</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Basri</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A systematic review and meta-analysis on ChatGPT and its utilization in medical and dental research</article-title>
          <source>Heliyon</source>
          <year>2023</year>
          <volume>9</volume>
          <issue>12</issue>
          <fpage>e23050</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2405-8440(23)10258-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.heliyon.2023.e23050</pub-id>
          <pub-id pub-id-type="medline">38144348</pub-id>
          <pub-id pub-id-type="pii">S2405-8440(23)10258-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC10746423</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>EPH</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kwok</surname>
              <given-names>JYY</given-names>
            </name>
            <name name-style="western">
              <surname>Lok</surname>
              <given-names>KYW</given-names>
            </name>
          </person-group>
          <article-title>Chatting or cheating? The impacts of ChatGPT and other artificial intelligence language models on nurse education</article-title>
          <source>Nurse Educ Today</source>
          <year>2023</year>
          <volume>125</volume>
          <fpage>105796</fpage>
          <pub-id pub-id-type="doi">10.1016/j.nedt.2023.105796</pub-id>
          <pub-id pub-id-type="medline">36934624</pub-id>
          <pub-id pub-id-type="pii">S0260-6917(23)00090-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lubowitz</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT, an artificial intelligence chatbot, is impacting medical literature</article-title>
          <source>Arthroscopy</source>
          <year>2023</year>
          <volume>39</volume>
          <issue>5</issue>
          <fpage>1121</fpage>
          <lpage>1122</lpage>
          <pub-id pub-id-type="doi">10.1016/j.arthro.2023.01.015</pub-id>
          <pub-id pub-id-type="medline">36797148</pub-id>
          <pub-id pub-id-type="pii">S0749-8063(23)00033-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kitamura</surname>
              <given-names>FC</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT is shaping the future of medical writing but still requires human judgment</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <volume>307</volume>
          <issue>2</issue>
          <fpage>e230171</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.230171</pub-id>
          <pub-id pub-id-type="medline">36728749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lebhar</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Velazquez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Goza</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hoppe</surname>
              <given-names>IC</given-names>
            </name>
          </person-group>
          <article-title>Dr. ChatGPT: Utilizing artificial intelligence in surgical education</article-title>
          <source>Cleft Palate Craniofac J</source>
          <year>2024</year>
          <volume>61</volume>
          <issue>12</issue>
          <fpage>2067</fpage>
          <lpage>2073</lpage>
          <pub-id pub-id-type="doi">10.1177/10556656231193966</pub-id>
          <pub-id pub-id-type="medline">37545428</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karobari</surname>
              <given-names>MI</given-names>
            </name>
            <name name-style="western">
              <surname>Suryawanshi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Patil</surname>
              <given-names>SR</given-names>
            </name>
          </person-group>
          <article-title>Revolutionizing oral and maxillofacial surgery: ChatGPT's impact on decision support, patient communication, and continuing education</article-title>
          <source>Int J Surg</source>
          <year>2024</year>
          <volume>110</volume>
          <issue>6</issue>
          <fpage>3143</fpage>
          <lpage>3145</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38446838"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/JS9.0000000000001286</pub-id>
          <pub-id pub-id-type="medline">38446838</pub-id>
          <pub-id pub-id-type="pii">01279778-990000000-01147</pub-id>
          <pub-id pub-id-type="pmcid">PMC11175733</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arigbede</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Denloye</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Dosumu</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Use of simulators in operative dental education: experience in southern Nigeria</article-title>
          <source>Afr Health Sci</source>
          <year>2015</year>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>269</fpage>
          <lpage>277</lpage>
          <pub-id pub-id-type="doi">10.4314/ahs.v15i1.35</pub-id>
          <pub-id pub-id-type="medline">25834558</pub-id>
          <pub-id pub-id-type="pii">jAFHS.v15.i1.pg269</pub-id>
          <pub-id pub-id-type="pmcid">PMC4370126</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Effectiveness of case-based learning in Chinese dental education: a systematic review and meta-analysis</article-title>
          <source>BMJ Open</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>2</issue>
          <fpage>e048497</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmjopen.bmj.com/lookup/pmidlookup?view=long&amp;pmid=35190409"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjopen-2020-048497</pub-id>
          <pub-id pub-id-type="medline">35190409</pub-id>
          <pub-id pub-id-type="pii">bmjopen-2020-048497</pub-id>
          <pub-id pub-id-type="pmcid">PMC8860046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lv</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The current situation and future prospects of simulators in dental education</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>e23635</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/4/e23635/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/23635</pub-id>
          <pub-id pub-id-type="medline">33830059</pub-id>
          <pub-id pub-id-type="pii">v23i4e23635</pub-id>
          <pub-id pub-id-type="pmcid">PMC8063092</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reymus</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fotiadou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kessler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Heck</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hickel</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Diegritz</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>3D printed replicas for endodontic education</article-title>
          <source>Int Endod J</source>
          <year>2019</year>
          <volume>52</volume>
          <issue>1</issue>
          <fpage>123</fpage>
          <lpage>130</lpage>
          <pub-id pub-id-type="doi">10.1111/iej.12964</pub-id>
          <pub-id pub-id-type="medline">29900562</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>3D-printed coloured tooth model for inlay preparation in pre-clinical dental education</article-title>
          <source>Eur J Dent Educ</source>
          <year>2024</year>
          <volume>28</volume>
          <issue>2</issue>
          <fpage>481</fpage>
          <lpage>489</lpage>
          <pub-id pub-id-type="doi">10.1111/eje.12972</pub-id>
          <pub-id pub-id-type="medline">37994209</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Corrêa</surname>
              <given-names>CG</given-names>
            </name>
            <name name-style="western">
              <surname>de Andrade Moreira Machado</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Ranzini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Tori</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>de Lourdes Santos Nunes</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality simulator for dental anesthesia training in the inferior alveolar nerve block</article-title>
          <source>J Appl Oral Sci</source>
          <year>2017</year>
          <volume>25</volume>
          <issue>4</issue>
          <fpage>357</fpage>
          <lpage>366</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.scielo.br/scielo.php?script=sci_arttext&amp;pid=S1678-77572017000400357&amp;lng=en&amp;nrm=iso&amp;tlng=en"/>
          </comment>
          <pub-id pub-id-type="doi">10.1590/1678-7757-2016-0386</pub-id>
          <pub-id pub-id-type="medline">28877273</pub-id>
          <pub-id pub-id-type="pii">S1678-77572017000400357</pub-id>
          <pub-id pub-id-type="pmcid">PMC5595107</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bruno</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Wolff</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wernly</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Masyuk</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Piayda</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Leaver</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Erkens</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Oehler</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Afzal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Heidari</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kelm</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Virtual and augmented reality in critical care medicine: the patient's, clinician's, and researcher's perspective</article-title>
          <source>Crit Care</source>
          <year>2022</year>
          <volume>26</volume>
          <issue>1</issue>
          <fpage>326</fpage>
          <pub-id pub-id-type="doi">10.1186/s13054-022-04202-x</pub-id>
          <pub-id pub-id-type="medline">36284350</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13054-022-04202-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC9593998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Pu</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of large language models for literature screening in thoracic surgery: Diagnostic study</article-title>
          <source>J Med Internet Res</source>
          <year>2025</year>
          <volume>27</volume>
          <fpage>e67488</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e67488/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/67488</pub-id>
          <pub-id pub-id-type="medline">40068152</pub-id>
          <pub-id pub-id-type="pii">v27i1e67488</pub-id>
          <pub-id pub-id-type="pmcid">PMC11937709</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Borg</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Georg</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jobs</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Huss</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Waldenlind</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ruiz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Edelbring</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Skantze</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Parodis</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Virtual patient simulations using social robotics combined with large language models for clinical reasoning training in medical education: mixed methods study</article-title>
          <source>J Med Internet Res</source>
          <year>2025</year>
          <volume>27</volume>
          <fpage>e63312</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e63312/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/63312</pub-id>
          <pub-id pub-id-type="medline">40053778</pub-id>
          <pub-id pub-id-type="pii">v27i1e63312</pub-id>
          <pub-id pub-id-type="pmcid">PMC11914843</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Donoff</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Karimbux</surname>
              <given-names>NY</given-names>
            </name>
          </person-group>
          <article-title>An initial assessment of haptics in preclinical operative dentistry training</article-title>
          <source>J Investig Clin Dent</source>
          <year>2015</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>69</fpage>
          <lpage>76</lpage>
          <pub-id pub-id-type="doi">10.1111/jicd.12065</pub-id>
          <pub-id pub-id-type="medline">23946269</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wajngarten</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pazos</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Menegazzo</surname>
              <given-names>VP</given-names>
            </name>
            <name name-style="western">
              <surname>Novo</surname>
              <given-names>JPD</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>PPNS</given-names>
            </name>
          </person-group>
          <article-title>Magnification effect on fine motor skills of dental students</article-title>
          <source>PLoS One</source>
          <year>2021</year>
          <volume>16</volume>
          <issue>11</issue>
          <fpage>e0259768</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0259768"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0259768</pub-id>
          <pub-id pub-id-type="medline">34748614</pub-id>
          <pub-id pub-id-type="pii">PONE-D-21-02845</pub-id>
          <pub-id pub-id-type="pmcid">PMC8575251</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brunken</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Plass</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Leutner</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Direct measurement of cognitive load in multimedia learning</article-title>
          <source>Educ Psychol</source>
          <year>2003</year>
          <volume>38</volume>
          <issue>1</issue>
          <fpage>53</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1207/s15326985ep3801_7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>E.A.-L.</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>KW</given-names>
            </name>
          </person-group>
          <article-title>Learning with desktop virtual reality: Low spatial ability learners are more positively affected</article-title>
          <source>Comput Educ</source>
          <year>2014</year>
          <volume>79</volume>
          <fpage>49</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1016/j.compedu.2014.07.010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Makransky</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lilleholt</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>A structural equation modeling investigation of the emotional value of immersive virtual reality in education</article-title>
          <source>Education Tech Research Dev</source>
          <year>2018</year>
          <volume>66</volume>
          <issue>5</issue>
          <fpage>1141</fpage>
          <lpage>1164</lpage>
          <pub-id pub-id-type="doi">10.1007/s11423-018-9581-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Albus</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Vogt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Seufert</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Signaling in virtual reality influences learning outcome and cognitive load</article-title>
          <source>Comput Educ</source>
          <year>2021</year>
          <volume>166</volume>
          <fpage>104154</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compedu.2021.104154</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Grierson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Norman</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Manipulation of cognitive load variables and impact on auscultation test performance</article-title>
          <source>Adv Health Sci Educ Theory Pract</source>
          <year>2015</year>
          <volume>20</volume>
          <issue>4</issue>
          <fpage>935</fpage>
          <lpage>952</lpage>
          <pub-id pub-id-type="doi">10.1007/s10459-014-9573-x</pub-id>
          <pub-id pub-id-type="medline">25430065</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10459-014-9573-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Suebnukarn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hataidechadusadee</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Suwannasri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Suprasert</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Rhienmora</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Haddawy</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Access cavity preparation training using haptic virtual reality and microcomputed tomography tooth models</article-title>
          <source>Int Endod J</source>
          <year>2011</year>
          <volume>44</volume>
          <issue>11</issue>
          <fpage>983</fpage>
          <lpage>989</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1365-2591.2011.01899.x</pub-id>
          <pub-id pub-id-type="medline">21623838</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sweller</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Mestre</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>BH</given-names>
            </name>
          </person-group>
          <article-title>Cognitive Load Theory</article-title>
          <source>Cognition in Education</source>
          <year>2011</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>Academic Press</publisher-name>
          <fpage>37</fpage>
          <lpage>76</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Takhdat</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rebahi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rooney</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Ait Babram</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Benali</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Touzani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lamtali</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>El Adib</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>The impact of brief mindfulness meditation on anxiety, cognitive load, and teamwork in emergency simulation training: A randomized controlled trial</article-title>
          <source>Nurse Educ Today</source>
          <year>2024</year>
          <volume>132</volume>
          <fpage>106005</fpage>
          <pub-id pub-id-type="doi">10.1016/j.nedt.2023.106005</pub-id>
          <pub-id pub-id-type="medline">37944276</pub-id>
          <pub-id pub-id-type="pii">S0260-6917(23)00299-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Makransky</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Petersen</surname>
              <given-names>GB</given-names>
            </name>
          </person-group>
          <article-title>Investigating the process of learning with desktop virtual reality: A structural equation modeling approach</article-title>
          <source>Comput Educ</source>
          <year>2019</year>
          <volume>134</volume>
          <fpage>15</fpage>
          <lpage>30</lpage>
          <pub-id pub-id-type="doi">10.1016/j.compedu.2019.02.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bandura</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Self-efficacy: toward a unifying theory of behavioral change</article-title>
          <source>Psychol Rev</source>
          <year>1977</year>
          <volume>84</volume>
          <issue>2</issue>
          <fpage>191</fpage>
          <lpage>215</lpage>
          <pub-id pub-id-type="doi">10.1037//0033-295x.84.2.191</pub-id>
          <pub-id pub-id-type="medline">847061</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Anderman</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Gray</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Motivation and classroom learning</article-title>
          <source>Handbook of psychology, Volume 7, Educational psychology. 2nd ed</source>
          <year>2012</year>
          <publisher-loc>Hoboken, NJ</publisher-loc>
          <publisher-name>Wiley</publisher-name>
          <fpage>99</fpage>
          <lpage>116</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <article-title>From Prohibition to Regulation: Universities Explore the Boundaries of AI Use</article-title>
          <source>MyCOS Research Institute</source>
          <access-date>2025-03-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.sohu.com/a/853729904_222256">https://www.sohu.com/a/853729904_222256</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
            <collab>CONSORT-EHEALTH Group</collab>
          </person-group>
          <article-title>CONSORT-EHEALTH: improving and standardizing evaluation reports of Web-based and mobile health interventions</article-title>
          <source>J Med Internet Res</source>
          <year>2011</year>
          <volume>13</volume>
          <issue>4</issue>
          <fpage>e126</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2011/4/e126/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.1923</pub-id>
          <pub-id pub-id-type="medline">22209829</pub-id>
          <pub-id pub-id-type="pii">v13i4e126</pub-id>
          <pub-id pub-id-type="pmcid">PMC3278112</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Danesh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pazouki</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Danesh</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Danesh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vardar-Sengul</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in dental education: ChatGPT's performance on the periodontic in-service examination</article-title>
          <source>J Periodontol</source>
          <year>2024</year>
          <volume>95</volume>
          <issue>7</issue>
          <fpage>682</fpage>
          <lpage>687</lpage>
          <pub-id pub-id-type="doi">10.1002/JPER.23-0514</pub-id>
          <pub-id pub-id-type="medline">38197146</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Barhom</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tamimi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Duggal</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT-A double-edged sword for healthcare education? Implications for assessments of dental students</article-title>
          <source>Eur J Dent Educ</source>
          <year>2024</year>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>206</fpage>
          <lpage>211</lpage>
          <pub-id pub-id-type="doi">10.1111/eje.12937</pub-id>
          <pub-id pub-id-type="medline">37550893</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Okuhara</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Shirabe</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nishiie</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Okada</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kiuchi</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT across different versions in medical licensing examinations worldwide: systematic review and meta-analysis</article-title>
          <source>J Med Internet Res</source>
          <year>2024</year>
          <volume>26</volume>
          <fpage>e60807</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2024/1/e60807/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/60807</pub-id>
          <pub-id pub-id-type="medline">39052324</pub-id>
          <pub-id pub-id-type="pii">v26i1e60807</pub-id>
          <pub-id pub-id-type="pmcid">PMC11310649</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Castner</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Appel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Eder</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Richter</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Scheiter</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Keutel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hüttig</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Duchowski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kasneci</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Pupil diameter differentiates expertise in dental radiography visual search</article-title>
          <source>PLoS One</source>
          <year>2020</year>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>e0223941</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0223941"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0223941</pub-id>
          <pub-id pub-id-type="medline">32469952</pub-id>
          <pub-id pub-id-type="pii">PONE-D-19-27421</pub-id>
          <pub-id pub-id-type="pmcid">PMC7259659</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mooney</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Juhász</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Towards understanding the geospatial skills of ChatGPT: Taking a geographic information systems (GIS) exam</article-title>
          <year>2023</year>
          <conf-name>GeoAI '23: Proceedings of the 6th ACM SIGSPATIAL International Workshop on AI for Geographic Knowledge Discovery</conf-name>
          <conf-date>2023 November 20</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3615886.3627745</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Renshaw</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lourentzou</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Crawford</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Comparing the spatial querying capacity of large language models: OpenAI's ChatGPT and Google's Gemini Pro</article-title>
          <source>Prof. Geogr</source>
          <year>2025</year>
          <volume>77</volume>
          <issue>2</issue>
          <fpage>186</fpage>
          <lpage>198</lpage>
          <pub-id pub-id-type="doi">10.1080/00330124.2024.2434455</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sobo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mubarak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Baimagambetov</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Polatidis</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Evaluating LLMs for code generation in HRI: A comparative study of ChatGPT, Gemini, and Claude</article-title>
          <source>Appl. Artif. Intell</source>
          <year>2024</year>
          <volume>39</volume>
          <issue>1</issue>
          <fpage>2439610</fpage>
          <pub-id pub-id-type="doi">10.1080/08839514.2024.2439610</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sarilita</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lita</surname>
              <given-names>YA</given-names>
            </name>
            <name name-style="western">
              <surname>Firman</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkinson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Susilawati</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Saptarini</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Aripin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sjamsudin</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Spatial ability and anatomy learning performance among dental students</article-title>
          <source>Korean J Med Educ</source>
          <year>2022</year>
          <volume>34</volume>
          <issue>4</issue>
          <fpage>309</fpage>
          <lpage>318</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36464901"/>
          </comment>
          <pub-id pub-id-type="doi">10.3946/kjme.2022.239</pub-id>
          <pub-id pub-id-type="medline">36464901</pub-id>
          <pub-id pub-id-type="pii">kjme.2022.239</pub-id>
          <pub-id pub-id-type="pmcid">PMC9726233</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Collet</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Reitmann</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Valette</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hoyek</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Maurin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ducret</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Villat</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Santamaria</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Richert</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Spatial abilities and endodontic access cavity preparation: Implications for dental education</article-title>
          <source>Eur J Dent Educ</source>
          <year>2025</year>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1111/eje.13039</pub-id>
          <pub-id pub-id-type="medline">39312553</pub-id>
          <pub-id pub-id-type="pmcid">PMC11730116</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nilsson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hedman</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ahlqvist</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Visual-spatial ability and interpretation of three-dimensional information in radiographs</article-title>
          <source>Dentomaxillofac Radiol</source>
          <year>2007</year>
          <volume>36</volume>
          <issue>2</issue>
          <fpage>86</fpage>
          <lpage>91</lpage>
          <pub-id pub-id-type="doi">10.1259/dmfr/56593635</pub-id>
          <pub-id pub-id-type="medline">17403885</pub-id>
          <pub-id pub-id-type="pii">36/2/86</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gonzales</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Ferns</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vorstenbosch</surname>
              <given-names>MATM</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>CF</given-names>
            </name>
          </person-group>
          <article-title>Does spatial awareness training affect anatomy learning in medical students?</article-title>
          <source>Anat Sci Educ</source>
          <year>2020</year>
          <volume>13</volume>
          <issue>6</issue>
          <fpage>707</fpage>
          <lpage>720</lpage>
          <pub-id pub-id-type="doi">10.1002/ase.1949</pub-id>
          <pub-id pub-id-type="medline">32048478</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodacre</surname>
              <given-names>CJ</given-names>
            </name>
          </person-group>
          <article-title>Digital learning resources for prosthodontic education: the perspectives of a long-term dental educator regarding 4 key factors</article-title>
          <source>J Prosthodont</source>
          <year>2018</year>
          <volume>27</volume>
          <issue>9</issue>
          <fpage>791</fpage>
          <lpage>797</lpage>
          <pub-id pub-id-type="doi">10.1111/jopr.12987</pub-id>
          <pub-id pub-id-type="medline">30307085</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sweller</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ayres</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kalyuga</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Sweller</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ayres</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kalyuga</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The Expertise Reversal Effect</article-title>
          <source>Cognitive Load Theory</source>
          <year>2011</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>155</fpage>
          <lpage>170</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sauder</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tritsch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rajput</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Shoja</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Exploring generative artificial intelligence-assisted medical education: assessing case-based learning for medical students</article-title>
          <source>Cureus</source>
          <year>2024</year>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>e51961</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38333501"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.51961</pub-id>
          <pub-id pub-id-type="medline">38333501</pub-id>
          <pub-id pub-id-type="pmcid">PMC10852982</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Applying ChatGPT to tackle the side effects of personal learning environments from learner and learning perspective: An interview of experts in higher education</article-title>
          <source>PLoS One</source>
          <year>2024</year>
          <volume>19</volume>
          <issue>1</issue>
          <fpage>e0295646</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0295646"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0295646</pub-id>
          <pub-id pub-id-type="medline">38170691</pub-id>
          <pub-id pub-id-type="pii">PONE-D-23-08682</pub-id>
          <pub-id pub-id-type="pmcid">PMC10763943</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schiefele</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Krapp</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Winteler</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Renninger</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Hidi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krapp</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Interest as a predictor of academic achievement: A meta-analysis of research</article-title>
          <source>The role of interest in learning and development</source>
          <year>1992</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Psychology Press</publisher-name>
          <fpage>183</fpage>
          <lpage>212</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abd-Alrazaq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>AlSaad</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alhuwail</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Healy</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Latifi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aziz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Damseh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alabed Alrazak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Large language models in medical education: Opportunities, challenges, and future directions</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e48291</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e48291/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48291</pub-id>
          <pub-id pub-id-type="medline">37261894</pub-id>
          <pub-id pub-id-type="pii">v9i1e48291</pub-id>
          <pub-id pub-id-type="pmcid">PMC10273039</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>The rise of ChatGPT: Exploring its potential in medical education</article-title>
          <source>Anat Sci Educ</source>
          <year>2024</year>
          <volume>17</volume>
          <issue>5</issue>
          <fpage>926</fpage>
          <lpage>931</lpage>
          <pub-id pub-id-type="doi">10.1002/ase.2270</pub-id>
          <pub-id pub-id-type="medline">36916887</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gödde</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nöhl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rupert</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Rimkus</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ehlers</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Breuckmann</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sellmann</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A SWOT (strengths, weaknesses, opportunities, and threats) analysis of ChatGPT in the medical literature: Concise review</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e49368</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e49368/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/49368</pub-id>
          <pub-id pub-id-type="medline">37865883</pub-id>
          <pub-id pub-id-type="pii">v25i1e49368</pub-id>
          <pub-id pub-id-type="pmcid">PMC10690535</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arif</surname>
              <given-names>TB</given-names>
            </name>
            <name name-style="western">
              <surname>Munaf</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Ul-Haque</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>The future of medical education and research: Is ChatGPT a blessing or blight in disguise?</article-title>
          <source>Med Educ Online</source>
          <year>2023</year>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>2181052</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.tandfonline.com/doi/10.1080/10872981.2023.2181052?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/10872981.2023.2181052</pub-id>
          <pub-id pub-id-type="medline">36809073</pub-id>
          <pub-id pub-id-type="pmcid">PMC9946299</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Safranek</surname>
              <given-names>CW</given-names>
            </name>
            <name name-style="western">
              <surname>Sidamon-Eristoff</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Gilson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chartash</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The role of large language models in medical education: Applications and implications</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e50945</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e50945/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/50945</pub-id>
          <pub-id pub-id-type="medline">37578830</pub-id>
          <pub-id pub-id-type="pii">v9i1e50945</pub-id>
          <pub-id pub-id-type="pmcid">PMC10463084</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sheng</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Assessing ChatGPT as a medical consultation assistant for chronic hepatitis B: Cross-language study of English and Chinese</article-title>
          <source>JMIR Med Inform</source>
          <year>2024</year>
          <volume>12</volume>
          <fpage>e56426</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2024/1/e56426/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/56426</pub-id>
          <pub-id pub-id-type="medline">39115930</pub-id>
          <pub-id pub-id-type="pii">v12i1e56426</pub-id>
          <pub-id pub-id-type="pmcid">PMC11342014</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shimizu</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kasai</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Shikino</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Araki</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Takahashi</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Onodera</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kimura</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Tsukamoto</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yamauchi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Asahina</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ito</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kawakami</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Developing medical education curriculum reform strategies to address the impact of generative AI: Qualitative study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e53466</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e53466/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/53466</pub-id>
          <pub-id pub-id-type="medline">38032695</pub-id>
          <pub-id pub-id-type="pii">v9i1e53466</pub-id>
          <pub-id pub-id-type="pmcid">PMC10722362</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Takagi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Watari</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Erabi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sakaguchi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Performance of GPT-3.5 and GPT-4 on the Japanese medical licensing examination: comparison study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e48002</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e48002/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48002</pub-id>
          <pub-id pub-id-type="medline">37384388</pub-id>
          <pub-id pub-id-type="pii">v9i1e48002</pub-id>
          <pub-id pub-id-type="pmcid">PMC10365615</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
