<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="review-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v27i1e58966</article-id>
      <article-id pub-id-type="pmid">41252719</article-id>
      <article-id pub-id-type="doi">10.2196/58966</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Transforming Surgical Training With AI Techniques for Training, Assessment, and Evaluation: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Leung</surname>
            <given-names>Tiffany</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Yin</surname>
            <given-names>Rong</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Pojskic</surname>
            <given-names>Mirza</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Escobar-Castillejos</surname>
            <given-names>David</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Facultad de Ingeniería</institution>
            <institution>Universidad Panamericana</institution>
            <addr-line>Augusto Rodin 498</addr-line>
            <addr-line>Ciudad de México, 03920</addr-line>
            <country>Mexico</country>
            <phone>52 5545221827</phone>
            <email>descobarc@up.edu.mx</email>
          </address>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-6583-5274">https://orcid.org/0000-0002-6583-5274</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Barrera-Animas</surname>
            <given-names>Ari Y</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-8533-125X">https://orcid.org/0000-0002-8533-125X</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Noguez</surname>
            <given-names>Julieta</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-6000-3452">https://orcid.org/0000-0002-6000-3452</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Magana</surname>
            <given-names>Alejandra J</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0001-6117-7502">https://orcid.org/0000-0001-6117-7502</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Benes</surname>
            <given-names>Bedrich</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-5293-2112">https://orcid.org/0000-0002-5293-2112</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Facultad de Ingeniería</institution>
        <institution>Universidad Panamericana</institution>
        <addr-line>Ciudad de México</addr-line>
        <country>Mexico</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Facultad de Ingeniería</institution>
        <institution>Universidad Nacional Autónoma de México</institution>
        <addr-line>Ciudad de México</addr-line>
        <country>Mexico</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>School of Applied and Creative Computing and School of Engineering Education</institution>
        <institution>Purdue University</institution>
        <addr-line>West Lafayette, IN</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Computer Science</institution>
        <institution>Purdue University</institution>
        <addr-line>West Lafayette, IN</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: David Escobar-Castillejos <email>descobarc@up.edu.mx</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>18</day>
        <month>11</month>
        <year>2025</year>
      </pub-date>
      <volume>27</volume>
      <elocation-id>e58966</elocation-id>
      <history>
        <date date-type="received">
          <day>29</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>13</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>20</day>
          <month>10</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>23</day>
          <month>10</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©David Escobar-Castillejos, Ari Y Barrera-Animas, Julieta Noguez, Alejandra J Magana, Bedrich Benes. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 18.11.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2025/1/e58966" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) has introduced novel opportunities for assessment and evaluation in surgical training, offering potential improvements that could surpass traditional educational methods.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This scoping review examines the integration of AI in surgical training, assessment, and evaluation, aiming to determine how AI technologies can enhance trainees’ learning paths and performance by incorporating data-driven insights and predictive analytics. In addition, this review examines the current state and applications of AI algorithms in this field, identifying potential areas for future research.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Following the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) guidelines, the PubMed, Scopus, and Web of Science databases were searched for studies published between January 2020 and March 18, 2024. Eligibility criteria included English-language full-text articles that investigated the application of AI in surgical training, assessment, or evaluation; non-English texts, reviews, preprints, and studies not addressing AI in surgical education were excluded. After duplicate removal and screening, 56 studies were included in the analysis. Data were structured by categorizing studies according to surgical procedure, AI technique, and training setup. Results were synthesized narratively and summarized in frequency tables.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>From 1400 initial records, 56 studies met the inclusion criteria. Most were journal articles (84%, 47/56), with the remainder being conference papers (16%, 9/56). AI was most frequently applied in minimally invasive surgery (27%, 15/56), neurosurgery (20%, 11/56), and laparoscopy (16%, 9/56). Common techniques included machine learning (20%, 11/56), clustering (14%, 8/56), deep learning (11%, 6/56), convolutional neural networks (11%, 6/56), and support vector machines (11%, 6/56). Training setups were dominated by simulation platforms (34%, 19/56) and box trainers (23%, 13/56), followed by surgical video analysis (16%, 9/56), and robotic systems such as the da Vinci platform (13%, 7/56). Across studies, AI-enhanced training environments provided automated skill assessment, personalized feedback, and adaptive learning trajectories, with several reporting improvements in trainees’ learning curves and technical proficiency. However, heterogeneity in study design and outcome measures limited comparability, and algorithmic transparency was often lacking.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The application of AI in surgical training demonstrates the potential to enhance skill acquisition and support more efficient, personalized, and adaptive learning pathways. Despite encouraging findings, several limitations exist, including small sample sizes, the lack of standardized evaluation metrics, and insufficient external validation of AI models. Future studies should aim to clarify AI methodologies, improve reproducibility, and develop scalable, simulation-based solutions aligned with global education goals.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>technology-enhanced learning</kwd>
        <kwd>simulation-based training</kwd>
        <kwd>performance assessment</kwd>
        <kwd>medical training</kwd>
        <kwd>surgery</kwd>
        <kwd>higher education</kwd>
        <kwd>educational innovation</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Scientific advances have significantly influenced the evolution of education and training in recent decades. Emerging technologies such as technology-enhanced learning and simulation-based training have played a crucial role in improving the learning experience of practitioners and have become essential in modern education systems [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>Traditionally, surgical training has mainly focused on gaining experience through a significant number of surgeries and direct involvement, in which trainees receive less supervision from experienced surgeons as they gain competence and eventually become capable of doing surgeries independently [<xref ref-type="bibr" rid="ref2">2</xref>]. This model embodies the “see one, do one, teach one” approach [<xref ref-type="bibr" rid="ref3">3</xref>]. An experienced surgeon first executes a procedure, which the trainee observes. Then, under supervision, the trainee replicates the process. Finally, upon achieving competence, the trainee is expected to instruct others on how to perform it. This approach underscores the importance of direct observation, practical experience, and the ability to transmit information and expertise to future generations of medical practitioners. However, it also raises inquiries regarding the diversity of learning experiences, the consistency of the skills acquired, and the stress that it places on seasoned surgeons and trainees to quickly comprehend and transmit complex procedures involving inherent risks [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Acquiring and improving skills in the field of medicine are complex processes that last throughout a physician’s career. Since the 1990s, ongoing discussions have focused on enhancing teaching practices [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
      <p>Researchers have developed various simulators and training platforms to address these challenges and the demands of an expanding spectrum of surgical operations [<xref ref-type="bibr" rid="ref7">7</xref>]. These tools enable trainees to develop expertise in different surgical procedures and provide the benefit of unlimited practice opportunities, customizable difficulty levels, and cost-effective solutions that emulate the difficulties of actual surgery procedures [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Furthermore, these platforms offer a secure and interactive setting that promotes learning through experimentation, enabling risk-free practice. Nevertheless, there remains considerable potential to improve the effectiveness of these training setups [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>].</p>
      <p>As technological advancements continue, interest in incorporating artificial intelligence (AI) into medical training has also increased [<xref ref-type="bibr" rid="ref12">12</xref>]. AI, with its capacity to emulate certain aspects of human cognition, has the potential to enhance educational outcomes and transform traditional methods of training and teaching [<xref ref-type="bibr" rid="ref13">13</xref>]. It enables the creation of the next generation of autonomous systems to execute tasks usually performed by individuals, representing a substantial advancement in computer science. Furthermore, AI algorithms could assist in enhancing conceptual understanding, facilitating virtual practice, and offering analytical feedback on performance. Through the use of data-driven insights and predictive analytics, AI has the potential to revolutionize surgical training, offering customized and efficient learning pathways.</p>
      <p>This scoping review aims to map and analyze current applications of AI in surgical training, assessment, and evaluation, identifying the most common surgical procedures, AI techniques, and training setups while highlighting gaps and opportunities for future research. The following research questions guided this study:</p>
      <list list-type="order">
        <list-item>
          <p>What are the specific surgical procedures where AI algorithms are most frequently applied in surgical training?</p>
        </list-item>
        <list-item>
          <p>Which AI techniques have been used in surgical training and evaluation?</p>
        </list-item>
        <list-item>
          <p>How are AI techniques being used to assess and improve surgical training?</p>
        </list-item>
        <list-item>
          <p>How do AI applications in surgical training affect the learning curve of surgical residents and fellows?</p>
        </list-item>
      </list>
      <p>The paper is organized as follows: the “Methods” section outlines the methodology used to carry out this scoping review. The “Results” section provides a comprehensive overview of the findings. The “Discussion” section presents an outline of the research questions, shows additional findings, identifies potential areas for opportunity, acknowledges the limits of the current review, and concludes with final thoughts and directions for future research in the realm of AI in surgical education.</p>
      <p>Although there are different definitions and approaches to what AI is, this study is particularly interested in Russell and Norvig’s [<xref ref-type="bibr" rid="ref14">14</xref>] approach to systems that act rationally, that is, systems that act intelligently and rationally, ideally in the best possible way given the available information. AI is a disruptive technology that is reshaping education, facilitating a shift toward more efficient teaching protocols [<xref ref-type="bibr" rid="ref15">15</xref>]. It enables machines to imitate various complex human skills, and AI-based techniques are typically employed in the following areas:</p>
      <list list-type="bullet">
        <list-item>
          <p>Expert systems “emulate the behavior of a human expert within a well-defined, narrow domain of knowledge” [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
        </list-item>
        <list-item>
          <p>Intelligent tutoring systems “model learners’ psychological states to provide individualized instruction. They… help learners acquire domain-specific, cognitive, and metacognitive knowledge” [<xref ref-type="bibr" rid="ref17">17</xref>].</p>
        </list-item>
      </list>
      <p>AI can be subdivided into machine learning (ML), which further includes deep learning (DL). ML aims to “perform intelligent predictions based on a data set” [<xref ref-type="bibr" rid="ref18">18</xref>]. It uses statistical, data mining, and optimization methods to design models that can identify patterns and make predictions with higher precision than human experts. In this field, there are 3 fundamental ML paradigms:</p>
      <list list-type="bullet">
        <list-item>
          <p>Supervised learning uses input data and their matching labeled output to train models [<xref ref-type="bibr" rid="ref19">19</xref>]. A labeled output is data that has been assigned labels to add context; consequently, the objective of supervised learning is to learn and predict outputs for unseen data based on the initial input-output pairs.</p>
        </list-item>
        <list-item>
          <p>Unsupervised learning involves working with unlabeled data [<xref ref-type="bibr" rid="ref20">20</xref>]. The algorithms autonomously attempt to discern patterns and relationships within the data.</p>
        </list-item>
        <list-item>
          <p>Reinforcement learning uses an autonomous entity known as an agent, which learns to make decisions by performing activities inside an environment to reach a specific objective [<xref ref-type="bibr" rid="ref21">21</xref>]. The feedback the agent receives in the form of rewards or penalties serves as a guide as it iteratively refines its strategy to achieve optimal performance.</p>
        </list-item>
      </list>
      <p>Finally, DL is a branch of machine learning that uses artificial neural networks to replicate the sophisticated processes of the human brain [<xref ref-type="bibr" rid="ref22">22</xref>]. Algorithms in this category learn to identify patterns and comprehend large datasets. DL is highly efficient because it can automatically extract and learn high-level characteristics from data, reducing the need for manual feature selection. It excels at handling complex tasks such as image and audio recognition, natural language processing, image generation, and data-driven prediction.</p>
      <p>Numerous models have been developed within AI to address challenging problems and tasks across different sectors and research fields. Each approach provides certain advantages specific to the type of data to be processed and the analytical needs (see <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>).</p>
      <boxed-text id="box1" position="float">
        <title>Approaches and advantages specific to the type of data to be processed and analytical needs.</title>
        <list list-type="bullet">
          <list-item>
            <p>Regression analysis forecasts a continuous output by considering one or more predictor variables [<xref ref-type="bibr" rid="ref23">23</xref>].</p>
          </list-item>
          <list-item>
            <p>Cluster analysis methods group similar items based on shared characteristics. These algorithms help identify patterns within the data [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
          </list-item>
          <list-item>
            <p>Support vector machine (SVM) categorizes data by identifying the optimal boundary that divides distinct groups [<xref ref-type="bibr" rid="ref25">25</xref>].</p>
          </list-item>
          <list-item>
            <p>Decision trees analyze data by using a series of questions and rules, resulting in the generation of predictions or classifications [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
          </list-item>
          <list-item>
            <p>Random forest (RF) uses a set of decision trees to enhance predictive precision and mitigate overfitting, a phenomenon in which predictions are accurate for training data but not for new data [<xref ref-type="bibr" rid="ref27">27</xref>].</p>
          </list-item>
          <list-item>
            <p>Bayesian networks model the relationships and dependencies among variables using probability theory [<xref ref-type="bibr" rid="ref28">28</xref>]. They are represented through a directed acyclic graph. This approach facilitates the prediction of outcomes based on established conditions.</p>
          </list-item>
          <list-item>
            <p>Markov models represent the transitions between states in a system using probabilities [<xref ref-type="bibr" rid="ref29">29</xref>]. They are characterized by the Markov property, where the future state depends only on the current state and not on the sequence of events that preceded it.</p>
          </list-item>
          <list-item>
            <p>Fuzzy systems are based on fuzzy logic, which extends classical Boolean logic to handle the concept of partial truth, where truth values can range between completely true and completely false [<xref ref-type="bibr" rid="ref30">30</xref>].</p>
          </list-item>
          <list-item>
            <p>Neural networks (NNs) are inspired by the human brain. They rely on interconnected nodes to process data and detect connections [<xref ref-type="bibr" rid="ref31">31</xref>]. This model can be subdivided based on its specific use.</p>
            <list list-type="bullet">
              <list-item>
                <p>Convolutional neural networks (CNNs) process data that displays a grid-like structure, such as images [<xref ref-type="bibr" rid="ref32">32</xref>].</p>
              </list-item>
              <list-item>
                <p>Recurrent neural networks (RNNs) predict sequences [<xref ref-type="bibr" rid="ref33">33</xref>]. They use their internal state (memory) to process sequences of inputs, such as language or time series data.</p>
              </list-item>
              <list-item>
                <p>Long short-term memory (LSTM) networks are a type of RNN that can learn long-term dependencies [<xref ref-type="bibr" rid="ref34">34</xref>]. They are ideal for activities that require comprehension of long sequences.</p>
              </list-item>
              <list-item>
                <p>Deep neural networks (DNNs) consist of multiple interconnected layers of neurons [<xref ref-type="bibr" rid="ref35">35</xref>]. These networks can learn from extensive amounts of data and detect complex patterns.</p>
              </list-item>
              <list-item>
                <p>Transformers are a type of network that relies on self-attention mechanisms, allowing it to weigh the importance of different parts of the input data [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
              </list-item>
              <list-item>
                <p>Large language models (LLMs) are advanced types of networks that have been trained on vast datasets of words and sentences [<xref ref-type="bibr" rid="ref37">37</xref>]. They produce coherent, human-like responses to written text by selecting the most probable next words.</p>
              </list-item>
            </list>
          </list-item>
        </list>
      </boxed-text>
      <p>These AI models highlight the potential of this technology in educational contexts. The United Nations Educational, Scientific and Cultural Organization indicates that digital technologies have the potential to complement, enrich, and transform education, aligning with the United Nations’ Sustainable Development Goal 4 (SDG 4) for education and providing universal access to learning [<xref ref-type="bibr" rid="ref38">38</xref>]. Consequently, the integration of AI in surgical training could boost independence, self-study, engagement, and motivation.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>This review adheres to the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews; see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) statement, designed for publications in the health and medical sciences [<xref ref-type="bibr" rid="ref39">39</xref>]. The review process was organized following a structured protocol consisting of three stages: (1) planning, which involved establishing the criteria for the search and databases to be used; (2) conducting, which entailed performing the search and applying filters for the scoping review; (3) reporting, which included compiling the studies that met the criteria and were included in the review. During stages 1 and 2, the research papers were compiled, and the initial screening process was conducted, focusing solely on papers that fall within the scope of the review and were published in peer-reviewed scientific journals. Stage 3 consisted of identifying the main characteristics that distinguish the contributions and unique features of each article that has passed the initial screening process. Subsequently, the necessary analysis was performed to present the summary of the research and compile tables and figures. The starting date for stages 1 and 2 of the scoping review was February 27, 2024, and it concluded on March 18, 2024.</p>
      </sec>
      <sec>
        <title>Information Sources</title>
        <p>A total of 3 databases were selected to search for relevant studies: PubMed, Scopus, and Web of Science. The inclusion of Web of Science and Scopus databases consolidates information from other sources, such as IEEE Xplore, ScienceDirect, and SpringerLink. Therefore, they expand the scope of accessible academic literature. These platforms also provide search and analytical tools, making it easier to find pertinent studies and analyze trends. By using the 3 databases, the review considered articles with different AI models beyond the limitation of just focusing on clinical trials. By implementing this procedure, the scope of the review is expanded, enabling the identification of significant manuscripts to identify areas of opportunity in the field.</p>
      </sec>
      <sec>
        <title>Search Strategy</title>
        <p>A total of 4 keywords related to AI concepts and 4 keywords related to surgical training were selected based on the research questions. The selected keywords were converted into search strings and processed to be compatible with the advanced search tool of each database. <xref ref-type="table" rid="table1">Table 1</xref> shows the search strings used in this scoping review.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Search strings used in the advanced search tools of PubMed, Web of Science, and Scopus.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="120"/>
            <col width="880"/>
            <thead>
              <tr valign="top">
                <td>Database</td>
                <td>String of keywords</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>PubMed</td>
                <td>(“Artificial Intelligence”[MeSH] OR “AI” OR “machine learning” OR “deep learning”) AND (“Surgical Training” OR “surgical education” OR “surgical assessment” OR “surgical evaluation”)</td>
              </tr>
              <tr valign="top">
                <td>Web of Science</td>
                <td>TS = ((“artificial intelligence” OR “AI” OR “machine learning” OR “deep learning”) AND (“surgical training” OR “surgical education” OR “surgical assessment” OR “surgical evaluation”))</td>
              </tr>
              <tr valign="top">
                <td>Scopus</td>
                <td>(TITLE-ABS-KEY(“artificial intelligence” OR “AI” OR “machine learning” OR “deep learning”) AND TITLE-ABS-KEY (“surgical training” OR “surgical education” OR “surgical assessment” OR “surgical evaluation”))</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Eligibility Criteria</title>
        <p>Records retrieved from the initial search were examined to verify their compliance with the eligibility criteria and their alignment with the research questions (<xref ref-type="boxed-text" rid="box2">Textbox 2</xref>).</p>
        <boxed-text id="box2" position="float">
          <title>Eligibility criteria.</title>
          <p>The inclusion criteria for this review were limited to:</p>
          <list list-type="bullet">
            <list-item>
              <p>Studies published from January 2020 to March 2024 were reviewed to ensure the review covers the most recent advancements in artificial intelligence (AI) applications in surgical training.</p>
            </list-item>
            <list-item>
              <p>Full-text articles available in English to allow thorough review and analysis.</p>
            </list-item>
            <list-item>
              <p>Studies that focus on the application of AI in surgical training and evaluation, aligning with the research questions.</p>
            </list-item>
          </list>
          <p>For the exclusion criteria, this review applied the following criteria:</p>
          <list list-type="bullet">
            <list-item>
              <p>Studies not centered on the application of AI to assess or evaluate surgical training.</p>
            </list-item>
            <list-item>
              <p>Nonscientific journal publications, non–full-text articles available online, and preprints.</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Data Charting and Synthesis</title>
        <p>After the inclusion and exclusion criteria had been applied during screening, data were charted for each included study covering three dimensions: (1) the surgical procedure (eg, laparoscopy, minimally invasive surgery, neurosurgery, and arthroscopy), (2) the AI model (eg, support vector machine [SVM], convolutional neural network [CNN], deep neural network [DNN], long short-term memory [LSTM], and transformers), and (3) the training setup (eg, simulation platforms, box trainers, surgical video analysis, in-vivo settings, virtual reality, and da Vinci system). These variables structured the subsequent evidence synthesis and guided the organization of results by procedure, technique, and setup. In addition, bibliographic fields, including year of publication and type of publication, were also charted to support descriptive reporting in the Results section. This structured approach enabled a descriptive and narrative synthesis aimed at elucidating how AI contributes to educational outcomes and skill acquisition in surgical training.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Search Results and Study Selection</title>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref> presents the PRISMA-ScR flow diagram illustrating the complete selection process. The initial search identified 1400 records: 545 from PubMed, 288 from Web of Science, and 567 from Scopus, obtained using the search strings described in <xref ref-type="table" rid="table1">Table 1</xref>. After applying the publication date range from January 2020 to March 2024, a total of 461 records were excluded, leaving 939 for further screening. Duplicate removal eliminated 363 records, yielding 576 unique studies.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Flow diagram of the scoping review process, illustrating the inclusion and exclusion criteria. AI: artificial intelligence; LLM: large language model.</p>
          </caption>
          <graphic xlink:href="jmir_v27i1e58966_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Subsequent filtering was conducted in stages to ensure methodological rigor and relevance. Database parameters were adjusted to retain only peer-reviewed journal articles and conference proceedings, excluding 260 reviews and 36 editorials that did not meet the inclusion criteria. A total of 280 records proceeded to qualitative screening. During this stage, the relevance of each article to the review objectives was reassessed. This process excluded 76 studies that, despite meeting database filters, were secondary reviews, surveys, or editorials; 9 non-English papers; 7 papers focused on nonsurgical training; and 18 papers describing simulator development or validation without AI integration. Additional exclusions comprised 1 duplicate, 3 studies addressing “Data Collection Systems,” 11 centered on “LLMs in Non-Surgical Education,” and 99 that did not provide sufficient information about AI-enhanced surgical training. This filtering process excluded 224 additional studies, leaving 56 studies for the final synthesis and analysis.</p>
        <p>The characteristics of the 56 included studies are summarized in <xref ref-type="table" rid="table2">Table 2</xref>, organized across five domains: (1) surgical procedure (eg, laparoscopy, minimally invasive surgery [MIS], neurosurgery, and arthroscopy), (2) year of publication, (3) type of publication, (4) AI technique or model used (eg, SVM, CNN, DNN, LSTM, and transformers), and (5) training setup (eg, simulation platforms, box trainers, da Vinci system, surgical video analysis, and in vivo or virtual-reality environments). This structure enables direct comparison across specialties and methodological approaches, while supporting a descriptive and narrative synthesis of cross-cutting trends.</p>
        <p>Across the included studies, MIS, neurosurgery, and laparoscopy represented the majority of AI applications. ML and DL techniques were the most frequently used computational approaches, while simulation environments and box trainers constituted the primary training configurations. Collectively, these trends indicate a primary emphasis on risk-managed training environments that leverage accessible kinematic and video data. However, heterogeneity in studies and limited standardization of outcome measures remain persistent challenges, underscoring the need for unified evaluation frameworks in the future.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Characteristics of included studies: surgical procedures, artificial intelligence (AI) techniques, and training setups.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="290"/>
            <col width="0"/>
            <col width="100"/>
            <col width="0"/>
            <col width="150"/>
            <col width="0"/>
            <col width="230"/>
            <col width="0"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Classification and references</td>
                <td colspan="2">Year</td>
                <td colspan="2">Type</td>
                <td colspan="2">AI model</td>
                <td>Setup</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="10">
                  <bold>MIS<sup>a</sup> skills</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Rashidi et al [<xref ref-type="bibr" rid="ref40">40</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Fuzzy systems</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fathabadi et al [<xref ref-type="bibr" rid="ref41">41</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Conference</td>
                <td colspan="2">Fuzzy systems</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Deng et al [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Conference</td>
                <td colspan="2">CNN<sup>b</sup></td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kulkarni et al [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Wu et al [<xref ref-type="bibr" rid="ref44">44</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML<sup>c</sup> (unspecified)</td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Brown and Kuchenbecker [<xref ref-type="bibr" rid="ref45">45</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Regression analysis</td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Keles et al [<xref ref-type="bibr" rid="ref46">46</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Koskinen et al [<xref ref-type="bibr" rid="ref47">47</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM<sup>d</sup></td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kasa et al [<xref ref-type="bibr" rid="ref48">48</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DL<sup>e</sup> (unspecified)</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Gao et al [<xref ref-type="bibr" rid="ref49">49</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Baghdadi et al [<xref ref-type="bibr" rid="ref50">50</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Benmansour et al [<xref ref-type="bibr" rid="ref51">51</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN<sup>f</sup>+LSTM<sup>g</sup></td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yanik et al [<xref ref-type="bibr" rid="ref52">52</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Lee et al [<xref ref-type="bibr" rid="ref53">53</xref>]</td>
                <td colspan="2">2024</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Markov chains</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hung et al [<xref ref-type="bibr" rid="ref54">54</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN+LSTM</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Neurosurgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ledwos et al [<xref ref-type="bibr" rid="ref55">55</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mirchi et al [<xref ref-type="bibr" rid="ref56">56</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yilmaz et al [<xref ref-type="bibr" rid="ref57">57</xref>]</td>
                <td colspan="2">2024</td>
                <td colspan="2">Journal</td>
                <td colspan="2">AI (unspecified)</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Siyar et al [<xref ref-type="bibr" rid="ref58">58</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reich et al [<xref ref-type="bibr" rid="ref59">59</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">NN<sup>h</sup></td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Natheir et al [<xref ref-type="bibr" rid="ref60">60</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Siyar et al [<xref ref-type="bibr" rid="ref61">61</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yilmaz et al [<xref ref-type="bibr" rid="ref62">62</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DNN<sup>i</sup></td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fazlollahi et al [<xref ref-type="bibr" rid="ref63">63</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Tutoring system (unspecified)</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Du et al [<xref ref-type="bibr" rid="ref64">64</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dhanakshirur et al [<xref ref-type="bibr" rid="ref65">65</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Conference</td>
                <td colspan="2">CNN</td>
                <td colspan="2">Training station</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Laparoscopy</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kuo et al [<xref ref-type="bibr" rid="ref66">66</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DL (unspecified)</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Shafiei et al [<xref ref-type="bibr" rid="ref67">67</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Lavanchy et al [<xref ref-type="bibr" rid="ref68">68</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN</td>
                <td colspan="2">In-vivo setting</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ryder et al [<xref ref-type="bibr" rid="ref69">69</xref>]</td>
                <td colspan="2">2024</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">In-vivo setting</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Halperin et al [<xref ref-type="bibr" rid="ref70">70</xref>]</td>
                <td colspan="2">2024</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DL (unspecified)</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ebina et al [<xref ref-type="bibr" rid="ref71">71</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM</td>
                <td colspan="2">Box trainer</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hamilton et al [<xref ref-type="bibr" rid="ref72">72</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">AI (unspecified)</td>
                <td colspan="2">Training station</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Adrales et al [<xref ref-type="bibr" rid="ref73">73</xref>]</td>
                <td colspan="2">2024</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Wang et al [<xref ref-type="bibr" rid="ref74">74</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Conference</td>
                <td colspan="2">AI (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Arthroscopy</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Mirchi et al [<xref ref-type="bibr" rid="ref75">75</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">NN</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Alkadri et al [<xref ref-type="bibr" rid="ref76">76</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">NN</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Shedage et al [<xref ref-type="bibr" rid="ref77">77</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Conference</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Ophthalmology</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Tabuchi et al [<xref ref-type="bibr" rid="ref78">78</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">AI (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Wang et al [<xref ref-type="bibr" rid="ref79">79</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DNN</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dong et al [<xref ref-type="bibr" rid="ref80">80</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Robotic-assisted surgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Simmonds et al [<xref ref-type="bibr" rid="ref81">81</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Clustering</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kocielnik et al [<xref ref-type="bibr" rid="ref82">82</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Conference</td>
                <td colspan="2">DL (unspecified)</td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Wang et al [<xref ref-type="bibr" rid="ref83">83</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Bayesian network</td>
                <td colspan="2">da Vinci system</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Open surgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Bkheet et al [<xref ref-type="bibr" rid="ref84">84</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">DL (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kadkhodamohammadi et al [<xref ref-type="bibr" rid="ref85">85</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Surgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Papagiannakis et al [<xref ref-type="bibr" rid="ref86">86</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Conference</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Thanawala et al [<xref ref-type="bibr" rid="ref87">87</xref>]</td>
                <td colspan="2">2022</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Case logs</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Surgery skills</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sung et al [<xref ref-type="bibr" rid="ref88">88</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">CNN</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Khan et al [<xref ref-type="bibr" rid="ref89">89</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Motion data</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Otolaryngology</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Lamtara et al [<xref ref-type="bibr" rid="ref90">90</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Conference</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Orthopedics</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sun et al [<xref ref-type="bibr" rid="ref91">91</xref>]</td>
                <td colspan="2">2021</td>
                <td colspan="2">Journal</td>
                <td colspan="2">ML (unspecified)</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Plastic surgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kim et al [<xref ref-type="bibr" rid="ref92">92</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Conference</td>
                <td colspan="2">DL (unspecified)</td>
                <td colspan="2">Medical images</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Radiology</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Saricilar et al [<xref ref-type="bibr" rid="ref93">93</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">NN</td>
                <td colspan="2">Simulation training</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Urology</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Kiyasseh et al [<xref ref-type="bibr" rid="ref94">94</xref>]</td>
                <td colspan="2">2023</td>
                <td colspan="2">Journal</td>
                <td colspan="2">Transformer</td>
                <td colspan="2">Surgical video</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Vascular surgery</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Guo et al [<xref ref-type="bibr" rid="ref95">95</xref>]</td>
                <td colspan="2">2020</td>
                <td colspan="2">Journal</td>
                <td colspan="2">SVM+RF<sup>j</sup></td>
                <td colspan="2">Slave controller</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>MIS: minimally invasive surgery.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>ML: machine learning.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table2fn5">
              <p><sup>e</sup>DL: deep learning.</p>
            </fn>
            <fn id="table2fn6">
              <p><sup>f</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table2fn7">
              <p><sup>g</sup>LSTM: long short-term memory.</p>
            </fn>
            <fn id="table2fn8">
              <p><sup>h</sup>NN: neural network.</p>
            </fn>
            <fn id="table2fn9">
              <p><sup>i</sup>DNN: deep neural network.</p>
            </fn>
            <fn id="table2fn10">
              <p><sup>j</sup>RF: random forest.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Findings and Interpretation</title>
        <sec>
          <title>Specific Surgical Procedures</title>
          <p>The scoping review reveals the range of surgical procedures where AI algorithms are being used (see <xref ref-type="table" rid="table3">Table 3</xref>). The analysis emphasizes the integration of AI in MIS skills (27%, 15/56) [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref54">54</xref>], neurosurgery (20%, 11/56) [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref65">65</xref>], and laparoscopy (16%, 9/56) [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref74">74</xref>] (see <xref ref-type="table" rid="table3">Table 3</xref>). Moderate representation was observed in arthroscopy (5%, 3/56) [<xref ref-type="bibr" rid="ref75">75</xref>-<xref ref-type="bibr" rid="ref77">77</xref>], ophthalmology (5%, 3/56) [<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>], and robot-assisted surgery (5%, 3/56) [<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref83">83</xref>]. Several other domains appeared less frequently, including open surgery (4%, 2/56) [<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>], general surgery (4%, 2/56) [<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>], and surgery skills (4%, 2/56) [<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]. Finally, isolated studies were identified in otolaryngology (2%, 1/56) [<xref ref-type="bibr" rid="ref90">90</xref>], orthopedics (2%, 1/56) [<xref ref-type="bibr" rid="ref91">91</xref>], plastic surgery (2%, 1/56) [<xref ref-type="bibr" rid="ref92">92</xref>], radiology (2%, 1/56) [<xref ref-type="bibr" rid="ref93">93</xref>], urology (2%, 1/56) [<xref ref-type="bibr" rid="ref94">94</xref>], and vascular surgery (2%, 1/56) [<xref ref-type="bibr" rid="ref95">95</xref>].</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Frequency of medical fields in the included articles (N=56).</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="500"/>
              <col width="500"/>
              <thead>
                <tr valign="top">
                  <td>Specialty</td>
                  <td>Included articles, n (%)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>MIS<sup>a</sup> skills</td>
                  <td>15 (27)</td>
                </tr>
                <tr valign="top">
                  <td>Neurosurgery</td>
                  <td>11 (20)</td>
                </tr>
                <tr valign="top">
                  <td>Laparoscopy</td>
                  <td>9 (16)</td>
                </tr>
                <tr valign="top">
                  <td>Arthroscopy</td>
                  <td>3 (5)</td>
                </tr>
                <tr valign="top">
                  <td>Ophthalmology</td>
                  <td>3 (5)</td>
                </tr>
                <tr valign="top">
                  <td>Robot-assisted surgery</td>
                  <td>3 (5)</td>
                </tr>
                <tr valign="top">
                  <td>Open surgery</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>General surgery</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>Surgery skills</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>Otolaryngology</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Orthopedics</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Plastic surgery</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Radiology</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Urology</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Vascular surgery</td>
                  <td>1 (2)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table3fn1">
                <p><sup>a</sup>MIS: minimally invasive surgery.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <p>Functionally, most studies focused on automated skill assessment and learning-curve analysis, while comparatively few examined procedure guidance, workflow recognition, or decision support. This trend was especially evident in MIS and laparoscopy, which relied heavily on video-centric datasets and computer-vision models [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref74">74</xref>], and in neurosurgery, where virtual reality simulators provided standardized training environments and feedback mechanisms [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref65">65</xref>]. The specialty distribution appears to be driven by the availability of high-quality labeled data. Overall, the distribution of specialties indicates that AI integration aligns strongly with domains that generate structured, labeled, and reproducible data, such as endoscopic or robotic procedures. By contrast, open and specialty surgeries remain underrepresented, constrained by the limited standardization of datasets and variability in operative workflows. Future progress will depend on developing shared, procedure-specific repositories, cross-institutional benchmarks, and multimodal data capture beyond video and kinematic streams to enhance generalizability and educational impact [<xref ref-type="bibr" rid="ref84">84</xref>-<xref ref-type="bibr" rid="ref95">95</xref>].</p>
        </sec>
        <sec>
          <title>AI Techniques Used</title>
          <p>The scoping review identified a diverse set of AI techniques in surgical training (see <xref ref-type="table" rid="table4">Table 4</xref>). The most frequent were ML (unspecified; 21%, 12/56) [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref91">91</xref>], clustering (13%, 7/56) [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref81">81</xref>], and CNNs (11%, 6/56) [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>]. 
We also observed DL (unspecified; 11%, 6/56) [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref92">92</xref>] and SVMs (9%, 5/56) [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref71">71</xref>], followed by neural networks (NNs; 7%, 4/56) [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref93">93</xref>] and AI (unspecified; 7%, 4/56) [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]. Additional categories included CNN+LSTM (4%, 2/56) [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>], DNNs (4%, 2/56) [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref79">79</xref>], and fuzzy systems (4%, 2/56) [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Single-study categories (2%, 1/56) included regression analysis [<xref ref-type="bibr" rid="ref45">45</xref>], Markov chains [<xref ref-type="bibr" rid="ref53">53</xref>], tutoring system (unspecified) [<xref ref-type="bibr" rid="ref63">63</xref>], Bayesian network [<xref ref-type="bibr" rid="ref83">83</xref>], transformer [<xref ref-type="bibr" rid="ref94">94</xref>], and SVM+RF [<xref ref-type="bibr" rid="ref95">95</xref>].</p>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>Application of artificial intelligence (AI) techniques in the included articles (N=56).</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="500"/>
              <col width="500"/>
              <thead>
                <tr valign="top">
                  <td>AI technique</td>
                  <td>Included articles, n (%)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>ML<sup>a</sup> (unspecified)</td>
                  <td>12 (21)</td>
                </tr>
                <tr valign="top">
                  <td>Clustering</td>
                  <td>7 (13)</td>
                </tr>
                <tr valign="top">
                  <td>CNNs<sup>b</sup></td>
                  <td>6 (11)</td>
                </tr>
                <tr valign="top">
                  <td>DL<sup>c</sup> (unspecified)</td>
                  <td>6 (11)</td>
                </tr>
                <tr valign="top">
                  <td>SVMs<sup>d</sup></td>
                  <td>5 (9)</td>
                </tr>
                <tr valign="top">
                  <td>NNs<sup>e</sup></td>
                  <td>4 (7)</td>
                </tr>
                <tr valign="top">
                  <td>AI (unspecified)</td>
                  <td>4 (7)</td>
                </tr>
                <tr valign="top">
                  <td>CNN+LSTM<sup>f</sup></td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>DNNs<sup>g</sup></td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>Fuzzy systems</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>Regression analysis</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Markov chains</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Tutoring system (unspecified)</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Bayesian network</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>SVM+RF<sup>h</sup></td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Transformer</td>
                  <td>1 (2)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table4fn1">
                <p><sup>a</sup>ML: machine learning.</p>
              </fn>
              <fn id="table4fn2">
                <p><sup>b</sup>CNN: convolutional neural network.</p>
              </fn>
              <fn id="table4fn3">
                <p><sup>c</sup>DL: deep learning.</p>
              </fn>
              <fn id="table4fn4">
                <p><sup>d</sup>SVM: support vector machine.</p>
              </fn>
              <fn id="table4fn5">
                <p><sup>e</sup>NN: neural network.</p>
              </fn>
              <fn id="table4fn6">
                <p><sup>f</sup>LSTM: long short-term memory.</p>
              </fn>
              <fn id="table4fn7">
                <p><sup>g</sup>DNN: deep neural network.</p>
              </fn>
              <fn id="table4fn8">
                <p><sup>h</sup>RF: random forest.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <p>From 2020 to 2024 (see <xref ref-type="table" rid="table5">Table 5</xref>), ML (unspecified) appears every year, CNNs strengthen in 2021 and 2023, and DL (unspecified) is present in 2020 and 2022-2024. Sequential and hybrid models (CNN+LSTM and DNNs) cluster in 2022-2023. AI (unspecified) emerges from 2022 onward. Probabilistic and rule-based approaches (Bayesian networks, fuzzy systems, and Markov chains) and transformer/SVM+RF appear as single-study categories. Overall, the technique mix tracks data modality and availability (video and kinematics), reinforcing the need for shared multimodal repositories and standardized evaluation metrics to compare methods fairly and improve external validity.</p>
          <table-wrap position="float" id="table5">
            <label>Table 5</label>
            <caption>
              <p>Temporal distribution of artificial intelligence (AI) models in the included articles (2020-2024).</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="350"/>
              <col width="100"/>
              <col width="100"/>
              <col width="100"/>
              <col width="100"/>
              <col width="100"/>
              <col width="150"/>
              <thead>
                <tr valign="top">
                  <td>AI model</td>
                  <td>2020, n (%)</td>
                  <td>2021, n (%)</td>
                  <td>2022, n (%)</td>
                  <td>2023, n (%)</td>
                  <td>2024, n (%)</td>
                  <td>Total, n (%)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>ML<sup>a</sup> (unspecified)</td>
                  <td>2 (17)</td>
                  <td>5 (42)</td>
                  <td>1 (8)</td>
                  <td>2 (17)</td>
                  <td>2 (17)</td>
                  <td>12 (100)</td>
                </tr>
                <tr valign="top">
                  <td>CNN<sup>b</sup></td>
                  <td>1 (17)</td>
                  <td>3 (50)</td>
                  <td>0 (0)</td>
                  <td>2 (33)</td>
                  <td>0 (0)</td>
                  <td>6 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Clustering</td>
                  <td>3 (43)</td>
                  <td>2 (28)</td>
                  <td>1 (14)</td>
                  <td>1 (14)</td>
                  <td>0 (0)</td>
                  <td>7 (100)</td>
                </tr>
                <tr valign="top">
                  <td>SVM<sup>c</sup></td>
                  <td>3 (60)</td>
                  <td>0 (0)</td>
                  <td>1 (20)</td>
                  <td>1 (20)</td>
                  <td>0 (0)</td>
                  <td>5 (100)</td>
                </tr>
                <tr valign="top">
                  <td>DL<sup>d</sup> (unspecified)</td>
                  <td>1 (17)</td>
                  <td>0 (0)</td>
                  <td>2 (33)</td>
                  <td>2 (33)</td>
                  <td>1 (17)</td>
                  <td>6 (100)</td>
                </tr>
                <tr valign="top">
                  <td>NN<sup>e</sup></td>
                  <td>1 (25)</td>
                  <td>1 (25)</td>
                  <td>1 (25)</td>
                  <td>1 (25)</td>
                  <td>0 (0)</td>
                  <td>4 (100)</td>
                </tr>
                <tr valign="top">
                  <td>AI (unspecified)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (25)</td>
                  <td>2 (50)</td>
                  <td>1 (25)</td>
                  <td>4 (100)</td>
                </tr>
                <tr valign="top">
                  <td>DNN<sup>f</sup></td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>2 (100)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>2 (100)</td>
                </tr>
                <tr valign="top">
                  <td>CNN+LSTM<sup>g</sup></td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>2 (100)</td>
                  <td>0 (0)</td>
                  <td>2 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Fuzzy systems</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (50)</td>
                  <td>1 (50)</td>
                  <td>0 (0)</td>
                  <td>2 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Bayesian network</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Markov chains</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Regression analysis</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>SVM+RF<sup>h</sup></td>
                  <td>1 (100)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Transformer</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Tutoring system (unspecified)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                  <td>0 (0)</td>
                  <td>0 (0)</td>
                  <td>1 (100)</td>
                </tr>
                <tr valign="top">
                  <td>Total per year</td>
                  <td>12 (21)</td>
                  <td>11 (20)</td>
                  <td>11 (20)</td>
                  <td>17 (30)</td>
                  <td>5 (9)</td>
                  <td>56 (100)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table5fn1">
                <p><sup>a</sup>ML: machine learning.</p>
              </fn>
              <fn id="table5fn2">
                <p><sup>b</sup>CNN: convolutional neural network.</p>
              </fn>
              <fn id="table5fn3">
                <p><sup>c</sup>SVM: support vector machine.</p>
              </fn>
              <fn id="table5fn4">
                <p><sup>d</sup>DL: deep learning.</p>
              </fn>
              <fn id="table5fn5">
                <p><sup>e</sup>NN: neural network.</p>
              </fn>
              <fn id="table5fn6">
                <p><sup>f</sup>DNN: deep neural network.</p>
              </fn>
              <fn id="table5fn7">
                <p><sup>g</sup>LSTM: long short-term memory.</p>
              </fn>
              <fn id="table5fn8">
                <p><sup>h</sup>RF: random forest.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <p>In the analyzed studies, the number of publications increased from 12 in 2020 to 17 in 2023, with 11 in both 2021 and 2022, and 5 in 2024. The literature search concluded on March 18, 2024, which likely accounts for the lower count in 2024. These totals are summarized in the “Total per year” row of <xref ref-type="table" rid="table5">Table 5</xref>.</p>
        </sec>
        <sec>
          <title>Application of AI Techniques</title>
          <p>AI techniques have been applied across diverse training setups, enhancing both learning experiences and performance assessment in surgical procedures (see <xref ref-type="table" rid="table6">Table 6</xref>). The most frequent environments were simulation training (36%, 20/56) [<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref75">75</xref>-<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>] and box trainers (23%, 13/56) [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>], followed by surgical video analysis (16%, 9/56) [<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref94">94</xref>] and robotic systems using the da Vinci platform (11%, 6/56) [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref83">83</xref>]. 
Less frequent configurations included training stations (4%, 2/56) [<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref72">72</xref>] and in-vivo settings (4%, 2/56) [<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref69">69</xref>], with single-study setups for case logs [<xref ref-type="bibr" rid="ref87">87</xref>], motion data [<xref ref-type="bibr" rid="ref89">89</xref>], medical images [<xref ref-type="bibr" rid="ref92">92</xref>], and a slave controller [<xref ref-type="bibr" rid="ref95">95</xref>] (each 2%, 1/56). Across these settings, studies reported the use of automated skill assessment, formative feedback, and adaptive progression, supported by video, kinematic, and performance-metric streams.</p>
          <p>Over time, setup diversity increased, peaking in 2023 (see <xref rid="figure2" ref-type="fig">Figure 2</xref>). Simulation training and box trainers were consistently present, while surgical video and da Vinci deployments clustered in 2021-2023. These patterns mirror data availability and standardization in risk-managed environments, where AI can be trained and evaluated reliably.</p>
          <table-wrap position="float" id="table6">
            <label>Table 6</label>
            <caption>
              <p>Distribution of training setups in the included articles (N=56).</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="500"/>
              <col width="500"/>
              <thead>
                <tr valign="top">
                  <td>Training setup</td>
                  <td>Included articles, n (%)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Simulation training</td>
                  <td>20 (36)</td>
                </tr>
                <tr valign="top">
                  <td>Box trainer</td>
                  <td>13 (23)</td>
                </tr>
                <tr valign="top">
                  <td>Surgical video</td>
                  <td>9 (16)</td>
                </tr>
                <tr valign="top">
                  <td>da Vinci System</td>
                  <td>6 (11)</td>
                </tr>
                <tr valign="top">
                  <td>Training station</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>In-vivo setting</td>
                  <td>2 (4)</td>
                </tr>
                <tr valign="top">
                  <td>Case logs</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Motion data</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Medical images</td>
                  <td>1 (2)</td>
                </tr>
                <tr valign="top">
                  <td>Slave controller</td>
                  <td>1 (2)</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Appearance of setups over the years in the included articles.</p>
            </caption>
            <graphic xlink:href="jmir_v27i1e58966_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This section discusses the study’s implications and contributions to the field. The review maps and analyzes current applications of AI in surgical training, assessment, and evaluation, identifying the most common surgical procedures, AI techniques, training setups, and highlighting gaps and opportunities for future research. The results show that AI is most frequently reported in data-rich, risk-mitigated environments, notably simulation training and box-trainer setups, and that ML (unspecified) and DL (unspecified) approaches dominate model choices.</p>
        <p>Within these settings, many studies report models that leverage synchronized inputs, for example, kinematics, video, and other performance metrics, to classify technical skill using consistent criteria, to characterize learning trajectories across repeated attempts, and to localize performance-limiting behaviors at the level of gestures, steps, or procedural phases. When embedded in iterative practice, these capabilities may enable individualized training pathways that adjust task parameters and feedback density to a trainee’s evolving competence, with the potential to shorten time to proficiency and to reduce instructor workload. These implications are consistent with the results, in which simulation training accounted for 36% (20/56) and box trainer setups for 23% (13/56) of the included studies.</p>
      </sec>
      <sec>
        <title>Findings in Relation to the Research Questions</title>
        <p>Regarding the first research question aimed at identifying the specific surgical procedures where AI algorithms are most frequently applied in surgical training, AI use concentrates on MIS skills [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref54">54</xref>], neurosurgery [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref65">65</xref>], and laparoscopy [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref74">74</xref>]. Rather than simple frequency, the common thread across these areas is structured, high-signal data capture and well-specified tasks. Endoscopic and robotic workflows generate synchronized video, robotic kinematics, and simulator logs, which enable reproducible labels such as phase boundaries, gesture events, and Objective Structured Assessment of Technical Skills–aligned rubrics. This ecosystem lowers barriers to annotation and validation, thereby accelerating method development. Beyond these clusters, activity in ophthalmology [<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>], open surgery [<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>], robot-assisted surgery [<xref ref-type="bibr" rid="ref81">81</xref>-<xref ref-type="bibr" rid="ref83">83</xref>], and single-study specialties including radiology [<xref ref-type="bibr" rid="ref93">93</xref>], urology [<xref ref-type="bibr" rid="ref94">94</xref>], and vascular surgery [<xref ref-type="bibr" rid="ref95">95</xref>] signals a widening scope. However, these domains often face less standardized capture or a more variable field-of-view, which complicates model training and external validation. The overall distribution, therefore, appears to reflect data tractability and curricular formalization more than inherent differences in educational need.</p>
        <p>The second research question investigated which AI techniques have been used in surgical training and evaluation. Studies use ML (unspecified) [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref89">89</xref>-<xref ref-type="bibr" rid="ref91">91</xref>] and DL (unspecified) [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref92">92</xref>] as broad families, with task-appropriate specializations such as CNNs for video [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref88">88</xref>] and SVMs for lower-dimensional kinematics or hand-crafted features [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. NNs [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref93">93</xref>] support competency modeling when feature engineering is feasible, and CNN+LSTM hybrids [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>] target temporal dynamics for suturing and task segmentation. 
DNNs are explicitly mentioned in [<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]. Sparsely represented categories (fuzzy systems [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>], regression analysis [<xref ref-type="bibr" rid="ref45">45</xref>], Markov chains [<xref ref-type="bibr" rid="ref53">53</xref>], tutoring system (unspecified) [<xref ref-type="bibr" rid="ref63">63</xref>], Bayesian network [<xref ref-type="bibr" rid="ref83">83</xref>], transformer [<xref ref-type="bibr" rid="ref94">94</xref>], and SVM+RF [<xref ref-type="bibr" rid="ref95">95</xref>]) illustrate exploratory breadth rather than established consensus. Consistent with coding, CNN+LSTM is treated as a distinct class and not double-counted under CNNs. No single approach emerges as universally optimal; instead, methods align with task structure (classification vs sequence prediction), signal characteristics (video and kinematics), and assessment granularity (summative scores vs frame- or gesture-level feedback).</p>
        <p>The third research question investigated how AI techniques are being used to assess and improve surgical training. Across setups, a common pattern is the move from retrospective, manual scoring to prospective, automated analytics that are both standardized and timely. In simulation training, synchronized streams enable immediate feedback and progression gating, which supports deliberate practice cycles grounded in objective metrics. This aligns with the preponderance of simulation studies in the dataset and the consistent application of ML and DL to transform kinematics and video into competency-linked outputs. In box trainers, models quantify motion economy, tool path quality, and task efficiency, enabling skill stratification and targeted coaching [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref70">70</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. In robotic systems on the da Vinci platform, studies demonstrate automated assessment, uncertainty-aware feedback, and domain adaptation for cross-site or cross-task transfer [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref83">83</xref>]. 
In surgical video pipelines, investigators focus on procedural understanding, ergonomics, and fine-grained performance analytics [<xref ref-type="bibr" rid="ref73">73</xref>,<xref ref-type="bibr" rid="ref74">74</xref>,<xref ref-type="bibr" rid="ref78">78</xref>-<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref94">94</xref>]. The unifying mechanism across these contexts is measurement at scale that reduces feedback latency, increases consistency, and enables adaptive progression rules without displacing instructor oversight.</p>
        <p>Finally, the last research question investigated the way in which AI applications in surgical training affect the learning curve of surgical residents and fellows. Multiple studies report outcomes consistent with accelerated learning and improved technical performance under AI-enabled training. This includes predictive modeling of progression [<xref ref-type="bibr" rid="ref49">49</xref>], metric selection and learning-curve characterization in simulation [<xref ref-type="bibr" rid="ref55">55</xref>], a randomized comparison of feedback modalities [<xref ref-type="bibr" rid="ref57">57</xref>], competency-based training backed by neural models [<xref ref-type="bibr" rid="ref59">59</xref>], continuous monitoring of bimanual expertise with deep models [<xref ref-type="bibr" rid="ref62">62</xref>], and competency estimation in laparoscopic training [<xref ref-type="bibr" rid="ref69">69</xref>]. Evidence from robotic contexts shows that automated assessment can structure practice with short feedback loops [<xref ref-type="bibr" rid="ref45">45</xref>]. That said, effect sizes remain difficult to aggregate due to heterogeneous study designs, small sample sizes, nonstandard outcome measures, and limited external validation. The most defensible interpretation is that personalized, data-driven feedback and objective, repeated measurement are plausible mechanisms for the observed gains, with further multicenter validation needed to establish generalizability and durability.</p>
        <p>The findings suggest that current AI deployment in surgical training follows data availability and standardization, that ML/DL with video and kinematics are dominant because they best match that data, and that automated, timely feedback is the primary lever through which AI influences performance and learning. Where capture is less standardized or external validation is sparse, adoption tends to lag. This synthesis directly motivates the recommendations presented later in the Discussion section on common benchmarks, transparent reporting, and SDG 4–aligned scalability.</p>
      </sec>
      <sec>
        <title>Comparison With Previous Work</title>
        <p>Previous systematic literature reviews in surgical training have focused on specific training methods (eg, simulation-based training) or on specific types of surgery (eg, plastic surgery and orthopedic surgery) rather than providing a cross-specialty map of AI methods for training, assessment, and evaluation. Reviews focused on simulation-based training within specific domains underscore this pattern. Lawaetz et al [<xref ref-type="bibr" rid="ref96">96</xref>] examined simulation-based training and assessment in open vascular surgery, cataloguing common methods and commenting on effectiveness within that context. Abelleyra Lastoria et al [<xref ref-type="bibr" rid="ref97">97</xref>] surveyed simulation-based tools in plastic surgery and concluded that the validity of many approaches requires further investigation. Woodward et al [<xref ref-type="bibr" rid="ref98">98</xref>] reached a similar conclusion in orthopedic surgery, noting concerns about the construct validity and methodological rigor of simulation studies. Reviews centered on robotic-assisted surgery also reflect divergent emphases: Rahimi et al [<xref ref-type="bibr" rid="ref99">99</xref>] provided a descriptive overview of training modalities and assessment practices, whereas Boal et al [<xref ref-type="bibr" rid="ref100">100</xref>] explicitly scrutinized AI methods for technical skills in robotic surgery and highlighted that both manual and automated assessment tools are often insufficiently validated.</p>
        <p>Closer to the scope of the present scoping review, several analyses have examined automation and AI across surgical training tasks. Levin et al [<xref ref-type="bibr" rid="ref101">101</xref>] identified families of automated technical skill assessment methods, including computer vision, motion tracking, ML and DL, and performance classification, but did not synthesize evidence on educational effectiveness. Lam et al [<xref ref-type="bibr" rid="ref102">102</xref>] focused specifically on ML methods and reported accuracy rates that generally exceeded 80% across included studies, offering a performance-oriented view rather than a training-context analysis. Pedrett et al [<xref ref-type="bibr" rid="ref103">103</xref>] emphasized the central role of video-derived motion and robotic kinematic data as inputs to AI models for technical skill assessment in minimally invasive surgery, reinforcing the importance of structured, high-signal data streams.</p>
        <p>Findings from the present review are consistent with these previous observations in several respects. First, the centrality of simulation and other risk-managed environments recurs across the literature, reflecting where ground truth is tractable and measurement can be standardized. Second, many reviews identify validation gaps, noting that reported metrics, dataset partitions, and labeling practices vary widely, which complicates comparison across sites and inhibits external generalizability [<xref ref-type="bibr" rid="ref96">96</xref>-<xref ref-type="bibr" rid="ref100">100</xref>]. Third, there is broad agreement that AI-assisted assessment is advancing rapidly in robotic and minimally invasive settings; yet, many frameworks remain descriptive or single-center, and their educational impact is not consistently established with robust designs [<xref ref-type="bibr" rid="ref99">99</xref>-<xref ref-type="bibr" rid="ref103">103</xref>].</p>
        <p>At the same time, this review differs from earlier work in several ways. The scope extends across specialties and across training setups, linking procedures, techniques, and use cases in a single comparative framework. Rather than isolating a single algorithm family or specialty, the analysis connects the dominant AI techniques to the data modalities they exploit and to the assessment functions they serve. This mapping clarifies why ML and DL approaches, particularly CNN-based and hybrid temporal models, are prevalent where high-quality video and kinematics are available, and why adoption is slower where capture is less standardized. In addition, the review integrates signals relevant to learning curves, highlighting studies that associate AI-enabled feedback with improvements in proficiency trajectories, while also acknowledging heterogeneity and the need for external validation. By taking this comparative perspective, the review identifies shared deficiencies that cut across specialties, including nonstandard outcome measures, limited transparency in algorithmic reporting, and sparse multicenter testing, and points toward future work on benchmarks, interoperable data schemas, and scalable deployment aligned with SDG 4.</p>
        <p>Whereas previous reviews have been primarily domain-specific or method-specific, this scoping review offers a cross-specialty synthesis that links where AI is used, which techniques are used, and how they are used to support training and assessment. This perspective complements existing literature by emphasizing comparability across contexts, illuminating mechanisms by which AI influences learning, and articulating the methodological steps needed to translate promising prototypes into reproducible, generalizable, and educationally meaningful tools.</p>
      </sec>
      <sec>
        <title>Strengths and Limitations</title>
        <p>This scoping review offers a broad, cross-specialty perspective on the application of AI in surgical training, assessment, and evaluation. It maps procedures, techniques, and training setups within a single comparative framework, which supports interpretation across contexts rather than within a single specialty. The review adheres to PRISMA-ScR guidance, applies explicit inclusion and exclusion criteria, and uses transparent counting rules that assign each study a primary AI technique and a primary setup to avoid double-counting. Results are presented as both narrative synthesis and structured summaries. The Discussion integrates an SDG 4 perspective, offering concrete implementation considerations related to access, scalability, and equity. Together, these elements provide a panoramic view of where AI is currently deployed, why certain methods dominate in specific data environments, and how these choices influence assessment and feedback in practice.</p>
        <p>Several constraints should be considered. First, the search was limited to English-language publications and to the period ending March 18, 2024, which may omit relevant work outside this window. Second, many articles describe methods only at a general label level (AI, ML, and DL) without specifying architectures or training details, which limits interpretability and reproducibility. Third, the evidence base is concentrated in simulation, box-trainer, and video-centric settings, which may not fully capture transfer to live clinical performance, patient outcomes, or longer-term retention. Fourth, external validation is limited, as relatively few studies report multicenter testing, performance under domain shift, subgroup analyses, or calibration, which constrains confidence in portability.</p>
        <p>To address these limitations, educational outcomes should also be mapped to recognized competency frameworks and reported with standardized metrics that enable replication and meta-synthesis. When multisetup or multi-technique pipelines are used, authors should specify proportional attribution. Reporting on access, resource requirements, and cost per trainee hour will support deployment and equity assessment aligned with SDG 4. Multicenter collaborations that release shared benchmarks and interoperable datasets will be necessary to improve reproducibility and to allow fair comparisons across techniques and settings.</p>
      </sec>
      <sec>
        <title>Future Work Recommendations</title>
        <p>This scoping review identified current applications of AI in surgical education and highlighted priority areas for further work. As summarized in <xref ref-type="table" rid="table6">Table 6</xref> and visualized in <xref rid="figure2" ref-type="fig">Figure 2</xref>, a large proportion of studies focus on simulation training [<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref75">75</xref>-<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref81">81</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref93">93</xref>], representing 36% (20/56) of the included articles. This concentration reflects the suitability of simulation for controlled data capture and iterative practice. Building on this foundation, AI can enhance simulation-based training with realistic, adaptive, and personalized learning experiences [<xref ref-type="bibr" rid="ref104">104</xref>,<xref ref-type="bibr" rid="ref105">105</xref>], while also enabling standardized and rapid feedback that supports deliberate practice.</p>
        <p>Advances in computer vision are particularly significant where high-quality video and kinematic data are accessible, which aligns with the prevalence of simulation and box-trainer studies in the included literature. In these regulated, risk-mitigated environments, AI systems can produce timely and structured feedback linked to defined competency frameworks, including economy of motion, bimanual coordination, camera control, tissue handling, and ergonomics, thereby facilitating deliberate practice. Although natural language processing technologies are less represented in the current review, their growing maturity suggests near-term opportunities to integrate narrative guidance, rubric-based feedback, and reflective prompts alongside quantitative metrics, provided such outputs are aligned with curricular objectives and are appropriately validated.</p>
        <p>Future efforts should pursue 5 complementary directions.</p>
        <p>First, strengthen external validity. Studies should include multi-institution cohorts, predefined external test sets, and reporting of performance under domain shift, including different camera views, instruments, and case difficulty. Where feasible, researchers should evaluate the transfer from simulation or bench-top tasks to higher-fidelity or clinical settings with clearly specified outcome measures and follow-up intervals.</p>
        <p>Second, standardize educational outcomes. Investigators should map AI outputs to recognized competency frameworks and report validity, reliability, learning curve parameters, and time to competency with consistent definitions. Agreement on core outcome sets will enable comparison across techniques and facilitate meta-synthesis.</p>
        <p>Third, expand the breadth and transparency of data. New work should prioritize multimodal capture that combines video, kinematics, and tool telemetry and, where appropriate, eye tracking or physiological signals. Public repositories or data-sharing consortia should release interoperable schemas, labeling protocols, and benchmark tasks that are specific to procedures and skill elements. Clear descriptions of models and of training and validation splits will improve reproducibility.</p>
        <p>Fourth, improve usability, equity, and scalability in alignment with SDG 4. Models should operate on standard hardware, interoperate with existing simulators and video platforms, and function reliably in low-bandwidth or offline environments. Reporting of access, installation steps, resource needs, and cost per trainee hour will support adoption in diverse settings. Interfaces should disclose uncertainty, make feedback interpretable, and integrate into educator workflows without adding undue burden.</p>
        <p>Fifth, broaden methodological scope responsibly. There is an opportunity to study natural language technologies for rubric-based guidance, structured debriefs, and reflective prompts, provided outputs are aligned with curricular objectives and validated for educational use. Prospective trials that compare feedback modalities and density, and that measure downstream retention and transfer, will clarify how AI should be integrated pedagogically.</p>
        <p>Together, these directions could move the field from promising prototypes toward reproducible, generalizable, and educationally meaningful tools that improve surgeon training while supporting equitable access to high-quality education.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This scoping review maps current applications of AI in surgical training, assessment, and evaluation across procedures, techniques, and training setups. From 1400 records, 56 studies met the inclusion criteria, with activity concentrated in minimally invasive surgery, neurosurgery, and laparoscopy. AI is most frequently deployed in data-rich, risk-mitigated environments, particularly simulation training and box trainers, where synchronized video and kinematic streams support objective measurement and timely feedback. Technique choices reflect these data conditions, with ML (unspecified) and DL (unspecified) methods predominating and task-specific variants, such as CNNs and hybrid temporal models, applied to video-centric problems.</p>
        <p>Across settings, studies describe automated skill assessment, structured formative feedback, and adaptive progression, with several reporting improvements consistent with accelerated learning curves. At the same time, heterogeneity in study design, small samples, nonstandard outcome measures, and limited external validation constrain strong inferences about effect sizes and generalizability. The evidence, therefore, supports cautious optimism that AI-enabled feedback can enhance skill acquisition, while underscoring the need for more rigorous evaluation.</p>
        <p>Future work should prioritize precise reporting of models and datasets, multicenter validation, and standardized educational outcomes linked to recognized competency frameworks. Interoperable data schemas, shared benchmarks, and transparent methods will be essential to enable comparison across sites and techniques. Attention to scalability, access, and usability will support alignment with SDG 4, ensuring that benefits extend beyond well-resourced centers. With these elements in place, AI has the potential to deliver reproducible, equitable, and educationally meaningful gains in surgical training.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>PRISMA-ScR checklist.</p>
        <media xlink:href="jmir_v27i1e58966_app1.docx" xlink:title="DOCX File , 108 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DNN</term>
          <def>
            <p>deep neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">LSTM</term>
          <def>
            <p>long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">MIS</term>
          <def>
            <p>minimally invasive surgery</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">NN</term>
          <def>
            <p>neural networks</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PRISMA-ScR</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">SDG</term>
          <def>
            <p>Sustainable Development Goal</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We thank the Engineering Faculty, the Research Group NexEd Hub, and the Computing Department of Universidad Panamericana, Mexico City Campus. Finally, we would like to thank Rodrigo González Serna and Monserrat Villacampa Espinosa de los Monteros for their assistance during the design and creation of the flow diagram and the graphs, respectively. Generative AI was used to improve the grammar, style, and clarity of some sentences and paragraphs after initial human drafting. The authors verified all output for factual accuracy and scientific integrity. The model was not used to generate paragraphs, summaries, display charts or tables, or to analyze or interpret data. The model used was ChatGPT based on GPT-4-turbo (“omni”), the vendor is OpenAI, over the web app (chat.openai.com). There were no external funding sources for this study. Consequently, funders had no influence on the design of the study, the collection, analysis, or interpretation of data, the writing of the manuscript, or the decision to publish the results.</p>
    </ack>
    <notes>
      <sec>
        <title>Funding</title>
        <p>We would also like to thank the Academy of Medical Sciences (AMS) for their support (NIF004\1018), as this study originated from this award.</p>
      </sec>
    </notes>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The datasets generated or analyzed during this study are available in the AI Review – Selected Zotero group library [<xref ref-type="bibr" rid="ref106">106</xref>].</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>Conceptualization: DE-C, JN</p>
        <p>Methodology: DE-C, JN, AJM, BB</p>
        <p>Software: DE-C</p>
        <p>Validation: DE-C, JN</p>
        <p>Resources: DE-C, JN, AJM</p>
        <p>Data curation: DE-C, JN</p>
        <p>Visualization: DE-C, JN, AJM, BB</p>
        <p>Supervision: JN</p>
        <p>Project administration: JN</p>
        <p>Writing – original draft: DE-C</p>
        <p>Writing – review &amp; editing: DE-C, AYB-A, JN, AJM, BB</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gavish</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gutiérrez</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Webel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rodríguez</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Peveri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bockholt</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Tecchia</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Evaluating virtual reality and augmented reality training for industrial maintenance and assembly tasks</article-title>
          <source>Interactive Learning Environments</source>
          <year>2013</year>
          <month>07</month>
          <day>18</day>
          <volume>23</volume>
          <issue>6</issue>
          <fpage>778</fpage>
          <lpage>798</lpage>
          <pub-id pub-id-type="doi">10.1080/10494820.2013.815221</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Stachel</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Evidence in surgical training - a review</article-title>
          <source>Innov Surg Sci</source>
          <year>2019</year>
          <month>03</month>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>7</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31579796"/>
          </comment>
          <pub-id pub-id-type="doi">10.1515/iss-2018-0026</pub-id>
          <pub-id pub-id-type="medline">31579796</pub-id>
          <pub-id pub-id-type="pii">iss-2018-0026</pub-id>
          <pub-id pub-id-type="pmcid">PMC6754061</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayub</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>"See one, do one, teach one": Balancing patient care and surgical training in an emergency trauma department</article-title>
          <source>J Glob Health</source>
          <year>2022</year>
          <month>07</month>
          <day>06</day>
          <volume>12</volume>
          <fpage>03051</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35787589"/>
          </comment>
          <pub-id pub-id-type="doi">10.7189/jogh.12.03051</pub-id>
          <pub-id pub-id-type="medline">35787589</pub-id>
          <pub-id pub-id-type="pmcid">PMC9258902</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wetzel</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Kneebone</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Woloshynowych</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nestel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moorthy</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kidd</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Darzi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The effects of stress on surgical performance</article-title>
          <source>Am J Surg</source>
          <year>2006</year>
          <month>01</month>
          <volume>191</volume>
          <issue>1</issue>
          <fpage>5</fpage>
          <lpage>10</lpage>
          <pub-id pub-id-type="doi">10.1016/j.amjsurg.2005.08.034</pub-id>
          <pub-id pub-id-type="medline">16399098</pub-id>
          <pub-id pub-id-type="pii">S0002-9610(05)00732-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moulton</surname>
              <given-names>CE</given-names>
            </name>
          </person-group>
          <article-title>Complications: acknowledging, managing, and coping with human error</article-title>
          <source>Transl Androl Urol</source>
          <year>2017</year>
          <month>08</month>
          <volume>6</volume>
          <issue>4</issue>
          <fpage>773</fpage>
          <lpage>782</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28904910"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/tau.2017.06.28</pub-id>
          <pub-id pub-id-type="medline">28904910</pub-id>
          <pub-id pub-id-type="pii">tau-06-04-773</pub-id>
          <pub-id pub-id-type="pmcid">PMC5583051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kowlowitz</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Curtis</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sloane</surname>
              <given-names>PD</given-names>
            </name>
          </person-group>
          <article-title>The procedural skills of medical students: expectations and experiences</article-title>
          <source>Acad Med</source>
          <year>1990</year>
          <month>10</month>
          <volume>65</volume>
          <issue>10</issue>
          <fpage>656</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1097/00001888-199010000-00016</pub-id>
          <pub-id pub-id-type="medline">2261047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Badash</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Burtt</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Solorzano</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Carey</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>Innovations in surgery simulation: a review of past, current and future techniques</article-title>
          <source>Ann Transl Med</source>
          <year>2016</year>
          <month>12</month>
          <volume>4</volume>
          <issue>23</issue>
          <fpage>453</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28090509"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/atm.2016.12.24</pub-id>
          <pub-id pub-id-type="medline">28090509</pub-id>
          <pub-id pub-id-type="pii">atm-04-23-453</pub-id>
          <pub-id pub-id-type="pmcid">PMC5220028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Badash</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Burtt</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Solorzano</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Carey</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>Innovations in surgery simulation: a review of past, current and future techniques</article-title>
          <source>Ann Transl Med</source>
          <year>2016</year>
          <month>12</month>
          <volume>4</volume>
          <issue>23</issue>
          <fpage>453</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28090509"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/atm.2016.12.24</pub-id>
          <pub-id pub-id-type="medline">28090509</pub-id>
          <pub-id pub-id-type="pii">atm-04-23-453</pub-id>
          <pub-id pub-id-type="pmcid">PMC5220028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Escobar-Castillejos</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Noguez</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Neri</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Magana</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Benes</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A Review of Simulators with Haptic Devices for Medical Training</article-title>
          <source>J Med Syst</source>
          <year>2016</year>
          <month>04</month>
          <volume>40</volume>
          <issue>4</issue>
          <fpage>104</fpage>
          <pub-id pub-id-type="doi">10.1007/s10916-016-0459-8</pub-id>
          <pub-id pub-id-type="medline">26888655</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-016-0459-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Montbrun</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Macrae</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Simulation in surgical education</article-title>
          <source>Clin Colon Rectal Surg</source>
          <year>2012</year>
          <month>09</month>
          <volume>25</volume>
          <issue>3</issue>
          <fpage>156</fpage>
          <lpage>65</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/23997671"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0032-1322553</pub-id>
          <pub-id pub-id-type="medline">23997671</pub-id>
          <pub-id pub-id-type="pii">25156</pub-id>
          <pub-id pub-id-type="pmcid">PMC3577578</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Escobar-Castillejos</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Noguez</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bello</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Neri</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Magana</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Benes</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A Review of Training and Guidance Systems in Medical Surgery</article-title>
          <source>Applied Sciences</source>
          <year>2020</year>
          <month>08</month>
          <day>20</day>
          <volume>10</volume>
          <issue>17</issue>
          <fpage>5752</fpage>
          <pub-id pub-id-type="doi">10.3390/app10175752</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hassani</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nahvi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmadi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Design and implementation of an intelligent virtual environment for improving speaking and listening skills</article-title>
          <source>Interactive Learning Environments</source>
          <year>2013</year>
          <month>10</month>
          <day>10</day>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>252</fpage>
          <lpage>271</lpage>
          <pub-id pub-id-type="doi">10.1080/10494820.2013.846265</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Visser</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Parasuraman</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Adaptive Aiding of Human-Robot Teaming</article-title>
          <source>Journal of Cognitive Engineering and Decision Making</source>
          <year>2011</year>
          <month>06</month>
          <day>27</day>
          <volume>5</volume>
          <issue>2</issue>
          <fpage>209</fpage>
          <lpage>231</lpage>
          <pub-id pub-id-type="doi">10.1177/1555343411410160</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russell</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Norvig</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <source>Artificial Intelligence, A Modern Approach</source>
          <year>2021</year>
          <publisher-loc>Bengaluru</publisher-loc>
          <publisher-name>Pearson</publisher-name>
          <fpage>1</fpage>
          <lpage>1168</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chassignol</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Khoroshavin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Klimova</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bilyatdinova</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial Intelligence trends in education: a narrative overview</article-title>
          <source>Procedia Computer Science</source>
          <year>2018</year>
          <volume>136</volume>
          <fpage>16</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1016/j.procs.2018.08.233</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liebowitz</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Expert systems: A short introduction</article-title>
          <source>Engineering Fracture Mechanics</source>
          <year>1995</year>
          <month>3</month>
          <volume>50</volume>
          <issue>5-6</issue>
          <fpage>601</fpage>
          <lpage>607</lpage>
          <pub-id pub-id-type="doi">10.1016/0013-7944(94)e0047-k</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Adesope</surname>
              <given-names>OO</given-names>
            </name>
            <name name-style="western">
              <surname>Nesbit</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Intelligent tutoring systems and learning outcomes: A meta-analysis</article-title>
          <source>Journal of Educational Psychology</source>
          <year>2014</year>
          <month>11</month>
          <volume>106</volume>
          <issue>4</issue>
          <fpage>901</fpage>
          <lpage>918</lpage>
          <pub-id pub-id-type="doi">10.1037/a0037123</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nichols</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Herbert Chan</surname>
              <given-names>HW</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>MAB</given-names>
            </name>
          </person-group>
          <article-title>Machine learning: applications of artificial intelligence to imaging and diagnosis</article-title>
          <source>Biophys Rev</source>
          <year>2019</year>
          <month>03</month>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>111</fpage>
          <lpage>118</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.doi.org/10.1007/s12551-018-0449-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s12551-018-0449-9</pub-id>
          <pub-id pub-id-type="medline">30182201</pub-id>
          <pub-id pub-id-type="pii">10.1007/s12551-018-0449-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6381354</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cord</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Delany</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Supervised learning</article-title>
          <source>Machine Learning Techniques for Multimedia: Case Studies on Organization and Retrieval</source>
          <year>2008</year>
          <publisher-loc>Berlin Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>21</fpage>
          <lpage>49</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greene</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mayer</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised learning and clustering</article-title>
          <source>Machine Learning Techniques for Multimedia: Case Studies on Organization and Retrieval</source>
          <year>2008</year>
          <publisher-loc>Berlin Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>51</fpage>
          <lpage>90</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sivamayil</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rajasekar</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Aljafari</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Nikolovski</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vairavasundaram</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vairavasundaram</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>A systematic study on reinforcement learning based applications</article-title>
          <source>Energies</source>
          <year>2023</year>
          <volume>16</volume>
          <issue>3</issue>
          <fpage>1512</fpage>
          <pub-id pub-id-type="doi">10.3390/en16031512</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sarker</surname>
              <given-names>IH</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning: A comprehensive overview on techniques, taxonomy, applications and research directions</article-title>
          <source>SN Comput Sci</source>
          <year>2021</year>
          <volume>2</volume>
          <issue>6</issue>
          <fpage>420</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34426802"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s42979-021-00815-1</pub-id>
          <pub-id pub-id-type="medline">34426802</pub-id>
          <pub-id pub-id-type="pii">815</pub-id>
          <pub-id pub-id-type="pmcid">PMC8372231</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palmer</surname>
              <given-names>PB</given-names>
            </name>
            <name name-style="western">
              <surname>O'Connell</surname>
              <given-names>DG</given-names>
            </name>
          </person-group>
          <article-title>Regression analysis for prediction: understanding the process</article-title>
          <source>Cardiopulm Phys Ther J</source>
          <year>2009</year>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>23</fpage>
          <lpage>26</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/20467520"/>
          </comment>
          <pub-id pub-id-type="medline">20467520</pub-id>
          <pub-id pub-id-type="pmcid">PMC2845248</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>RCT</given-names>
            </name>
          </person-group>
          <article-title>Clustering analysis and its applications</article-title>
          <source>Advances in Information Systems Science</source>
          <year>1981</year>
          <publisher-loc>US</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>169</fpage>
          <lpage>292</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shmilovici</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Support vector machines</article-title>
          <source>Data Mining and Knowledge Discovery Handbook</source>
          <year>2005</year>
          <publisher-loc>US</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>257</fpage>
          <lpage>276</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Decision tree methods: applications for classification and prediction</article-title>
          <source>Shanghai Arch Psychiatry</source>
          <year>2015</year>
          <volume>27</volume>
          <issue>2</issue>
          <fpage>130</fpage>
          <lpage>135</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/26120265"/>
          </comment>
          <pub-id pub-id-type="doi">10.11919/j.issn.1002-0829.215044</pub-id>
          <pub-id pub-id-type="medline">26120265</pub-id>
          <pub-id pub-id-type="pii">sap-27-02-130</pub-id>
          <pub-id pub-id-type="pmcid">PMC4466856</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Breiman</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Random Forests</article-title>
          <source>Machine Learning</source>
          <year>2001</year>
          <volume>45</volume>
          <issue>1</issue>
          <fpage>5</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heckerman</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A tutorial on learning with Bayesian networks</article-title>
          <source>Innovations in Bayesian Networks: Theory and Applications</source>
          <year>2008</year>
          <publisher-loc>Berlin Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>33</fpage>
          <lpage>82</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nierhaus</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Markov models</article-title>
          <source>Algorithmic Composition: Paradigms of Automated Music Generation</source>
          <year>2009</year>
          <publisher-loc>Vienna</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>67</fpage>
          <lpage>82</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Caggiano</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Chatti</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Laperrière</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Reinhart</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Tolio</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>CIRP Encyclopedia of Production Engineering</source>
          <year>2019</year>
          <publisher-loc>Berlin Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>760</fpage>
          <lpage>766</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Youn</surname>
              <given-names>YC</given-names>
            </name>
          </person-group>
          <article-title>Artificial neural network: understanding the basic concepts without mathematics</article-title>
          <source>Dement Neurocogn Disord</source>
          <year>2018</year>
          <volume>17</volume>
          <issue>3</issue>
          <fpage>83</fpage>
          <lpage>89</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30906397"/>
          </comment>
          <pub-id pub-id-type="doi">10.12779/dnd.2018.17.3.83</pub-id>
          <pub-id pub-id-type="medline">30906397</pub-id>
          <pub-id pub-id-type="pmcid">PMC6428006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yamashita</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nishio</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Do</surname>
              <given-names>RKG</given-names>
            </name>
            <name name-style="western">
              <surname>Togashi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Convolutional neural networks: an overview and application in radiology</article-title>
          <source>Insights Imaging</source>
          <year>2018</year>
          <volume>9</volume>
          <issue>4</issue>
          <fpage>611</fpage>
          <lpage>629</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29934920"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13244-018-0639-9</pub-id>
          <pub-id pub-id-type="medline">29934920</pub-id>
          <pub-id pub-id-type="pii">10.1007/s13244-018-0639-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6108980</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marhon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cameron</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kremer</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Recurrent neural networks</article-title>
          <source>Handbook on Neural Information Processing</source>
          <year>2013</year>
          <publisher-loc>Berlin Heidelberg</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>29</fpage>
          <lpage>65</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lindemann</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vietz</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jazdi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Weyrich</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A survey on long short-term memory networks for time series prediction</article-title>
          <source>Procedia CIRP</source>
          <year>2021</year>
          <volume>99</volume>
          <fpage>650</fpage>
          <lpage>655</lpage>
          <pub-id pub-id-type="doi">10.1016/j.procir.2021.03.088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kriegeskorte</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Golan</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Neural network models and deep learning</article-title>
          <source>Curr Biol</source>
          <year>2019</year>
          <volume>29</volume>
          <issue>7</issue>
          <fpage>R231</fpage>
          <lpage>R236</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0960-9822(19)30204-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cub.2019.02.034</pub-id>
          <pub-id pub-id-type="medline">30939301</pub-id>
          <pub-id pub-id-type="pii">S0960-9822(19)30204-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Qiu</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>A survey of transformers</article-title>
          <source>AI Open</source>
          <year>2022</year>
          <volume>3</volume>
          <fpage>111</fpage>
          <lpage>132</lpage>
          <pub-id pub-id-type="doi">10.1016/j.aiopen.2022.10.001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Nuchged</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>An investigation of applying large language models to spoken language learning</article-title>
          <source>Applied Sciences</source>
          <year>2023</year>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>224</fpage>
          <pub-id pub-id-type="doi">10.3390/app14010224</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="web">
          <article-title>Digital learning and transformation of education</article-title>
          <source>UNESCO</source>
          <year>2024</year>
          <access-date>2024-03-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.unesco.org/en/digital-education">https://www.unesco.org/en/digital-education</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Lillie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zarin</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Colquhoun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Levac</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Horsley</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hempel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McGowan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hartling</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aldcroft</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Garritty</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lewin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Godfrey</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Macdonald</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Langlois</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Soares-Weiser</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Moriarty</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tunçalp</surname>
              <given-names>Ö</given-names>
            </name>
            <name name-style="western">
              <surname>Straus</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>PRISMA extension for scoping reviews (PRISMA-ScR): checklist and explanation</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <volume>169</volume>
          <issue>7</issue>
          <fpage>467</fpage>
          <lpage>473</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.acpjournals.org/doi/10.7326/M18-0850?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-0850</pub-id>
          <pub-id pub-id-type="medline">30178033</pub-id>
          <pub-id pub-id-type="pii">2700389</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rashidi Fathabadi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grantner</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Shebrain</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Abdel-Qader</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>3D autonomous surgeon's hand movement assessment using a cascaded fuzzy supervisor in multi-thread video processing</article-title>
          <source>Sensors (Basel)</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>2623</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s23052623"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s23052623</pub-id>
          <pub-id pub-id-type="medline">36904830</pub-id>
          <pub-id pub-id-type="pii">s23052623</pub-id>
          <pub-id pub-id-type="pmcid">PMC10007173</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fathabadi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grantner</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shebrain</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Abdel-Qader</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Two-level fuzzy logic evaluation system for surgeon's hand movement using object detection</article-title>
          <year>2022</year>
          <conf-name>IEEE Symposium Series on Computational Intelligence</conf-name>
          <conf-date>March 17, 2022</conf-date>
          <conf-loc>Singapore</conf-loc>
          <fpage>527</fpage>
          <pub-id pub-id-type="doi">10.1109/ssci51031.2022.10022295</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kulkarni</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hartman-Kenzler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Barnes</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Henrickson Parker</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Safford</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Rajamohan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>NK</given-names>
            </name>
          </person-group>
          <article-title>Differentiating laparoscopic skills of trainees with computer vision based metrics</article-title>
          <source>Proceedings of the Human Factors and Ergonomics Society Annual Meeting</source>
          <year>2021</year>
          <volume>65</volume>
          <issue>1</issue>
          <fpage>304</fpage>
          <lpage>308</lpage>
          <pub-id pub-id-type="doi">10.1177/1071181321651263</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kulkarni</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hartman-Kenzler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Barnes</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Parker</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Safford</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Scene-dependent, feedforward eye gaze metrics can differentiate technical skill levels of trainees in laparoscopic surgery</article-title>
          <source>Surg Endosc</source>
          <year>2022</year>
          <volume>37</volume>
          <issue>2</issue>
          <fpage>1569</fpage>
          <lpage>1580</lpage>
          <pub-id pub-id-type="doi">10.1007/s00464-022-09582-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sulek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sundaram</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Wachs</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Proctor</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Sensor-based indicators of performance changes between sessions during robotic surgery training</article-title>
          <source>Appl Ergon</source>
          <year>2021</year>
          <volume>90</volume>
          <fpage>103251</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32961465"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.apergo.2020.103251</pub-id>
          <pub-id pub-id-type="medline">32961465</pub-id>
          <pub-id pub-id-type="pii">S0003-6870(20)30201-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC7606790</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Kuchenbecker</surname>
              <given-names>KJ</given-names>
            </name>
          </person-group>
          <article-title>Effects of automated skill assessment on robotic surgery training</article-title>
          <source>Int J Med Robot</source>
          <year>2023</year>
          <volume>19</volume>
          <issue>2</issue>
          <fpage>e2492</fpage>
          <pub-id pub-id-type="doi">10.1002/rcs.2492</pub-id>
          <pub-id pub-id-type="medline">36524325</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keles</surname>
              <given-names>HO</given-names>
            </name>
            <name name-style="western">
              <surname>Cengiz</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Demiral</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ozmen</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Omurtag</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>High density optical neuroimaging predicts surgeons's subjective experience and skill levels</article-title>
          <source>PLoS One</source>
          <year>2021</year>
          <volume>16</volume>
          <issue>2</issue>
          <fpage>e0247117</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0247117"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0247117</pub-id>
          <pub-id pub-id-type="medline">33600502</pub-id>
          <pub-id pub-id-type="pii">PONE-D-20-34607</pub-id>
          <pub-id pub-id-type="pmcid">PMC7891714</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koskinen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bednarik</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Vrzakova</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Elomaa</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Combined gaze metrics as stress-sensitive indicators of microsurgical proficiency</article-title>
          <source>Surg Innov</source>
          <year>2020</year>
          <volume>27</volume>
          <issue>6</issue>
          <fpage>614</fpage>
          <lpage>622</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/1553350620942980?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1553350620942980</pub-id>
          <pub-id pub-id-type="medline">32687734</pub-id>
          <pub-id pub-id-type="pmcid">PMC7890692</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kasa</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Burns</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Goldenberg</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Selim</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Whyne</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hardisty</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Multi-modal deep learning for assessing surgeon technical skill</article-title>
          <source>Sensors (Basel)</source>
          <year>2022</year>
          <volume>22</volume>
          <issue>19</issue>
          <fpage>7328</fpage>
          <pub-id pub-id-type="doi">10.3390/s22197328</pub-id>
          <pub-id pub-id-type="medline">36236424</pub-id>
          <pub-id pub-id-type="pii">s22197328</pub-id>
          <pub-id pub-id-type="pmcid">PMC9571767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kruger</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Intes</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Schwaitzberg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A machine learning approach to predict surgical learning curves</article-title>
          <source>Surgery</source>
          <year>2020</year>
          <volume>167</volume>
          <issue>2</issue>
          <fpage>321</fpage>
          <lpage>327</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31753325"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.surg.2019.10.008</pub-id>
          <pub-id pub-id-type="medline">31753325</pub-id>
          <pub-id pub-id-type="pii">S0039-6060(19)30729-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6980926</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baghdadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hoshyarmanesh</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>de Lotbiniere-Bassett</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Lama</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sutherland</surname>
              <given-names>GR</given-names>
            </name>
          </person-group>
          <article-title>Data analytics interrogates robotic surgical performance using a microsurgery-specific haptic device</article-title>
          <source>Expert Rev Med Devices</source>
          <year>2020</year>
          <volume>17</volume>
          <issue>7</issue>
          <fpage>721</fpage>
          <lpage>730</lpage>
          <pub-id pub-id-type="doi">10.1080/17434440.2020.1782736</pub-id>
          <pub-id pub-id-type="medline">32536224</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Benmansour</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Malti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jannin</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Deep neural network architecture for automated soft surgical skills evaluation using objective structured assessment of technical skills criteria</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2023</year>
          <volume>18</volume>
          <issue>5</issue>
          <fpage>929</fpage>
          <lpage>937</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://hal.science/hal-03970306"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11548-022-02827-5</pub-id>
          <pub-id pub-id-type="medline">36694051</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-022-02827-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yanik</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kruger</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Intes</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Rahul</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Video-based formative and summative assessment of surgical tasks using deep learning</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>1038</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-022-26367-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-022-26367-9</pub-id>
          <pub-id pub-id-type="medline">36658186</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-022-26367-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC9852463</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shetty</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Cavuoto</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>Modeling of learning processes using continuous-time Markov chain for virtual-reality-based surgical training in laparoscopic surgery</article-title>
          <source>IEEE Trans Learn Technol</source>
          <year>2024</year>
          <volume>17</volume>
          <fpage>462</fpage>
          <lpage>473</lpage>
          <pub-id pub-id-type="doi">10.1109/tlt.2023.3236899</pub-id>
          <pub-id pub-id-type="medline">38617582</pub-id>
          <pub-id pub-id-type="pmcid">PMC11013959</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hung</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sunmola</surname>
              <given-names>IO</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Anandkumar</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Capturing fine-grained details for video-based automation of suturing skills assessment</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2023</year>
          <volume>18</volume>
          <issue>3</issue>
          <fpage>545</fpage>
          <lpage>552</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36282465"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11548-022-02778-x</pub-id>
          <pub-id pub-id-type="medline">36282465</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-022-02778-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC9975072</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sawni</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fazlollahi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Bissonnette</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bajunaid</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sabbagh</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Assessment of learning curves on a simulated neurosurgical task using metrics selected by artificial intelligence</article-title>
          <source>J Neurosurg</source>
          <year>2022</year>
          <volume>137</volume>
          <issue>4</issue>
          <fpage>1160</fpage>
          <lpage>1171</lpage>
          <pub-id pub-id-type="doi">10.3171/2021.12.JNS211563</pub-id>
          <pub-id pub-id-type="medline">35120309</pub-id>
          <pub-id pub-id-type="pii">2021.12.JNS211563</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bissonnette</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>The virtual operative assistant: an explainable artificial intelligence tool for simulation-based training in surgery and medicine</article-title>
          <source>PLoS One</source>
          <year>2020</year>
          <volume>15</volume>
          <issue>2</issue>
          <fpage>e0229596</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0229596"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0229596</pub-id>
          <pub-id pub-id-type="medline">32106247</pub-id>
          <pub-id pub-id-type="pii">PONE-D-19-18905</pub-id>
          <pub-id pub-id-type="pmcid">PMC7046231</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fazlollahi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Makhani</surname>
              <given-names>HH</given-names>
            </name>
            <name name-style="western">
              <surname>Alsayegh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bakhaidar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Santaguida</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Effect of feedback modality on simulated surgical skills learning using automated educational systems - a four-arm randomized control trial</article-title>
          <source>J Surg Educ</source>
          <year>2024</year>
          <volume>81</volume>
          <issue>2</issue>
          <fpage>275</fpage>
          <lpage>287</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2023.11.001</pub-id>
          <pub-id pub-id-type="medline">38160107</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(23)00409-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siyar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Azarnoush</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rashidi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bissonnette</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ponnudurai</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Machine learning distinguishes neurosurgical skill levels in a virtual reality tumor resection task</article-title>
          <source>Med Biol Eng Comput</source>
          <year>2020</year>
          <volume>58</volume>
          <issue>6</issue>
          <fpage>1357</fpage>
          <lpage>1367</lpage>
          <pub-id pub-id-type="doi">10.1007/s11517-020-02155-3</pub-id>
          <pub-id pub-id-type="medline">32279203</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11517-020-02155-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reich</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bissonnette</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Karlik</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Artificial neural network approach to competency-based training using a virtual reality neurosurgical simulation</article-title>
          <source>Oper Neurosurg</source>
          <year>2022</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>31</fpage>
          <lpage>39</lpage>
          <pub-id pub-id-type="doi">10.1227/ons.0000000000000173</pub-id>
          <pub-id pub-id-type="medline">35726927</pub-id>
          <pub-id pub-id-type="pii">01787389-202207000-00005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Natheir</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Christie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bajunaid</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sabbagh</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Werthner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Fares</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Azarnoush</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Utilizing artificial intelligence and electroencephalography to assess expertise on a simulated neurosurgical task</article-title>
          <source>Comput Biol Med</source>
          <year>2023</year>
          <volume>152</volume>
          <fpage>106286</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106286</pub-id>
          <pub-id pub-id-type="medline">36502696</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(22)00994-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siyar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Azarnoush</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rashidi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Tremor assessment during virtual reality brain tumor resection</article-title>
          <source>J Surg Educ</source>
          <year>2020</year>
          <volume>77</volume>
          <issue>3</issue>
          <fpage>643</fpage>
          <lpage>651</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2019.11.011</pub-id>
          <pub-id pub-id-type="medline">31822389</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(19)30860-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Reich</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Christie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Fazlollahi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Santaguida</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Sabbagh</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bajunaid</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Continuous monitoring of surgical bimanual expertise using deep neural networks in virtual reality simulation</article-title>
          <source>NPJ Digit Med</source>
          <year>2022</year>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>54</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-022-00596-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-022-00596-8</pub-id>
          <pub-id pub-id-type="medline">35473961</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-022-00596-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC9042967</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fazlollahi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Bakhaidar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alsayegh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Langleben</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sabbagh</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bajunaid</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Harley</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Effect of artificial intelligence tutoring vs expert instruction on learning simulated surgical skills among medical students: a randomized clinical trial</article-title>
          <source>JAMA Netw Open</source>
          <year>2022</year>
          <volume>5</volume>
          <issue>2</issue>
          <fpage>e2149008</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35191972"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.49008</pub-id>
          <pub-id pub-id-type="medline">35191972</pub-id>
          <pub-id pub-id-type="pii">2789268</pub-id>
          <pub-id pub-id-type="pmcid">PMC8864513</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Du</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Tai</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Using beta rhythm from EEG to assess physicians' operative skills in virtual surgical training</article-title>
          <source>IEEE Trans Human-Mach Syst</source>
          <year>2023</year>
          <volume>53</volume>
          <issue>4</issue>
          <fpage>688</fpage>
          <lpage>696</lpage>
          <pub-id pub-id-type="doi">10.1109/thms.2022.3228214</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dhanakshirur</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Katiyar</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>From feline classification to skills evaluation: a multitask learning framework for evaluating micro suturing neurosurgical skills</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE International Conference on Image Processing (ICIP)</conf-name>
          <conf-date>October 8-11, 2023</conf-date>
          <conf-loc>Kuala Lumpur</conf-loc>
          <fpage>3374</fpage>
          <lpage>3378</lpage>
          <pub-id pub-id-type="doi">10.1109/icip49359.2023.10222868</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The development of an eye movement-based deep learning system for laparoscopic surgical skills assessment</article-title>
          <source>Sci Rep</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>11036</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-022-15053-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-022-15053-5</pub-id>
          <pub-id pub-id-type="medline">35970911</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-022-15053-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC9378740</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shafiei</surname>
              <given-names>SB</given-names>
            </name>
            <name name-style="western">
              <surname>Shadpour</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mohler</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Sasangohar</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Seilanian Toussi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Shafqat</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Surgical skill level classification model development using EEG and eye-gaze data and machine learning algorithms</article-title>
          <source>J Robot Surg</source>
          <year>2023</year>
          <volume>17</volume>
          <issue>6</issue>
          <fpage>2963</fpage>
          <lpage>2971</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37864129"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11701-023-01722-8</pub-id>
          <pub-id pub-id-type="medline">37864129</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11701-023-01722-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC10678814</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lavanchy</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Zindel</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kirtac</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Twick</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hosgor</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Candinas</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Beldi</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Automation of surgical skill assessment using a three-stage machine learning algorithm</article-title>
          <source>Sci Rep</source>
          <year>2021</year>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>5197</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-021-84295-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-021-84295-6</pub-id>
          <pub-id pub-id-type="medline">33664317</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-021-84295-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7933408</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ryder</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Mott</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Anidi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Shigut</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bidwell</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ngam</surname>
              <given-names>BN</given-names>
            </name>
            <name name-style="western">
              <surname>Snell</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Forczmanski</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rooney</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Jeffcoach</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>GJ</given-names>
            </name>
          </person-group>
          <article-title>Using artificial intelligence to gauge competency on a novel laparoscopic training system</article-title>
          <source>J Surg Educ</source>
          <year>2024</year>
          <volume>81</volume>
          <issue>2</issue>
          <fpage>267</fpage>
          <lpage>274</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2023.10.007</pub-id>
          <pub-id pub-id-type="medline">38160118</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(23)00381-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Halperin</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sroka</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zuckerman</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Laufer</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Automatic performance evaluation of the intracorporeal suture exercise</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2024</year>
          <volume>19</volume>
          <issue>1</issue>
          <fpage>83</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-023-02963-6</pub-id>
          <pub-id pub-id-type="medline">37278834</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-023-02963-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ebina</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Abe</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hotta</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Higuchi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Furumido</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Iwahara</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Miyaji</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shibuya</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lingbo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Komizunai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kurashima</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kikuchi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Matsumoto</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Osawa</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Murai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tsujita</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sase</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Konno</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shinohara</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Objective evaluation of laparoscopic surgical skills in wet lab training based on motion analysis and machine learning</article-title>
          <source>Langenbecks Arch Surg</source>
          <year>2022</year>
          <volume>407</volume>
          <issue>5</issue>
          <fpage>2123</fpage>
          <lpage>2132</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35394212"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00423-022-02505-9</pub-id>
          <pub-id pub-id-type="medline">35394212</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00423-022-02505-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC9399206</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hamilton</surname>
              <given-names>BC</given-names>
            </name>
            <name name-style="western">
              <surname>Dairywala</surname>
              <given-names>MI</given-names>
            </name>
            <name name-style="western">
              <surname>Highet</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>O'Sullivan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chern</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Soriano</surname>
              <given-names>IS</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence based real-time video ergonomic assessment and training improves resident ergonomics</article-title>
          <source>Am J Surg</source>
          <year>2023</year>
          <volume>226</volume>
          <issue>5</issue>
          <fpage>741</fpage>
          <lpage>746</lpage>
          <pub-id pub-id-type="doi">10.1016/j.amjsurg.2023.07.028</pub-id>
          <pub-id pub-id-type="medline">37500299</pub-id>
          <pub-id pub-id-type="pii">S0002-9610(23)00342-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adrales</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ardito</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Chowbey</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Morales-Conde</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ferreres</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Hensman</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Matthaei</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ramshaw</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Roberts</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Schrem</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tabiri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Vibert</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Woods</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Laparoscopic cholecystectomy critical view of safety (LC-CVS): a multi-national validation study of an objective, procedure-specific assessment using video-based assessment (VBA)</article-title>
          <source>Surg Endosc</source>
          <year>2024</year>
          <volume>38</volume>
          <issue>2</issue>
          <fpage>922</fpage>
          <lpage>930</lpage>
          <pub-id pub-id-type="doi">10.1007/s00464-023-10479-y</pub-id>
          <pub-id pub-id-type="medline">37891369</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00464-023-10479-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Popov</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <collab>ACM</collab>
          </person-group>
          <article-title>SketchSearch: fine-tuning reference maps to create exercises in support of video-based learning for surgeons</article-title>
          <year>2023</year>
          <conf-name>UIST '23: The 36th Annual ACM Symposium on User Interface Software and Technology</conf-name>
          <conf-date>October 29, 2023-November 1, 2023</conf-date>
          <conf-loc>San Francisco, CA, USA</conf-loc>
          <fpage>1</fpage>
          <lpage>3</lpage>
          <pub-id pub-id-type="doi">10.1145/3586182.3615816</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bissonnette</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler-Schwartz</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Karlik</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Artificial neural networks to assess virtual reality anterior cervical discectomy performance</article-title>
          <source>Oper Neurosurg</source>
          <year>2020</year>
          <volume>19</volume>
          <issue>1</issue>
          <fpage>65</fpage>
          <lpage>75</lpage>
          <pub-id pub-id-type="doi">10.1093/ons/opz359</pub-id>
          <pub-id pub-id-type="medline">31832652</pub-id>
          <pub-id pub-id-type="pii">5674993</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alkadri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ledwos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mirchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Reich</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yilmaz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Driscoll</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Del Maestro</surname>
              <given-names>RF</given-names>
            </name>
          </person-group>
          <article-title>Utilizing a multilayer perceptron artificial neural network to assess a virtual reality surgical procedure</article-title>
          <source>Comput Biol Med</source>
          <year>2021</year>
          <volume>136</volume>
          <fpage>104770</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.104770</pub-id>
          <pub-id pub-id-type="medline">34426170</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(21)00564-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shedage</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Farmer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Demirel</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Development of virtual skill trainers and their validation study analysis using machine learning</article-title>
          <source>ICISDM '21: Proceedings of the 2021 5th International Conference on Information System and Data Mining</source>
          <year>2021</year>
          <fpage>8</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1145/3471287.3471296</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tabuchi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Morita</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Miki</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Deguchi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kamiura</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Real-time artificial intelligence evaluation of cataract surgery: A preliminary study on demonstration experiment</article-title>
          <source>Taiwan J Ophthalmol</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>2</issue>
          <fpage>147</fpage>
          <lpage>154</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35813791"/>
          </comment>
          <pub-id pub-id-type="doi">10.4103/tjo.tjo_5_22</pub-id>
          <pub-id pub-id-type="medline">35813791</pub-id>
          <pub-id pub-id-type="pii">TJO-12-147</pub-id>
          <pub-id pub-id-type="pmcid">PMC9262019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Stanojcic</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>JO</given-names>
            </name>
            <name name-style="western">
              <surname>Long</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nie</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ni</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Intelligent cataract surgery supervision and evaluation via deep learning</article-title>
          <source>Int J Surg</source>
          <year>2022</year>
          <volume>104</volume>
          <fpage>106740</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1743-9191(22)00517-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijsu.2022.106740</pub-id>
          <pub-id pub-id-type="medline">35760343</pub-id>
          <pub-id pub-id-type="pii">S1743-9191(22)00517-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A practical continuous curvilinear capsulorhexis self-training system</article-title>
          <source>Indian J Ophthalmol</source>
          <year>2021</year>
          <volume>69</volume>
          <issue>10</issue>
          <fpage>2678</fpage>
          <lpage>2686</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34571614"/>
          </comment>
          <pub-id pub-id-type="doi">10.4103/ijo.IJO_210_21</pub-id>
          <pub-id pub-id-type="medline">34571614</pub-id>
          <pub-id pub-id-type="pii">IndianJOphthalmol_2021_69_10_2678_326452</pub-id>
          <pub-id pub-id-type="pmcid">PMC8597480</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simmonds</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Brentnall</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lenihan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of a novel universal robotic surgery virtual reality simulation proficiency index that will allow comparisons of users across any virtual reality simulation curriculum</article-title>
          <source>Surg Endosc</source>
          <year>2021</year>
          <volume>35</volume>
          <issue>10</issue>
          <fpage>5867</fpage>
          <lpage>5875</lpage>
          <pub-id pub-id-type="doi">10.1007/s00464-021-08609-5</pub-id>
          <pub-id pub-id-type="medline">34231063</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00464-021-08609-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kocielnik</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chu</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Deep multimodal fusion for surgical feedback classification</article-title>
          <source>Proc Mach Learn Res</source>
          <year>2023</year>
          <volume>225</volume>
          <fpage>256</fpage>
          <lpage>267</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.scopus.com/inward/record.uri?eid=2-s2.0-85184348444&amp;partnerID=40&amp;md5=bf8da670d0ba7b19b594338f8100c550"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Mariani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Menciassi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>De Momi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fey</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Uncertainty-aware self-supervised learning for cross-domain technical skill assessment in robot-assisted surgery</article-title>
          <source>IEEE Trans Med Robot Bionics</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>2</issue>
          <fpage>301</fpage>
          <lpage>311</lpage>
          <pub-id pub-id-type="doi">10.1109/tmrb.2023.3272008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bkheet</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>D'Angelo</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Goldbraikh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Laufer</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Using hand pose estimation to automate open surgery training feedback</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2023</year>
          <volume>18</volume>
          <issue>7</issue>
          <fpage>1279</fpage>
          <lpage>1285</lpage>
          <pub-id pub-id-type="doi">10.1007/s11548-023-02947-6</pub-id>
          <pub-id pub-id-type="medline">37253925</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-023-02947-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kadkhodamohammadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sivanesan Uthraraj</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Giataganas</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gras</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Kerr</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Luengo</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Oussedik</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stoyanov</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Towards video-based surgical workflow understanding in open orthopaedic surgery</article-title>
          <source>Computer Methods in Biomechanics and Biomedical Engineering: Imaging &amp; Visualization</source>
          <year>2020</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>286</fpage>
          <lpage>293</lpage>
          <pub-id pub-id-type="doi">10.1080/21681163.2020.1835552</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Papagiannakis</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zikas</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lydatakis</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>MAGES 3.0: Tying the knot of medical VR</article-title>
          <year>2020</year>
          <conf-name>SIGGRAPH '20: Special Interest Group on Computer Graphics and Interactive Techniques Conference</conf-name>
          <conf-date>August 17, 2020</conf-date>
          <conf-loc>Virtual Event, USA</conf-loc>
          <fpage>1</fpage>
          <lpage>2</lpage>
          <pub-id pub-id-type="doi">10.1145/3388536.3407888</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thanawala</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jesneck</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shelton</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rhee</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Seymour</surname>
              <given-names>NE</given-names>
            </name>
          </person-group>
          <article-title>Overcoming systems factors in case logging with artificial intelligence tools</article-title>
          <source>J Surg Educ</source>
          <year>2022</year>
          <volume>79</volume>
          <issue>4</issue>
          <fpage>1024</fpage>
          <lpage>1030</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2022.01.013</pub-id>
          <pub-id pub-id-type="medline">35193831</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(22)00013-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sung</surname>
              <given-names>MY</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Intelligent haptic virtual simulation for suture surgery</article-title>
          <source>Int J Adv Comput Sci Appl</source>
          <year>2020</year>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>54</fpage>
          <lpage>59</lpage>
          <pub-id pub-id-type="doi">10.14569/ijacsa.2020.0110208</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mellor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Janko</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Harwin</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Sherratt</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Craddock</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Plötz</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Generalized and efficient skill assessment from IMU data with applications in gymnastics and medical training</article-title>
          <source>ACM Trans Comput Healthcare</source>
          <year>2020</year>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.1145/3422168</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lamtara</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hanegbi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Talks</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Transfer of automated performance feedback models to different specimens in virtual reality temporal bone surgery</article-title>
          <year>2020</year>
          <conf-name>21st International Conference, AIED 2020</conf-name>
          <conf-date>July 6–10, 2020</conf-date>
          <conf-loc>Ifrane, Morocco</conf-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>296</fpage>
          <lpage>306</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-52237-7_24</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hernigou</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Sensor and machine learning-based assessment of gap balancing in cadaveric unicompartmental knee arthroplasty surgical training</article-title>
          <source>Int Orthop</source>
          <year>2021</year>
          <volume>45</volume>
          <issue>11</issue>
          <fpage>2843</fpage>
          <lpage>2849</lpage>
          <pub-id pub-id-type="doi">10.1007/s00264-021-05176-1</pub-id>
          <pub-id pub-id-type="medline">34351461</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00264-021-05176-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Seo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Moon</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Augmented reality for botulinum toxin injection</article-title>
          <source>Concurrency and Computation: Practice and Experience</source>
          <year>2019</year>
          <volume>32</volume>
          <issue>18</issue>
          <pub-id pub-id-type="doi">10.1002/cpe.5526</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saricilar</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Burgess</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Freeman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A pilot study of the use of artificial intelligence with high-fidelity simulations in assessing endovascular procedural competence independent of a human examiner</article-title>
          <source>ANZ J Surg</source>
          <year>2023</year>
          <volume>93</volume>
          <issue>6</issue>
          <fpage>1525</fpage>
          <lpage>1531</lpage>
          <pub-id pub-id-type="doi">10.1111/ans.18484</pub-id>
          <pub-id pub-id-type="medline">37088922</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kiyasseh</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Laca</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Haque</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wagner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Donoho</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Anandkumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hung</surname>
              <given-names>AJ</given-names>
            </name>
          </person-group>
          <article-title>A multi-institutional study using artificial intelligence to provide reliable and fair feedback to surgeons</article-title>
          <source>Commun Med (Lond)</source>
          <year>2023</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>42</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s43856-023-00263-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s43856-023-00263-3</pub-id>
          <pub-id pub-id-type="medline">36997578</pub-id>
          <pub-id pub-id-type="pii">10.1038/s43856-023-00263-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC10063640</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Machine learning-based operation skills assessment with vascular difficulty index for vascular intervention surgery</article-title>
          <source>Med Biol Eng Comput</source>
          <year>2020</year>
          <volume>58</volume>
          <issue>8</issue>
          <fpage>1707</fpage>
          <lpage>1721</lpage>
          <pub-id pub-id-type="doi">10.1007/s11517-020-02195-9</pub-id>
          <pub-id pub-id-type="medline">32468299</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11517-020-02195-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lawaetz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Skovbo Kristensen</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Nayahangan</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Van Herzeele</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Konge</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Eiberg</surname>
              <given-names>JP</given-names>
            </name>
          </person-group>
          <article-title>Simulation based training and assessment in open vascular surgery: a systematic review</article-title>
          <source>Eur J Vasc Endovasc Surg</source>
          <year>2021</year>
          <volume>61</volume>
          <issue>3</issue>
          <fpage>502</fpage>
          <lpage>509</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1078-5884(20)30996-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ejvs.2020.11.003</pub-id>
          <pub-id pub-id-type="medline">33309171</pub-id>
          <pub-id pub-id-type="pii">S1078-5884(20)30996-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abelleyra Lastoria</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Rehman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jasionowska</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Salibi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cavale</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dasgupta</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Aydin</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A systematic review of simulation-based training tools in plastic surgery</article-title>
          <source>J Surg Educ</source>
          <year>2025</year>
          <volume>82</volume>
          <issue>1</issue>
          <fpage>103320</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1931-7204(24)00468-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2024.103320</pub-id>
          <pub-id pub-id-type="medline">39615161</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(24)00468-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Woodward</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Aydın</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dasgupta</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sinha</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Simulation-based training in orthopedic surgery: A systematic review</article-title>
          <source>Curr Probl Surg</source>
          <year>2025</year>
          <volume>63</volume>
          <fpage>101676</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0011-3840(24)00237-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cpsurg.2024.101676</pub-id>
          <pub-id pub-id-type="medline">39922638</pub-id>
          <pub-id pub-id-type="pii">S0011-3840(24)00237-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref99">
        <label>99</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rahimi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Uluç</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Hardon</surname>
              <given-names>SF</given-names>
            </name>
            <name name-style="western">
              <surname>Bonjer</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>van der Peet</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Daams</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Training in robotic-assisted surgery: a systematic review of training modalities and objective and subjective assessment methods</article-title>
          <source>Surg Endosc</source>
          <year>2024</year>
          <volume>38</volume>
          <issue>7</issue>
          <fpage>3547</fpage>
          <lpage>3555</lpage>
          <pub-id pub-id-type="doi">10.1007/s00464-024-10915-7</pub-id>
          <pub-id pub-id-type="medline">38814347</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00464-024-10915-7</pub-id>
          <pub-id pub-id-type="pmcid">PMC11219449</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref100">
        <label>100</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Anastasiou</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tesfai</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ghamrawi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Mazomenos</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Curtis</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Sridhar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stoyanov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Francis</surname>
              <given-names>NK</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of objective tools and artificial intelligence in robotic surgery technical skills assessment: a systematic review</article-title>
          <source>Br J Surg</source>
          <year>2024</year>
          <volume>111</volume>
          <issue>1</issue>
          <fpage>znad331</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37951600"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bjs/znad331</pub-id>
          <pub-id pub-id-type="medline">37951600</pub-id>
          <pub-id pub-id-type="pii">7407357</pub-id>
          <pub-id pub-id-type="pmcid">PMC10771126</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref101">
        <label>101</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Levin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McKechnie</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Khalid</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Grantcharov</surname>
              <given-names>TP</given-names>
            </name>
            <name name-style="western">
              <surname>Goldenberg</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Automated methods of technical skill assessment in surgery: a systematic review</article-title>
          <source>J Surg Educ</source>
          <year>2019</year>
          <volume>76</volume>
          <issue>6</issue>
          <fpage>1629</fpage>
          <lpage>1639</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jsurg.2019.06.011</pub-id>
          <pub-id pub-id-type="medline">31272846</pub-id>
          <pub-id pub-id-type="pii">S1931-7204(19)30164-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref102">
        <label>102</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Iqbal</surname>
              <given-names>FM</given-names>
            </name>
            <name name-style="western">
              <surname>Darzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Purkayastha</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kinross</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Machine learning for technical skill assessment in surgery: a systematic review</article-title>
          <source>NPJ Digit Med</source>
          <year>2022</year>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-022-00566-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-022-00566-0</pub-id>
          <pub-id pub-id-type="medline">35241760</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-022-00566-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC8894462</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref103">
        <label>103</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pedrett</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mascagni</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Beldi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Padoy</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lavanchy</surname>
              <given-names>JL</given-names>
            </name>
          </person-group>
          <article-title>Technical skill assessment in minimally invasive surgery using artificial intelligence: a systematic review</article-title>
          <source>Surg Endosc</source>
          <year>2023</year>
          <volume>37</volume>
          <issue>10</issue>
          <fpage>7412</fpage>
          <lpage>7424</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris-portal.unibe.ch/handle/20.500.12422/169317"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00464-023-10335-z</pub-id>
          <pub-id pub-id-type="medline">37584774</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00464-023-10335-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC10520175</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref104">
        <label>104</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Tiefenbach</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Demetriades</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>The role of artificial intelligence in surgical simulation</article-title>
          <source>Frontiers in Medical Technology</source>
          <year>2022</year>
          <access-date>2025-10-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.frontiersin.org/journals/medical-technology/articles/10.3389/fmedt.2022.1076755">https://www.frontiersin.org/journals/medical-technology/articles/10.3389/fmedt.2022.1076755</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref105">
        <label>105</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Komasawa</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yokohira</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Simulation-based education in the artificial intelligence era</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>6</issue>
          <fpage>e40940</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37496549"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.40940</pub-id>
          <pub-id pub-id-type="medline">37496549</pub-id>
          <pub-id pub-id-type="pmcid">PMC10368461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref106">
        <label>106</label>
        <nlm-citation citation-type="web">
          <article-title>AI review - selected</article-title>
          <source>Zotero</source>
          <access-date>2025-10-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.zotero.org/groups/5450557/ai_review_-_selected">https://www.zotero.org/groups/5450557/ai_review_-_selected</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
