<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="review-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v26i1e60501</article-id>
      <article-id pub-id-type="pmid">39255030</article-id>
      <article-id pub-id-type="doi">10.2196/60501</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Prompt Engineering Paradigms for Medical Applications: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>de Azevedo Cardoso</surname>
            <given-names>Taiane</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bhasuran</surname>
            <given-names>Balu</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hu</surname>
            <given-names>Danqing</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Jain</surname>
            <given-names>Aditi</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Zaghir</surname>
            <given-names>Jamil</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Department of Radiology and Medical Informatics</institution>
            <institution>University of Geneva</institution>
            <addr-line>Chemin des Mines, 9</addr-line>
            <addr-line>Geneva, 1202</addr-line>
            <country>Switzerland</country>
            <phone>41 022 379 08 18</phone>
            <email>Jamil.Zaghir@unige.ch</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8209-6098</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Naguib</surname>
            <given-names>Marco</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-2950-8852</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Bjelogrlic</surname>
            <given-names>Mina</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6922-3283</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Névéol</surname>
            <given-names>Aurélie</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1846-9144</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Tannier</surname>
            <given-names>Xavier</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2452-8868</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Lovis</surname>
            <given-names>Christian</given-names>
          </name>
          <degrees>MPH, MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2681-8076</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Division of Medical Information Sciences</institution>
        <institution>Geneva University Hospitals</institution>
        <addr-line>Geneva</addr-line>
        <country>Switzerland</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Radiology and Medical Informatics</institution>
        <institution>University of Geneva</institution>
        <addr-line>Geneva</addr-line>
        <country>Switzerland</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Université Paris-Saclay</institution>
        <institution>CNRS</institution>
        <institution>Laboratoire Interdisciplinaire des Sciences du Numérique</institution>
        <addr-line>Orsay</addr-line>
        <country>France</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Sorbonne Université</institution>
        <institution>INSERM</institution>
        <institution>Université Sorbonne Paris-Nord, Laboratoire d'Informatique Médicale et d'Ingénierie des Connaissances en eSanté, LIMICS</institution>
        <addr-line>Paris</addr-line>
        <country>France</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Jamil Zaghir <email>Jamil.Zaghir@unige.ch</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>10</day>
        <month>9</month>
        <year>2024</year>
      </pub-date>
      <volume>26</volume>
      <elocation-id>e60501</elocation-id>
      <history>
        <date date-type="received">
          <day>14</day>
          <month>5</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>3</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>9</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>22</day>
          <month>7</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Jamil Zaghir, Marco Naguib, Mina Bjelogrlic, Aurélie Névéol, Xavier Tannier, Christian Lovis. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 10.09.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2024/1/e60501" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Prompt engineering, focusing on crafting effective prompts to large language models (LLMs), has garnered attention for its capability to harness the potential of LLMs. This is even more crucial in the medical domain due to its specialized terminology and language technicity. Clinical natural language processing applications must navigate complex language and ensure privacy compliance. Prompt engineering offers a novel approach by designing tailored prompts to guide models in exploiting clinically relevant information from complex medical texts. Despite its promise, the efficacy of prompt engineering in the medical domain remains to be fully explored.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of the study is to review research efforts and technical approaches in prompt engineering for medical applications as well as provide an overview of opportunities and challenges for clinical practice.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Databases indexing the fields of medicine, computer science, and medical informatics were queried in order to identify relevant published papers. Since prompt engineering is an emerging field, preprint databases were also considered. Multiple data elements were extracted, such as the prompt paradigm, the involved LLMs, the languages of the study, the domain of the topic, the baselines, and several learning, design, and architecture strategies specific to prompt engineering. We include studies that apply prompt engineering–based methods to the medical domain, published between 2022 and 2024, and covering multiple prompt paradigms such as prompt learning (PL), prompt tuning (PT), and prompt design (PD).</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We included 114 recent prompt engineering studies. Among the 3 prompt paradigms, we have observed that PD is the most prevalent (78 papers). In 12 papers, PD, PL, and PT terms were used interchangeably. While ChatGPT is the most commonly used LLM, we have identified 7 studies using this LLM on a sensitive clinical data set. Chain-of-thought, present in 17 studies, emerges as the most frequent PD technique. While PL and PT papers typically provide a baseline for evaluating prompt-based approaches, 61% (48/78) of the PD studies do not report any nonprompt-related baseline. Finally, we individually examine each of the key prompt engineering–specific information reported across papers and find that many studies neglect to explicitly mention them, posing a challenge for advancing prompt engineering research.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>In addition to reporting on trends and the scientific landscape of prompt engineering, we provide reporting guidelines for future studies to help advance research in the medical field. We also disclose tables and figures summarizing medical prompt engineering papers available and hope that future contributions will leverage these existing works to better advance the field.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>prompt engineering</kwd>
        <kwd>prompt design</kwd>
        <kwd>prompt learning</kwd>
        <kwd>prompt tuning</kwd>
        <kwd>large language models</kwd>
        <kwd>LLMs</kwd>
        <kwd>scoping review</kwd>
        <kwd>clinical natural language processing</kwd>
        <kwd>natural language processing</kwd>
        <kwd>NLP</kwd>
        <kwd>medical texts</kwd>
        <kwd>medical application</kwd>
        <kwd>medical applications</kwd>
        <kwd>clinical practice</kwd>
        <kwd>privacy</kwd>
        <kwd>medicine</kwd>
        <kwd>computer science</kwd>
        <kwd>medical informatics</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>In recent years, the development of large language models (LLMs) such as GPT-3 has disrupted the field of natural language processing (NLP). LLMs have demonstrated capabilities in processing and generating human-like text, with applications ranging from text generation and translation to question answering and summarization [<xref ref-type="bibr" rid="ref1">1</xref>]. However, harnessing the full potential of LLMs requires careful consideration of how input prompts are formulated and optimized [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
      <p>Input prompts denote a set of instructions provided to the LLM to execute a task. Prompt engineering, a term coined to describe the strategic design and optimization of prompts for LLMs, has emerged as a crucial aspect of leveraging these models. By crafting prompts that effectively convey tasks or queries, researchers and practitioners can guide LLMs to improve the accuracy and pertinence of responses. The literature defines prompt engineering in various ways: it can be regarded as a prompt structuring process that enhances the efficiency of an LLM to achieve a specific objective [<xref ref-type="bibr" rid="ref3">3</xref>] or as the mechanism through which LLMs are programmed by prompts [<xref ref-type="bibr" rid="ref4">4</xref>]. Prompt engineering encompasses a plethora of techniques, often separated into distinct categories such as output customization and prompt improvement [<xref ref-type="bibr" rid="ref4">4</xref>]. Existing prompt paradigms are presented in more detail in the Methods section.</p>
      <p>In the realm of medical NLP, significant advancements have been made, such as the release of LLMs specialized in medical language and the availability of public medical data sets, including in languages other than English [<xref ref-type="bibr" rid="ref5">5</xref>]. The unique intricacies of medical language, characterized by its terminological precision, context sensitivity, and domain-specific nuances, demand a dedicated focus and exploration of NLP in health care research. Despite these imperatives, to our knowledge, there is currently no systematic review analyzing prompt engineering applied to the medical domain.</p>
      <p>The aim of this scoping review is to shed light on prompt engineering, as it is developed and used in the medical field, by systematically analyzing the literature in the field. Specifically, we examine the definitions, methodologies, techniques, and outcomes of prompt engineering across various NLP tasks. Methodological strengths, weaknesses, and limitations of the current wave of experimentation are discussed. Finally, we provide guidelines for comprehensive reporting of prompt engineering–related studies to improve clarity and facilitate further research in the field. We aspire to furnish insights that will inform both researchers and users about the pivotal role of prompt engineering in optimizing the efficacy of LLMs. By gaining a thorough understanding of the current landscape of prompt engineering research, we can pinpoint areas warranting further investigation and development, thereby propelling the field of medical NLP forward.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Design</title>
        <p>Our scoping review was conducted following the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) guidelines for scoping reviews (available in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). In this review, we use terminology to denote emerging technical concepts that lack consensus definitions. We propose the following definitions based on previous use in the literature:</p>
        <list list-type="bullet">
          <list-item>
            <p>LLM: Object that models language and can be used to generate text by receiving large-scale language modeling pretraining (Luccioni and Rogers [<xref ref-type="bibr" rid="ref6">6</xref>] define an arbitrary threshold at 1 billion tokens of training data). An LLM can be adapted to downstream tasks through transfer learning approaches such as fine-tuning or prompt-based techniques. Following the study of Thirunavukarasu et al [<xref ref-type="bibr" rid="ref7">7</xref>] of models for the medical field, we include Bidirectional Encoder Representations From Transformers (BERT)–based and GPT-based models in this definition, although Zhao et al [<xref ref-type="bibr" rid="ref8">8</xref>] place BERT models in a separate category.</p>
          </list-item>
          <list-item>
            <p>Fine-tuning: Approach in which the weights of the pretrained LLM are retrained on new samples. The additional data can be labeled and designed to adapt the LLM to a new downstream task.</p>
          </list-item>
          <list-item>
            <p>Prompt design (PD) [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]: Manually building a prompt (named manual prompt or hard prompt), tailored to guide the LLM toward resolving the task by simply predicting the most probable continuity of the prompt. The prompt is usually a set of task-specific instructions, occasionally featuring a few demonstrations of the task.</p>
          </list-item>
          <list-item>
            <p>Prompt learning (PL) [<xref ref-type="bibr" rid="ref3">3</xref>]: Manually building a prompt and passing it to an LLM, trained via the masked language modeling (MLM) objective, to predict masked tokens. The prompt often features masked tokens, over which the LLM makes predictions. Those are then projected as predictions for a new downstream task. This approach is also referred to as prompt-based learning.</p>
          </list-item>
          <list-item>
            <p>Prompt tuning (PT) [<xref ref-type="bibr" rid="ref9">9</xref>]: Refers to the LLM prompting where part or all the prompt is a trainable vectorial representation (known as continuous prompt or soft prompt) that is optimized with respect to the annotated instances.</p>
          </list-item>
        </list>
        <p><xref rid="figure1" ref-type="fig">Figure 1</xref> illustrates the 4 approaches described above.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Illustration of traditional fine-tuning and the 3 prompt-based paradigms (the fire logo represents trainable parameters, and the flake logo illustrates frozen parameters). LLM: large language model.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Inclusion and Exclusion Criteria</title>
        <p>Studies were included if they met the following criteria: focus on prompt engineering, involvement of at least 1 LLM, relevance to the medical field (biomedical or clinical), pertaining to text-based generation (excluding vision-related prompts), and not focusing on prompting for academic writing purposes. Furthermore, as most of the first studies about prompt engineering emerged in 2022 [<xref ref-type="bibr" rid="ref2">2</xref>], we added the following constraint: the publication date should be later than 2021.</p>
      </sec>
      <sec>
        <title>Screening Process</title>
        <p>The initial set of papers retrieved from the searches underwent screening based on titles, abstracts, and keywords. The search strategy is described in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. Screening was performed by 2 reviewers (JZ and MN), working in a double-blind process. Interannotator agreement was calculated, with conflicts resolved through discussion.</p>
      </sec>
      <sec>
        <title>Data Synthesis</title>
        <p>We extracted information on prompt paradigms (PD, PL, and PT), involved LLMs, data sets used, studied language, domain (biomedical or clinical), medical subfield (if any), mentioned prompt engineering techniques, computational complexity, baselines, relative performances, and key findings. Additionally, we extracted journal information and noted instances of PD or PL or PT terminology misuse. Details are available in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. Finally, we compile a list of recommendations based on the positive or negative trends we identify from the selected papers.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Screening Results</title>
        <p>The systematic search across sources yielded 398 papers. Following the removal of duplicates, 251 papers underwent screening based on title, abstract, and keywords, leading to the exclusion of 94 studies. During this first screening step, 33 conflicts were identified and resolved among the annotators, resulting in an interannotator agreement of 86.8% (n=218). Subsequently, 157 studies remained, and full-text copies were retrieved and thoroughly screened. This process culminated in the inclusion of a total of 114 papers in this scoping review. The detailed process of study selection is shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. Among the selected papers, 13 are from clinical venues, 33 are from medical informatics sources, 31 are from computer science publications, and 4 are from other sources. Notably, 33 of them are preprints.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram for the review process.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Prompt Paradigms and Medical Subfields</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> depicts the number of papers identified within each prompt paradigm along with their associated medical subfields. Some papers may simultaneously involve several (up to 2 in this review) prompt paradigms. Notably, PD emerged as the predominant category, with a total of 78 papers. These papers spanned across various medical fields, with a greater emphasis on clinical (including specialties) rather than biomedical disciplines. The screening yields 29 PL papers and 19 PT papers, with both paradigms maintaining a balanced distribution between biomedical and clinical domains. However, it is noteworthy that unlike PL and PT, PD encompassed a much broader spectrum of clinical specialties, with a particular interest in psychiatry.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Paper distribution by prompt category and medical subfield, with corresponding references.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="670"/>
            <col width="300"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Prompt paradigm and domain of the topic</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>Prompt design (78)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Biomedical (17)</td>
                <td>[<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Medical licensing examination (12)</td>
                <td>[<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref38">38</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Clinical (general) (15)</td>
                <td>[<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref53">53</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Psychiatry (10)</td>
                <td>[<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref54">54</xref>-<xref ref-type="bibr" rid="ref62">62</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Oncology (5)</td>
                <td>[<xref ref-type="bibr" rid="ref63">63</xref>-<xref ref-type="bibr" rid="ref67">67</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Cardiology (4)</td>
                <td>[<xref ref-type="bibr" rid="ref68">68</xref>-<xref ref-type="bibr" rid="ref71">71</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Ophthalmology (3)</td>
                <td>[<xref ref-type="bibr" rid="ref72">72</xref>-<xref ref-type="bibr" rid="ref74">74</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Neurology (3)</td>
                <td>[<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Orthopedics (2)</td>
                <td>[<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Clinical trials (2)</td>
                <td>[<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Intensive care (2)</td>
                <td>[<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Geriatrics (2)</td>
                <td>[<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref76">76</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Radiology (2)</td>
                <td>[<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref82">82</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Nuclear medicine (1)</td>
                <td>[<xref ref-type="bibr" rid="ref29">29</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Hepatology (1)</td>
                <td>[<xref ref-type="bibr" rid="ref83">83</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Endocrinology (1)</td>
                <td>[<xref ref-type="bibr" rid="ref84">84</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Plastic surgery (1)</td>
                <td>[<xref ref-type="bibr" rid="ref85">85</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Gastroenterology (1)</td>
                <td>[<xref ref-type="bibr" rid="ref32">32</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Genetics (1)</td>
                <td>[<xref ref-type="bibr" rid="ref86">86</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Nursing (1)</td>
                <td>[<xref ref-type="bibr" rid="ref87">87</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Prompt learning (29)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Biomedical (13)</td>
                <td>[<xref ref-type="bibr" rid="ref88">88</xref>-<xref ref-type="bibr" rid="ref100">100</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Clinical (general) (15)</td>
                <td>[<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref101">101</xref>-<xref ref-type="bibr" rid="ref113">113</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Psychiatry (1)</td>
                <td>[<xref ref-type="bibr" rid="ref114">114</xref>]</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Prompt tuning (19)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Biomedical (9)</td>
                <td>[<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref98">98</xref>,<xref ref-type="bibr" rid="ref115">115</xref>,<xref ref-type="bibr" rid="ref116">116</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Clinical (general) (6)</td>
                <td>[<xref ref-type="bibr" rid="ref101">101</xref>,<xref ref-type="bibr" rid="ref105">105</xref>,<xref ref-type="bibr" rid="ref110">110</xref>,<xref ref-type="bibr" rid="ref117">117</xref>-<xref ref-type="bibr" rid="ref119">119</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Oncology (2)</td>
                <td>[<xref ref-type="bibr" rid="ref120">120</xref>,<xref ref-type="bibr" rid="ref121">121</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Psychiatry (1)</td>
                <td>[<xref ref-type="bibr" rid="ref122">122</xref>]</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Medical insurance (1)</td>
                <td>[<xref ref-type="bibr" rid="ref123">123</xref>]</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Terminology Use</title>
        <p>In our review, the consistency of terminology use around prompt engineering was investigated, particularly concerning its 3 paradigms: PD, PL, and PT. Across the papers, we meticulously tracked instances where the terminology was applied differently from the definitions used in the literature and described in the introduction. Notably, PL was used to refer to PD 4 times [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref86">86</xref>] and PT once [<xref ref-type="bibr" rid="ref119">119</xref>], while PT was used 5 times to describe PL [<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref96">96</xref>,<xref ref-type="bibr" rid="ref97">97</xref>,<xref ref-type="bibr" rid="ref99">99</xref>,<xref ref-type="bibr" rid="ref114">114</xref>] and twice for PD [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Terminology inconsistencies were identified in only 12 studies. Consequently, while there remains some degree of inconsistency, a significant majority of 102 papers adhered to the definitions identified as commonly used terminology.</p>
      </sec>
      <sec>
        <title>Language of Study</title>
        <p>Considering the latest developments in NLP research encompassing languages beyond English [<xref ref-type="bibr" rid="ref124">124</xref>], reporting the language of study is crucial. Several papers do not explicitly state the language of study. In some cases, the language can be inferred from prompt illustrations or examples. In the least informative cases, only the data set of the study is disclosed, indirectly hinting at the language.</p>
        <p><xref ref-type="table" rid="table2">Table 2</xref> illustrates the language distribution among the selected papers, noting whether languages are explicitly mentioned, implicitly inferred from prompt illustrations, or simply not stated but implied from the used data set. The language used in 2 papers [<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref68">68</xref>] remains unknown.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Frequency distribution of papers across various languages. The table also depicts the frequency distribution across venues for papers studying English (N=114).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="270"/>
            <col width="160"/>
            <col width="190"/>
            <col width="210"/>
            <col width="140"/>
            <thead>
              <tr valign="bottom">
                <td colspan="2">Language and type of venue</td>
                <td>Stated<sup>a</sup>, n (%)</td>
                <td>Inferred<sup>b</sup>, n (%)</td>
                <td>Not stated<sup>c</sup>, n (%)</td>
                <td>Total, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="6">
                  <bold>English</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>37 (32.5)</td>
                <td>48 (42.1)</td>
                <td>11 (9.6)</td>
                <td>96 (84.2)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Medical informatics</td>
                <td>16 (14)</td>
                <td>9 (7.9)</td>
                <td>2 (1.8)</td>
                <td>27 (23.7)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Computer science</td>
                <td>8 (7)</td>
                <td>18 (15.8)</td>
                <td>1 (0.9)</td>
                <td>27 (23.7)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Preprint</td>
                <td>9 (7.9)</td>
                <td>12 (10.5)</td>
                <td>5 (4.4)</td>
                <td>26 (22.8)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Clinical</td>
                <td>1 (0.9)</td>
                <td>8 (7)</td>
                <td>3 (2.6)</td>
                <td>12 (10.5)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>3 (2.6)</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>4 (3.5)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Chinese</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>18 (15.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>18 (15.8)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>French</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Dutch</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Japanese</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Portuguese</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Italian</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Spanish</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Korean</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Basque</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>German</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Swedish</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Polish</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Vietnamese</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Unknown</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>All</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
                <td>2 (1.8)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>Stated in the paper.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>Inferred from prompt figures and examples.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>Inferred from the data set.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>Notably, English dominates with 84.2% (n=96) of the selected papers, followed by Chinese at 15.8% (n=18). The other languages are relatively rare, often appearing in studies featuring multiple languages. It is worth mentioning that languages besides English are usually explicitly stated, with the exception of a paper studying Korean [<xref ref-type="bibr" rid="ref63">63</xref>]. In total, the language had to be inferred from prompt figures and examples in 48 papers, all in English.</p>
      </sec>
      <sec>
        <title>Choice of LLMs</title>
        <p>Given the diverse array of LLMs available, spanning general or medical, open-source or proprietary, and monolingual or multilingual models, alongside various architectural configurations (encoder, decoder, or both), our study investigates LLM selection across prompt paradigms.</p>
        <p><xref rid="figure3" ref-type="fig">Figure 3</xref> outlines prevalent LLMs categorized by prompt paradigms, though it is not exhaustive and only includes commonly encountered architectures. For example, while encoder-decoder models are absent in PT in <xref rid="figure3" ref-type="fig">Figure 3</xref>, there are a few instances where they are used [<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref110">110</xref>].</p>
        <p>ChatGPT’s popularity in PD is unsurprising, given its accessibility. Models from Google, PaLM, and Bard (subsequently rebranded Gemini), all falling under closed models, are also prominent. Among open-source instruct-based LLMs, fewer are used, notably those based on LLaMA-2 with 7 occurrences.</p>
        <p>In PL, encoder models, those following the BERT architecture, dominate, covering both general and specialized variants. There are occasional uses of decoder models like GPT-2 in PL-based tasks [<xref ref-type="bibr" rid="ref103">103</xref>,<xref ref-type="bibr" rid="ref105">105</xref>]. PT involves all model types, with a preference toward encoders. Further details on the models used are available in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Involved large language models in the prompt engineering studies, covering all prompt paradigms. The number of studies that fit in a node is shown in parentheses. BERT: Bidirectional Encoder Representations From Transformers; RoBERTa: Robustly Optimized BERT Pre-training Approach; T5: Text-to-Text Transfer Transformer.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Topic Domain and NLP Task Trends</title>
        <p><xref rid="figure4" ref-type="fig">Figure 4</xref> [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref88">88</xref>-<xref ref-type="bibr" rid="ref123">123</xref>] illustrates the target tasks used in the PL and PT papers. PL-focused papers predominantly address classification-based tasks such as text classification, named entity recognition, and relation extraction, with text classification being particularly prominent. This aligns with the nature of PL, which centers around an MLM objective. Among other tasks, a study based on text generation [<xref ref-type="bibr" rid="ref111">111</xref>] makes use of PL to predict masked tokens from partial patient records, aiming to generate synthetic electronic health records. Conversely, PT papers tend to exhibit a slightly broader range of tasks.</p>
        <p><xref rid="figure5" ref-type="fig">Figure 5</xref> [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref87">87</xref>] presents the same analysis for PD-based papers. Unlike PL and PT, a prominent trend observed is that several studies focus on real-world board examinations. Notably, these studies predominantly center around tasks involving answering multiple-choice questions (MCQs). It is worth noting that although MCQs might be cast as a classification task, in practice, it is cast as a generation task using causal LLMs. It is interesting to note that none of the selected PD papers propose the task of entity linking, despite the clear opportunity of leveraging LLMs’ in-context learning ability for medical entity linking.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Overview of selected prompt learning and prompt tuning papers, showcasing natural language processing tasks alongside their topic domain (it includes tasks, such as text simplification, where none of the selected papers specifically focused on these tasks). Numbers within square brackets are reference citations.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Overview of selected prompt design papers, showcasing natural language processing tasks alongside their topic domain. Numbers within square brackets are reference citations.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Prompt Engineering Techniques</title>
        <p>We extensively investigated the used prompt techniques: among PD papers, 49 studies used zero-shot prompting, 23 used few-shot prompting, and 10 used one-shot prompting. Few shot tends to outperform in MCQs, but its advantage over zero shot is inconsistent in other NLP tasks. We propose a comprehensive summary of the existing techniques in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
        <p>As shown in <xref ref-type="table" rid="table3">Table 3</xref>, chain-of-thought (CoT) prompting [<xref ref-type="bibr" rid="ref2">2</xref>] stands as the most common technique, followed by the persona pattern. In medical MCQs, various attempts with CoT can lead to different reasoning pathways and answers. Hence, to improve accuracy, 2 studies [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>] used self-consistency, a method involving using multiple CoT prompts and selecting the most frequently occurring answer through voting.</p>
        <p>Flipped interaction was used for simulation tasks, such as doctor-patient engagement [<xref ref-type="bibr" rid="ref60">60</xref>] or to provide clinical training to medical students [<xref ref-type="bibr" rid="ref81">81</xref>]. Emotion enhancement was applied in mental health contexts [<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref60">60</xref>], allowing the LLM to produce emotional statements.</p>
        <p>More innovative prompt engineering techniques include k-nearest neighbor few-shot prompting [<xref ref-type="bibr" rid="ref19">19</xref>] and pseudoclassification prompting [<xref ref-type="bibr" rid="ref78">78</xref>]. The former uses the k-nearest neighbor algorithm to select the k-closest examples in a large annotated data set based on the input before using them in the prompt, and the latter presents to the LLMs all possible labels, asking the model to respond with a binary output for each provided label. Despite its potential, tree-of-thoughts pattern use was limited, with only 1 instance found among the papers [<xref ref-type="bibr" rid="ref77">77</xref>].</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Most recurrent prompt techniques found, with the corresponding description, template, and references.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="150"/>
            <col width="230"/>
            <col width="260"/>
            <col width="100"/>
            <col width="260"/>
            <thead>
              <tr valign="top">
                <td>Prompt techniques</td>
                <td>Description</td>
                <td>Prompt template examples</td>
                <td>Count papers</td>
                <td>References</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Chain-of-thought (CoT)</td>
                <td>Asking the large language model (LLM) to provide the reasoning before answering.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Basic CoT: “&lt;Prompt&gt;. Think step by step.”</p>
                    </list-item>
                    <list-item>
                      <p>Another example of CoT: “Solve this math problem. E.g.: You have 3 apples and buy 2 more, how many apples do you have? Solution: Start with 3 apples. Buy 2 more apples. Total apples is 3 + 2 = 5. New problem: You have 5 oranges and give away 2, how many oranges do you have left?”</p>
                    </list-item>
                  </list>
                </td>
                <td>17</td>
                <td>[<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref39">39</xref>, <xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref75">75</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref83">83</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Persona (role-defining)</td>
                <td>Assigning the LLM a particular role to accomplish a task related to that role.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“Act as X (e.g. Act as a Physician, Act as a Psychiatrist, etc).”</p>
                    </list-item>
                  </list>
                </td>
                <td>10</td>
                <td>[<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>,<xref ref-type="bibr" rid="ref59">59</xref>-<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref84">84</xref>,<xref ref-type="bibr" rid="ref85">85</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Ensemble prompting</td>
                <td>Using multiple independent prompts to answer the same question. The final output is decided by majority vote.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“Prompt1, Output1, Prompt2, Output2, […], Promptk, Outputk” Final output: Vote</p>
                    </list-item>
                  </list>
                </td>
                <td>4</td>
                <td>[<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Scene-defining</td>
                <td>Simulating a scene related to the addressed task.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“you are in a hospital, in front of a patient ...”</p>
                    </list-item>
                  </list>
                </td>
                <td>3</td>
                <td>[<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref61">61</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Prompt-chaining</td>
                <td>Separating a task into multiple subtasks, each resolved with a prompt.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“Prompt1-&gt;Output1, Output1+Prompt2 -&gt;Output2, [...] Outputk-1+Promptk-&gt; Outputk”</p>
                    </list-item>
                  </list>
                </td>
                <td>3</td>
                <td>[<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref84">84</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Flipped interaction</td>
                <td>Making the LLM take the lead (eg, asking questions) and the user interacting with it passively.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“I would like you to ask me questions to achieve X. You should ask questions until &lt;condition/goal&gt; is met.”</p>
                    </list-item>
                  </list>
                </td>
                <td>2</td>
                <td>[<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref81">81</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Emotion enhancement</td>
                <td>Making the LLM express human-like emotions to a greater or lesser extent.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“You can have emotional fluctuations during the conversation.”</p>
                    </list-item>
                  </list>
                </td>
                <td>2</td>
                <td>[<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Prompt refinement</td>
                <td>Using the LLM to refine the prompt such as translating the prompt or rephrasing it.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“Please translate in English / rephrase this prompt: &lt;P&gt;.”</p>
                    </list-item>
                  </list>
                </td>
                <td>2</td>
                <td>[<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Retrieval-augmented generation</td>
                <td>Combining an information retrieval component with a generative LLM. Snippets extracted from documents are fed into the system along with the input prompt to generate an enriched output.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“&lt;List of relevant Snippets&gt; &lt;Input Prompt&gt;”</p>
                    </list-item>
                  </list>
                </td>
                <td>2</td>
                <td>[<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]</td>
              </tr>
              <tr valign="top">
                <td>Self-consistency (CoT ensembling)</td>
                <td>Ensemble prompting each prompt using CoT. Ideal if a problem has many possible reasoning paths.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>“CoT_Pr1, Output1, CoT_Pr2, Output2, ..., CoT_Prk, Outputk” Final output: Vote</p>
                    </list-item>
                  </list>
                </td>
                <td>2</td>
                <td>[<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Emerging Trends</title>
        <p><xref rid="figure6" ref-type="fig">Figure 6</xref> illustrates a chronological polar pie chart of selected papers and their citation connections, identifying five highly cited papers: (1) Agrawal et al [<xref ref-type="bibr" rid="ref40">40</xref>] demonstrate GPT-3’s clinical task performance, especially in named entity recognition and relation extraction through thorough PD. (2) Kung et al [<xref ref-type="bibr" rid="ref36">36</xref>] evaluate ChatGPT’s (GPT-3.5) ability for the United States Medical Licensing Examination, shortly after the public release of ChatGPT. (3) Singhal et al [<xref ref-type="bibr" rid="ref20">20</xref>] introduce MultiMedQA and HealthSearchQA benchmarks. The paper also presents instruction PT for domain alignment, a novel paradigm that entails learning a soft prompt prior to the LLM general instruction, which is usually written as a hard prompt. Using this approach on FlanPaLM led to the development of Med-PaLM, improving question answering over FlanPaLM. (4) Nori et al [<xref ref-type="bibr" rid="ref27">27</xref>] evaluate GPT-4 on the United States Medical Licensing Examination and MultiMedQA, surpassing previous state-of-the-art results, including GPT-3.5 and Med-PaLM. (5) Luo et al [<xref ref-type="bibr" rid="ref26">26</xref>] release BioGPT, a fine-tuned variant of GPT-2 for biomedical tasks, achieving state-of-the-art results on 6 biomedical NLP tasks with suffix-based PT.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>A chronological chart showing the selected papers across the 3 prompt-based paradigms. Papers are classified by different colors according to the venues in which they were published. Different shapes illustrate whether the LLM is fine-tuned, frozen, or both. Solid or striped color indicates whether authors used a nonprompt baseline (including humans) for evaluation. Arrows connecting 2 papers denote direct citations. The nodes in the border of PD, PL, or PT are studies proposing the 2 involved prompt engineering paradigms. Numbers within square brackets are reference citations. LLM: large language model; PD: prompt design; PL: prompt learning; PT: prompt tuning.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e60501_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Trends in PD</title>
        <p>As shown in <xref rid="figure6" ref-type="fig">Figure 6</xref>, the PD paradigm presents multiple trends: all papers disseminated in clinical-based venues adhere to this paradigm, as do 27 of 33 (82%) of the encountered preprints. Furthermore, we observed a significant focus on work involving frozen LLMs within the PD domain. This trend is likely due to the frequent use of ChatGPT in 74 instances, as depicted in <xref rid="figure3" ref-type="fig">Figure 3</xref>, despite OpenAI offering fine-tuning capabilities for the model. It is worth mentioning that 46 of 78 (59%) PD papers do not include any baseline, including human comparison. This gap will be further explored in a subsequent section.</p>
      </sec>
      <sec>
        <title>Trends in PL and PT</title>
        <p>Among PL and PT papers, computer science and medical informatics are the most prevalent venues. Although PL has drawn attention to the idea of adapting the MLM objective to downstream tasks without needing to further update the LLM weights, many studies still opt to fine-tune their LLMs, with a nonnegligible number of them evaluating in few-shot settings [<xref ref-type="bibr" rid="ref89">89</xref>,<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref112">112</xref>]. Unlike PD, PL and PT usually include a baseline, with it often being a traditional fine-tuning version of the evaluated model [<xref ref-type="bibr" rid="ref92">92</xref>,<xref ref-type="bibr" rid="ref93">93</xref>,<xref ref-type="bibr" rid="ref95">95</xref>] to compare it against novel prompt-based paradigms. These studies came to a common conclusion: PL is a promising alternative to traditional fine-tuning in few-shot scenarios.</p>
        <p>There are 2 ways for conducting PL: one involves filling in the blanks within a text, known as cloze prompts, while the other consists in predicting masked tokens at the end of the sequence, referred to as prefix prompts. A distinct advantage of the latter approach is its compatibility with autoregressive models, as they exclusively predict the appended masks. Among the 29 PL papers, 21 (72%) of them propose cloze prompts, while 15 (52%) use prefix prompting. The involved NLP tasks are well-distributed across these 2 prompt patterns. Another crucial component of PL is the verbalizer. As PL revolves around predicting masked tokens, classification-based tasks require mapping manually selected relevant tokens to each class (manual verbalizer). Alternatively, some studies propose a soft verbalizer, akin to soft prompts, which automatically determines the most relevant token embedding for each label through training. Of the 29 PL papers selected, 16 (55%) studies explicitly mention the use of a manual verbalizer, while 2 explored both verbalizers to assess performance [<xref ref-type="bibr" rid="ref101">101</xref>,<xref ref-type="bibr" rid="ref110">110</xref>]. Only 1 exclusively used a soft verbalizer [<xref ref-type="bibr" rid="ref89">89</xref>]. Another study does not use any verbalizer, as it focuses on generating synthetic data by filling the blanks [<xref ref-type="bibr" rid="ref111">111</xref>]. Notably, 8 (28%) studies did not report any mention regarding the verbalizer methodology.</p>
        <p>Hard prompts, which are related to PD and PL, involve manually crafted prompts. Regarding PT, optimal prompts are attainable through soft prompting (ie, prompts that are trained on a training data set), yet, determining the appropriate soft prompt length remains obscure. In total, 5 of 19 (26%) PT studies tried various soft prompt lengths and reported their corresponding performances [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref105">105</xref>,<xref ref-type="bibr" rid="ref118">118</xref>,<xref ref-type="bibr" rid="ref119">119</xref>,<xref ref-type="bibr" rid="ref122">122</xref>]. While there is no definitive optimal prompt length, a trend emerges: optimal soft prompt length typically exceeds 10 tokens. Surprisingly, 8 (42%) papers omit reporting the soft prompt length. Regarding the placement of soft prompts in relation to the input and the mask, consensus is lacking. A total of 5 (26%) papers prepend the soft prompt at the input’s outset, while 4 (21%) append it as a suffix. One paper uses both strategies in a single prompt template [<xref ref-type="bibr" rid="ref95">95</xref>]. Some innovative methods involve inserting a single soft prompt for each entity that needs to be identified in entity-linking tasks or using token-wise soft prompts, where each token in the textual input is accompanied by a distinct soft prompt. The position of soft prompts remains unreported in 5 (26%) studies. Finally, according to the 6 (32%) studies that used mixed prompts [<xref ref-type="bibr" rid="ref90">90</xref>,<xref ref-type="bibr" rid="ref91">91</xref>,<xref ref-type="bibr" rid="ref95">95</xref>,<xref ref-type="bibr" rid="ref101">101</xref>,<xref ref-type="bibr" rid="ref105">105</xref>,<xref ref-type="bibr" rid="ref110">110</xref>] (a combination of hard and soft prompts), it has consistently been reported that mixed prompts lead to a better performance than hard prompts alone.</p>
      </sec>
      <sec>
        <title>Baseline Comparison</title>
        <p>Only 62 of the screened papers reported comparisons to established baselines. These include traditional deep learning approaches (eg, fine-tuning approach), classical machine learning algorithms (eg, logistic regression), naive systems (eg, majority class), or human annotation. The remaining papers solely explored prompt-related solutions, without including baseline comparisons. <xref ref-type="table" rid="table4">Tables 4</xref>-<xref ref-type="table" rid="table6">6</xref> trace the presence of a nonprompt baseline among different prompt categories (<xref ref-type="table" rid="table4">Table 4</xref>), paper sources (<xref ref-type="table" rid="table5">Table 5</xref>), and NLP tasks addressed (<xref ref-type="table" rid="table6">Table 6</xref>).</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Baseline reports among prompt categories (N=114)<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="190"/>
            <col width="150"/>
            <col width="140"/>
            <col width="150"/>
            <col width="150"/>
            <thead>
              <tr valign="top">
                <td>Prompt category</td>
                <td>No baseline, n (%)</td>
                <td>Higher, n (%)</td>
                <td>Similar, n (%)</td>
                <td>Lower, n (%)</td>
                <td>Total, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Prompt design</td>
                <td>48 (42.1)</td>
                <td>13 (11.4)</td>
                <td>4 (3.5)</td>
                <td>13 (11.4)</td>
                <td>78 (68.4)</td>
              </tr>
              <tr valign="top">
                <td>Prompt learning</td>
                <td>5 (4.4)</td>
                <td>19 (16.7)</td>
                <td>3 (2.6)</td>
                <td>2 (1.8)</td>
                <td>29 (25.4)</td>
              </tr>
              <tr valign="top">
                <td>Prompt tuning</td>
                <td>3 (2.6)</td>
                <td>11 (9.6)</td>
                <td>2 (1.8)</td>
                <td>3 (2.6)</td>
                <td>19 (16.7)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>Higher or lower indicates that the performance of the proposed prompt-based approach is higher or lower than the baseline.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Baseline reports among venues (N=114)<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="190"/>
            <col width="150"/>
            <col width="140"/>
            <col width="150"/>
            <col width="150"/>
            <thead>
              <tr valign="top">
                <td>Type of venue</td>
                <td>No baseline, n (%)</td>
                <td>Higher, n (%)</td>
                <td>Similar, n (%)</td>
                <td>Lower, n (%)</td>
                <td>Total, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Medical informatics</td>
                <td>13 (11.4)</td>
                <td>16 (14)</td>
                <td>2 (1.8)</td>
                <td>2 (1.8)</td>
                <td>33 (28.9)</td>
              </tr>
              <tr valign="top">
                <td>Computer science</td>
                <td>7 (6.1)</td>
                <td>12 (10.5)</td>
                <td>3 (2.6)</td>
                <td>9 (7.9)</td>
                <td>31 (27.2)</td>
              </tr>
              <tr valign="top">
                <td>Preprint</td>
                <td>21 (18.4)</td>
                <td>6 (5.3)</td>
                <td>1 (0.9)</td>
                <td>5 (4.4)</td>
                <td>33 (28.9)</td>
              </tr>
              <tr valign="top">
                <td>Clinical</td>
                <td>13 (11.4)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>13 (11.4)</td>
              </tr>
              <tr valign="top">
                <td>Other</td>
                <td>1 (0.9)</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>4 (3.5)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>Higher or lower indicates that the performance of the proposed prompt-based approach is higher or lower than the baseline.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Baseline reports among addressed NLP<sup>a</sup> tasks (N=114)<sup>b</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="190"/>
            <col width="150"/>
            <col width="140"/>
            <col width="150"/>
            <col width="150"/>
            <thead>
              <tr valign="top">
                <td>NLP task</td>
                <td>No baseline, n (%)</td>
                <td>Higher, n (%)</td>
                <td>Similar, n (%)</td>
                <td>Lower, n (%)</td>
                <td>Total, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Text classification</td>
                <td>13 (11.4)</td>
                <td>18 (15.8)</td>
                <td>4 (3.5)</td>
                <td>11 (9.6)</td>
                <td>46 (40.4)</td>
              </tr>
              <tr valign="top">
                <td>Question answering</td>
                <td>13 (11.4)</td>
                <td>3 (2.6)</td>
                <td>1 (0.9)</td>
                <td>2 (1.8)</td>
                <td>19 (16.7)</td>
              </tr>
              <tr valign="top">
                <td>Relation extraction</td>
                <td>3 (2.6)</td>
                <td>10 (8.8)</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
                <td>16 (14)</td>
              </tr>
              <tr valign="top">
                <td>Information extraction</td>
                <td>10 (8.8)</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>2 (1.8)</td>
                <td>15 (13.2)</td>
              </tr>
              <tr valign="top">
                <td>Multiple-choice question</td>
                <td>10 (8.8)</td>
                <td>3 (2.6)</td>
                <td>1 (0.9)</td>
                <td>1 (0.9)</td>
                <td>15 (13.2)</td>
              </tr>
              <tr valign="top">
                <td>Named entity recognition</td>
                <td>4 (3.5)</td>
                <td>5 (4.4)</td>
                <td>1 (0.9)</td>
                <td>5 (4.4)</td>
                <td>15 (13.2)</td>
              </tr>
              <tr valign="top">
                <td>Text summarization</td>
                <td>7 (6.1)</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>11 (9.6)</td>
              </tr>
              <tr valign="top">
                <td>Reasoning</td>
                <td>5 (4.4)</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>9 (7.9)</td>
              </tr>
              <tr valign="top">
                <td>Generation</td>
                <td>5 (4.4)</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>8 (7)</td>
              </tr>
              <tr valign="top">
                <td>Entity linking</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td>Coreference resolution</td>
                <td>1 (0.9)</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td>Decision support</td>
                <td>2 (1.8)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td>Conversational</td>
                <td>3 (2.6)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>3 (2.6)</td>
              </tr>
              <tr valign="top">
                <td>Text simplification</td>
                <td>1 (0.9)</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>1 (0.9)</td>
                <td>2 (1.8)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table6fn1">
              <p><sup>a</sup>NLP: natural language processing.</p>
            </fn>
            <fn id="table6fn2">
              <p><sup>b</sup>Higher or lower indicates that the performance of the proposed prompt-based approach is higher or lower than the baseline.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>Nonprompt-related baselines are often featured in studies focused on PL and PT but not PD. Additionally, PL and PT have a tendency to perform better than their respective reported baselines, whereas PD tends to report less conclusive results. More specifically, among the 22 papers using either PL or PT with an identical fine-tuned model as a baseline, 17 indicate superior performance with the prompt-based approach, 3 observed comparable performance, and 2 studies noted inferior performance.</p>
        <p>Significantly, papers from computer science venues tend to include more state-of-the-art baselines than those from medical informatics and clinical venues. Specifically, all 13 papers reviewed from clinical venues did not use any nonprompt baselines. Furthermore, there appears to be no consistent link between the type of NLP tasks and the omission of baselines, indicating that the decision to include baselines is more influenced by the evaluation methodology than by feasibility.</p>
      </sec>
      <sec>
        <title>Prompt Optimization</title>
        <p>Numerous studies in the literature highlight the few-shot learning capabilities of LLMs, often referred to as “few-shot prompting,” wherein they demonstrate proficiency in executing tasks with minimal demonstrations provided, typically through text prompts. However, it is crucial to acknowledge that the annotation cost associated with such frameworks might extend beyond the few annotated demonstrations within the prompt. Many studies claiming to explore few-shot or zero-shot learning through prompt engineering rely on extensive annotated validation data sets to refine PD and formulation. This is, for example, the case in the paper that popularized the term “few-shot learning” [<xref ref-type="bibr" rid="ref1">1</xref>]. Among the 45 analyzed papers concentrating on few-shot or zero-shot learning, 5 explicitly detail the optimization of prompt formulation using extensive validation data sets. Conversely, 18 of these papers either do not engage in prompt optimization or test various prompts and document all results. Notably, 22 papers present results using only 1 prompt choice, without clarifying whether this choice was made thanks to additional validation data sets.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Summary of the Findings</title>
        <p>This scoping review aimed to map the current landscape of medical prompt engineering, identifying key themes, gaps, and trends within the existing literature. The primary findings of this study reveal a greater prevalence of PD over PL and PT, with ChatGPT dominating the PD domain. Additionally, many studies omit nonprompt-based baselines, do not specify the language of study, or exhibit a lack of consensus in PL (prefix vs cloze prompt) and PT settings (soft prompt lengths and positions). English is notably dominant as the language of study. These findings suggest that while the field is emerging, there is a pressing need for improved research practices.</p>
      </sec>
      <sec>
        <title>Costs, Infrastructure, and LLMs in Clinical Settings</title>
        <p>Prompt engineering techniques enable competitive performance in scenarios with limited or no resources as well as in environments with low-cost computing infrastructure. As hospital data and infrastructure are often found in this scenario, these approaches hold great promise in the clinical field. <xref rid="figure6" ref-type="fig">Figure 6</xref> shows the absence of PL- and PT-related works in clinical journals. This trend may stem from the widespread accessibility of ChatGPT, favoring PD-focused investigations. Despite efforts like OpenPrompt [<xref ref-type="bibr" rid="ref125">125</xref>] to facilitate PL and PT works, the programming barrier likely deters clinical practitioners. Surprisingly, 7 papers use ChatGPT with sensitive clinical data. Despite the recent availability of ChatGPT Enterprise in GPT-4 for secure data handling, it is apparent that most of these studies have not used this feature since they used GPT-3.5. Limited use of local LLMs, especially LLaMA-based, suggests a need for their increased adoption in future clinical PD studies. The lack of local LLMs may be due to clinicians’ limited computational infrastructure.</p>
      </sec>
      <sec>
        <title>Prompt Engineering Techniques Effectiveness in Medical Research</title>
        <p>In documented prompt engineering techniques, the effectiveness of few-shot prompting compared to zero shot varies by task and scenario. However, CoT shows superior reasoning performance, compelling LLMs to present reasoning pathways and consistently outperforming zero-shot and few-shot methods across PD studies. Its ensemble-based variant, self-consistency, consistently outperforms CoT. Despite the persona pattern’s frequent use, there is a lack of ablation studies on its impact on medical task performance, with only 1 paper reporting negligible improvement [<xref ref-type="bibr" rid="ref61">61</xref>]. Prompt engineering is an emerging field of study that still needs to prove its efficacy. However, almost half of the papers focused only on prompt engineering and failed to report any nonprompt-related baseline performance, despite the availability of such baselines for the addressed NLP tasks. On the whole, the results are far from being systematically in favor of LLM-based methods, greatly attenuating the impression of a technological breakthrough that is generally commented on. Selecting a baseline remains a necessary step toward understanding the actual impact of prompt engineering.</p>
      </sec>
      <sec>
        <title>Bender Rule</title>
        <p>Regarding the languages, while <xref ref-type="table" rid="table2">Table 2</xref> shows the dominance of English in medical literature, many papers studying English fail to explicitly mention the language of study. This oversight is more prevalent in computer science and clinical venues, whereas medical informatics exhibits a more favorable trend, as validated by a chi-square test yielding a <italic>P</italic> value of .02 (Table S1 in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). Notably, languages such as Chinese are consistently mentioned across the 18 selected papers. However, the Bender rule, namely “always name the language(s) you are working on,” seems to be well respected for languages other than English. This finding has already been documented for NLP research in general [<xref ref-type="bibr" rid="ref126">126</xref>].</p>
      </sec>
      <sec>
        <title>Fine-Tuning Versus Prompt-Based Approaches</title>
        <p>While traditional LLM fine-tuning remains a viable method for various NLP tasks, PL and PT are competitive alternatives to fine-tuning, particularly in resource-constrained and low computational scenarios. PL, leveraging predefined prompts to guide model behavior, offers an efficient approach in low-to-no resource environments. Conversely, PT emerges as a viable solution in low computational scenarios, as it requires substantially fewer trainable parameters compared to traditional fine-tuning approaches. Since both prompt-based approaches do not require the LLM to be further trained, they are less prone to catastrophic forgetting [<xref ref-type="bibr" rid="ref127">127</xref>].</p>
      </sec>
      <sec>
        <title>Recommendations for Future Medical Prompt–Based Studies</title>
        <p>For future research in prompt engineering, we propose several recommendations aimed at improving research quality, reporting, and reproducibility. From this review, we identified several trends such as the computational advantages or the lack of evaluations on baselines with a lack of ablation studies to evaluate the performance of the prompting strategies. Some studies do not clearly mention the prompt engineering choices they made. For instance, in PL, choices range from using cloze to prefix prompting and from using manual to soft verbalizer. Similarly, PT is characterized by configurations of soft prompts, such as the length and the positions. To clarify these distinctions and enhance methodological transparency and reproducibility in future research, we have developed reporting guidelines available in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>. Adhering to these reporting guidelines will contribute to advancing prompt engineering methodologies and their practical applications in the medical field.</p>
        <boxed-text id="box1" position="float">
          <title>Detailed reporting guidelines for future prompt engineering studies.</title>
          <p>
            <bold>General reporting recommendations</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>For sensitive data, local large language models (LLMs) should be preferred to the ones that use an application programming interface or a web service.</p>
            </list-item>
            <list-item>
              <p>The language of the study used should be explicitly stated.</p>
            </list-item>
            <list-item>
              <p>The mention of whether the LLM undergoes fine-tuning should be made explicit.</p>
            </list-item>
            <list-item>
              <p>The prompt optimization process and results should be documented to ensure transparency, whether it is through different tested manual prompts or through a validation data set.</p>
            </list-item>
            <list-item>
              <p>The terms “few-shot,” “one-shot,” and “zero-shot” should not be used in settings where the prompts have been optimized on annotated examples.</p>
            </list-item>
            <list-item>
              <p>Experiments should include baseline comparisons or at least mention existing results, particularly when data sets originate from previous medical challenges or benchmarks.</p>
            </list-item>
          </list>
          <p>
            <bold>Specific to prompt learning and prompt tuning</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Concepts (such as prompt learning and prompt tuning) should be defined and used consistently with the consensus.</p>
            </list-item>
            <list-item>
              <p>In prompt learning experiments, the verbalizer used (soft and hard) should be explicitly specified, or a clear justification should be provided if the verbalizer is omitted. Additionally, whether the prompt template follows the cloze or the prefix format should be mentioned.</p>
            </list-item>
            <list-item>
              <p>In prompt tuning experiments, authors should provide details on soft prompt positions, length, and any variations tested, such as incorporating hard or mixed prompts, as part of the ablation study.</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>A limitation was the large number of papers retrieved during the initial search, which was addressed by limiting the search scope to titles, abstracts, and keywords. Furthermore, since some studies may perform prompt engineering techniques without mentioning any of the 4 prompt-related expressions used in the queries, they might be missed by our searches.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Medical prompt engineering is an emerging field with significant potential for enhancing clinical applications, particularly in resource-constrained environments. Despite the promising capabilities demonstrated, there is a pressing need for standardized research practices and comprehensive reporting to ensure methodological transparency and reproducibility. Consistent evaluation against nonprompt-based baselines, prompt optimization documentation, and prompt settings reporting will be crucial for advancing the field. We hope that a better adherence to the recommended guidelines, in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>, will improve our understanding of prompt engineering and enhance the capabilities of LLMs in health care.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews) checklist.</p>
        <media xlink:href="jmir_v26i1e60501_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 515 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Search strategy and statistical analysis.</p>
        <media xlink:href="jmir_v26i1e60501_app2.docx" xlink:title="DOCX File , 20 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Reading notes and details of the reviewed papers.</p>
        <media xlink:href="jmir_v26i1e60501_app3.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 52 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">BERT</term>
          <def>
            <p>Bidirectional Encoder Representations From Transformers</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CoT</term>
          <def>
            <p>chain-of-thought</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">MCQ</term>
          <def>
            <p>multiple-choice question</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">MLM</term>
          <def>
            <p>masked language modeling</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">PD</term>
          <def>
            <p>prompt design</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PL</term>
          <def>
            <p>prompt learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">PRISMA-ScR</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">PT</term>
          <def>
            <p>prompt tuning</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>JZ is financed by the NCCR Evolving Language, a National Centre of Competence in Research, funded by the Swiss National Science Foundation (grant <bold>#</bold>51NF40_180888).</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>JZ and MN performed the screening and data extraction of the papers and synthesized the findings. AN and XT supervised MN. MB and CL supervised JZ. JZ and MN wrote the manuscript with support from MB, AN, XT, and CL. All authors contributed to the analysis of the results. CL conceived the original idea.</p>
      </fn>
      <fn fn-type="conflict">
        <p>CL is the editor-in-chief of <italic>JMIR Medical Informatics</italic>. All other authors have no conflict of interest to declare.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ryder</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Subbiah</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kaplan</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Dhariwal</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Neelakantan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shyam</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sastry</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Askell</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Language models are few-shot learners</article-title>
          <year>2020</year>
          <conf-name>Advances in Neural Information Processing Systems</conf-name>
          <conf-date>December 6, 2020</conf-date>
          <conf-loc>Virtual</conf-loc>
          <fpage>1877</fpage>
          <lpage>1901</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.nips.cc/paper/2020/hash/1457c0d6bfcb4967418bfb8ac142f64a-Abstract.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kojima</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Reid</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Matsuo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasawa</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Large language models are zero-shot reasoners</article-title>
          <year>2022</year>
          <conf-name>Advances in Neural Information Processing Systems</conf-name>
          <conf-date>November 28, 2022</conf-date>
          <conf-loc>New Orleans</conf-loc>
          <fpage>22199</fpage>
          <lpage>22213</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.nips.cc/paper_files/paper/2022/hash/8bb0d291acd4acf06ef112099c16f326-Abstract-Conference.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Hayashi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Neubig</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing</article-title>
          <source>ACM Comput Surv</source>
          <year>2023</year>
          <month>01</month>
          <day>16</day>
          <volume>55</volume>
          <issue>9</issue>
          <fpage>1</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1145/3560815</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>White</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Hays</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sandborn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Olea</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gilbert</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Elnashar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Spencer-Smith</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>DC</given-names>
            </name>
          </person-group>
          <article-title>A prompt pattern catalog to enhance prompt engineering with ChatGPT</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on February 21, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2302.11382v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2302.11382</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Névéol</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dalianis</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Velupillai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Savova</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zweigenbaum</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Clinical natural language processing in languages other than English: opportunities and challenges</article-title>
          <source>J Biomed Semantics</source>
          <year>2018</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>12</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jbiomedsem.biomedcentral.com/articles/10.1186/s13326-018-0179-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13326-018-0179-8</pub-id>
          <pub-id pub-id-type="medline">29602312</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13326-018-0179-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC5877394</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luccioni</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Mind your language (model): fact-checking LLMs and their role in NLP research and practice</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on June 1, 2024</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2308.07120</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thirunavukarasu</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSJ</given-names>
            </name>
            <name name-style="western">
              <surname>Elangovan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
          </person-group>
          <article-title>Large language models in medicine</article-title>
          <source>Nat Med</source>
          <year>2023</year>
          <volume>29</volume>
          <issue>8</issue>
          <fpage>1930</fpage>
          <lpage>1940</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-023-02448-8</pub-id>
          <pub-id pub-id-type="medline">37460753</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-023-02448-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>WX</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Min</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Nie</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>A survey of large language models</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on November 24, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2303.18223"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2303.18223</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lester</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Rfou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Constant</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>The power of scale for parameter-efficient prompt tuning</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</conf-name>
          <conf-date>January 10, 2021</conf-date>
          <conf-loc>Online and Punta Cana, Dominican Republic</conf-loc>
          <fpage>3045</fpage>
          <lpage>3059</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2021.emnlp-main.243</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fries</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Weber</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Seelam</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Altay</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Datta</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Garda</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kusa</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Cahyawijaya</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>BigBIO: a framework for data-centric biomedical natural language processing</article-title>
          <year>2022</year>
          <conf-name>Advances in Neural Information Processing Systems</conf-name>
          <conf-date>November 28, 2022</conf-date>
          <conf-loc>New Orleans</conf-loc>
          <fpage>25792</fpage>
          <lpage>25806</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://proceedings.neurips.cc/paper_files/paper/2022/hash/a583d2197eafc4afdd41f5b8765555c5-Abstract-Datasets_and_Benchmarks.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weisenthal</surname>
              <given-names>SJ</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and post-test probability</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on July 20, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2311.12188"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2311.12188</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ning</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>ProBioRE: a framework for biomedical causal relation extraction based on dual-head prompt and prototypical network</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>
          <conf-date>December 5, 2023</conf-date>
          <conf-loc>Istanbul, Turkiye</conf-loc>
          <fpage>2071</fpage>
          <lpage>2074</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/3n45uwdb"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Belkadi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Micheletti</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Shardlow</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nenadic</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Large language models and control mechanisms improve text readability of biomedical abstracts</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on March 16, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2309.13202"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2309.13202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>From beginner to expert: modeling medical knowledge into general LLMs</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on January 7, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2312.01040"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2312.01040</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ateia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kruschwitz</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <article-title>Is ChatGPT a biomedical expert?</article-title>
          <year>2023</year>
          <conf-name>Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2023)</conf-name>
          <conf-date>September 18-21, 2023</conf-date>
          <conf-loc>Thessaloniki, Greece</conf-loc>
          <fpage>73</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ceur-ws.org/Vol-3497/paper-006.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Belyaeva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cosentino</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hormozdiari</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Eswaran</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shetty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Carroll</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McLean</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Furlotte</surname>
              <given-names>NA</given-names>
            </name>
          </person-group>
          <article-title>Multimodal LLMs for health grounded in individual-specific data</article-title>
          <year>2023</year>
          <conf-name>Machine Learning for Multimodal Healthcare Data</conf-name>
          <conf-date>July 29, 2023</conf-date>
          <conf-loc>Honolulu, Hawaii, United States</conf-loc>
          <fpage>86</fpage>
          <lpage>102</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-031-47679-2_7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ran</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Niu</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>An extensive benchmark study on biomedical text generation and mining with ChatGPT</article-title>
          <source>Bioinformatics</source>
          <year>2023</year>
          <volume>39</volume>
          <issue>9</issue>
          <fpage>btad557</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37682111"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bioinformatics/btad557</pub-id>
          <pub-id pub-id-type="medline">37682111</pub-id>
          <pub-id pub-id-type="pii">7264174</pub-id>
          <pub-id pub-id-type="pmcid">PMC10562950</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mollá</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Large language models and prompt engineering for biomedical query focused multi-document summarisation</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on November 9, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2311.05169"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2311.05169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nori</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>YT</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Carignan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Edgar</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fusi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>McKinney</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Ness</surname>
              <given-names>RO</given-names>
            </name>
            <name name-style="western">
              <surname>Usuyama</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>White</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Can generalist foundation models outcompete special-purpose tuning? Case study in medicine</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on November 28, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2311.16452"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2311.16452</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singhal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Azizi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mahdavi</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>HW</given-names>
            </name>
            <name name-style="western">
              <surname>Scales</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tanwani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cole-Lewis</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Pfohl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Payne</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Seneviratne</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gamble</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Babiker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schärli</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Chowdhery</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mansfield</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Demner-Fushman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Agüera Y Arcas</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Matias</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chou</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gottweis</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tomasev</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Rajkomar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Barral</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Semturs</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Karthikesalingam</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Natarajan</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Large language models encode clinical knowledge</article-title>
          <source>Nature</source>
          <year>2023</year>
          <volume>620</volume>
          <issue>7972</issue>
          <fpage>172</fpage>
          <lpage>180</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37438534"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41586-023-06291-2</pub-id>
          <pub-id pub-id-type="medline">37438534</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41586-023-06291-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC10396962</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Yeganova</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>PT</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Comeau</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Islamaj</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kapoor</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Opportunities and challenges for ChatGPT and large language models in biomedicine and health</article-title>
          <source>Brief Bioinform</source>
          <year>2023</year>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>bbad493</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38168838"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bib/bbad493</pub-id>
          <pub-id pub-id-type="medline">38168838</pub-id>
          <pub-id pub-id-type="pii">7505071</pub-id>
          <pub-id pub-id-type="pmcid">PMC10762511</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmälzle</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence for health message generation: an empirical study using a large language model (LLM) and prompt engineering</article-title>
          <source>Front Commun</source>
          <year>2023</year>
          <volume>8</volume>
          <fpage>1129082</fpage>
          <pub-id pub-id-type="doi">10.3389/fcomm.2023.1129082</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>YH</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kao</surname>
              <given-names>HY</given-names>
            </name>
          </person-group>
          <article-title>IKM_Lab at BioLaySumm Task 1: longformer-based prompt tuning for biomedical lay summary generation</article-title>
          <year>2023</year>
          <conf-name>The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks</conf-name>
          <conf-date>July 13, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.18653/v1/2023.bionlp-1.64</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ruan</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A co-adaptive duality-aware framework for biomedical relation extraction</article-title>
          <source>Bioinformatics</source>
          <year>2023</year>
          <volume>39</volume>
          <issue>5</issue>
          <fpage>btad301</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37220895"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bioinformatics/btad301</pub-id>
          <pub-id pub-id-type="medline">37220895</pub-id>
          <pub-id pub-id-type="pii">7176367</pub-id>
          <pub-id pub-id-type="pmcid">PMC10209527</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Few-shot biomedical named entity recognition via knowledge-guided instance generation and prompt contrastive learning</article-title>
          <source>Bioinformatics</source>
          <year>2023</year>
          <volume>39</volume>
          <issue>8</issue>
          <fpage>btad496</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37549065"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/bioinformatics/btad496</pub-id>
          <pub-id pub-id-type="medline">37549065</pub-id>
          <pub-id pub-id-type="pii">7238215</pub-id>
          <pub-id pub-id-type="pmcid">PMC10444965</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Poon</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>BioGPT: generative pre-trained transformer for biomedical text generation and mining</article-title>
          <source>Brief Bioinform</source>
          <year>2022</year>
          <volume>23</volume>
          <issue>6</issue>
          <fpage>bbac409</fpage>
          <pub-id pub-id-type="doi">10.1093/bib/bbac409</pub-id>
          <pub-id pub-id-type="medline">36156661</pub-id>
          <pub-id pub-id-type="pii">6713511</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nori</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>McKinney</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Carignan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Capabilities of GPT-4 on medical challenge problems</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on April 12, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2303.13375"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2303.13375</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heinz</surname>
              <given-names>MV</given-names>
            </name>
            <name name-style="western">
              <surname>Bhattacharya</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Trudeau</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Quist</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobson</surname>
              <given-names>NC</given-names>
            </name>
          </person-group>
          <article-title>Testing domain knowledge and risk of bias of a large-scale general artificial intelligence model in mental health</article-title>
          <source>Digit Health</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>20552076231170499</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076231170499?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076231170499</pub-id>
          <pub-id pub-id-type="medline">37101589</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076231170499</pub-id>
          <pub-id pub-id-type="pmcid">PMC10123874</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hsieh</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kao</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT incorporated chain-of-thought method in bilingual nuclear medicine physician board examinations</article-title>
          <source>Digit Health</source>
          <year>2024</year>
          <volume>10</volume>
          <fpage>20552076231224074</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076231224074?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076231224074</pub-id>
          <pub-id pub-id-type="medline">38188855</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076231224074</pub-id>
          <pub-id pub-id-type="pmcid">PMC10771043</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casola</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Labruna</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lavelli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Magnini</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Testing ChatGPT for stability and reasoning: a case study using Italian medical specialty tests</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 9th Italian Conference on Computational Linguistics</conf-name>
          <conf-date>November 30-December 2, 2023</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ceur-ws.org/Vol-3596/paper13.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roemer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmood</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Dauer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bellamy</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence model GPT4 narrowly fails simulated radiological protection exam</article-title>
          <source>J Radiol Prot</source>
          <year>2024</year>
          <volume>44</volume>
          <issue>1</issue>
          <fpage>013502</fpage>
          <pub-id pub-id-type="doi">10.1088/1361-6498/ad1fdf</pub-id>
          <pub-id pub-id-type="medline">38232401</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shahab</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Al Shabeeb</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ladak</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>JO</given-names>
            </name>
            <name name-style="western">
              <surname>Nadkarni</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Echavarria</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Babar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shaukat</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Soroush</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>General purpose large language models match human performance on gastroenterology board exam self-assessments</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on September 25, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.09.21.23295918v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.09.21.23295918</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Raut</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zimlichman</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cheetirala</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nadkarni</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Glicksberg</surname>
              <given-names>BS</given-names>
            </name>
            <name name-style="western">
              <surname>Freeman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Timsina</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Klang</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The limits of prompt engineering in medical problem-solving: a comparative analysis with ChatGPT on calculation based USMLE medical questions</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on August 9, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.08.06.23293710v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.08.06.23293710</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sallam</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Salahat</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Eid</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Puladi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Human versus artificial intelligence: ChatGPT-4 outperforming Bing, Bard, ChatGPT-3.5, and humans in clinical chemistry multiple-choice questions</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on January 9, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2024.01.08.24300995v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2024.01.08.24300995</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Savage</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nayak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gallo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rangan</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Diagnostic reasoning prompts reveal the potential for large language model interpretability in medicine</article-title>
          <source>NPJ Digit Med</source>
          <year>2024</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>20</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-024-01010-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-024-01010-1</pub-id>
          <pub-id pub-id-type="medline">38267608</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-024-01010-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC10808088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kung</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Cheatham</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Medenilla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sillos</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>De Leon</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Elepaño</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Madriaga</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aggabao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Diaz-Candido</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Maningo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>e0000198</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812645"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000198</pub-id>
          <pub-id pub-id-type="medline">36812645</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-22-00371</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Nakata</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Aiga</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Etani</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Muramatsu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Katagiri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kawai</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Higashino</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Enomoto</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Noda</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kometani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Takamura</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yoneda</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kakizaki</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nomura</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Performance of generative pretrained transformer on the national medical licensing examination in Japan</article-title>
          <source>PLOS Digit Health</source>
          <year>2024</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>e0000433</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38261580"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000433</pub-id>
          <pub-id pub-id-type="medline">38261580</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-23-00146</pub-id>
          <pub-id pub-id-type="pmcid">PMC10805303</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rosoł</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gąsior</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Łaba</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Korzeniewski</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Młyńczak</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of the performance of GPT-3.5 and GPT-4 on the Polish medical final examination</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>20512</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-46995-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-46995-z</pub-id>
          <pub-id pub-id-type="medline">37993519</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-46995-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC10665355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sivarajkumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kelley</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Samolyk-Mazzanti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Visweswaran</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>An empirical evaluation of prompting strategies for large language models in zero-shot clinical natural language processing</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on September 14, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2309.08008"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/55318</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Agrawal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hegselmann</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sontag</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Large language models are few-shot clinical information extractors</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing</conf-name>
          <conf-date>December 7-11, 2022</conf-date>
          <conf-loc>Abu Dhabi</conf-loc>
          <fpage>1998</fpage>
          <lpage>2022</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2022.emnlp-main.130</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Toward a stable and low-resource PLM-based medical diagnostic system via prompt tuning and MoE structure</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>12595</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-39543-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-39543-2</pub-id>
          <pub-id pub-id-type="medline">37537202</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-39543-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC10400680</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>KLT</given-names>
            </name>
            <name name-style="western">
              <surname>Viacrusis</surname>
              <given-names>PML</given-names>
            </name>
          </person-group>
          <article-title>Bridging the gap or widening the divide: a call for capacity-building in artificial intelligence for healthcare in the Philippines</article-title>
          <source>JMUST</source>
          <year>2023</year>
          <volume>7</volume>
          <issue>2</issue>
          <fpage>1325</fpage>
          <lpage>1334</lpage>
          <pub-id pub-id-type="doi">10.35460/2546-1621.2023-0081</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Nipu</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Madiraju</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Deshpande</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Autocompletion of chief complaints in the electronic health records using large language models</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE International Conference on Big Data (BigData)</conf-name>
          <conf-date>December 15-18, 2023</conf-date>
          <conf-loc>Sorrento, Italy</conf-loc>
          <fpage>4912</fpage>
          <lpage>4921</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/4ajdyddt"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/bigdata59044.2023.10386778</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meoni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ryffel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>De La Clergerie</surname>
              <given-names>É</given-names>
            </name>
          </person-group>
          <article-title>Annotate French clinical data using large language model predictions</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE 11th International Conference on Healthcare Informatics (ICHI)</conf-name>
          <conf-date>June 26-29, 2023</conf-date>
          <conf-loc>Houston, TX, United States</conf-loc>
          <fpage>550</fpage>
          <lpage>557</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/yy2b9fe8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/ichi57859.2023.00099</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meoni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>De la Clergerie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ryffel</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Large language models as instructors: a study on multilingual clinical entity extraction</article-title>
          <year>2023</year>
          <conf-name>The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks</conf-name>
          <conf-date>July 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>178</fpage>
          <lpage>190</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aclanthology.org/2023.bionlp-1.15/"/>
          </comment>
          <pub-id pub-id-type="doi">10.18653/v1/2023.bionlp-1.15</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>LingX at ROCLING 2023 MultiNER-health task: intelligent capture of Chinese medical named entities by LLMs</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)</conf-name>
          <conf-date>October 20-21, 2023</conf-date>
          <conf-loc>Taipei City, Taiwan</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aclanthology.org/2023.rocling-1.44.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Modeling clinical thinking based on knowledge hypergraph attention network and prompt learning for disease prediction</article-title>
          <source>SSRN</source>
          <comment>Preprint posted online on June 30, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4496800"/>
          </comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.4496800</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Jaafar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Do physicians know how to prompt? The need for automatic prompt optimization help in clinical note generation</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on July 5, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2311.09684"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2311.09684</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Zandvoort</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wiersema</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Huibers</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>van Dulmen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brinkkemper</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Enhancing summarization performance through transformer-based prompt engineering in automated medical reporting</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on January 19, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2311.13274"/>
          </comment>
          <pub-id pub-id-type="doi">10.5220/0012422600003657</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Teodoro</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>DS4DH at MEDIQA-Chat 2023: leveraging SVM and GPT-3 prompt engineering for medical dialogue classification and summarization</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 5th Clinical Natural Language Processing Workshop</conf-name>
          <conf-date>June 12, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>536</fpage>
          <lpage>545</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.clinicalnlp-1.57</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Overview of the PromptCBLUE Shared Task in CHIP2023</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on December 29, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2312.17522"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2312.17522</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Caruccio</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cirillo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Polese</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Solimando</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Sundaramurthy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tortora</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Can ChatGPT provide intelligent diagnoses? A comparative study between predictive models and ChatGPT to define a new medical diagnostic bot</article-title>
          <source>Expert Syst Appl</source>
          <year>2024</year>
          <volume>235</volume>
          <fpage>121186</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2023.121186</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Unlocking the secrets behind advanced artificial intelligence language models in deidentifying Chinese-English mixed clinical text: development and validation study</article-title>
          <source>J Med Internet Res</source>
          <year>2024</year>
          <volume>26</volume>
          <fpage>e48443</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2024//e48443/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48443</pub-id>
          <pub-id pub-id-type="medline">38271060</pub-id>
          <pub-id pub-id-type="pii">v26i1e48443</pub-id>
          <pub-id pub-id-type="pmcid">PMC10853853</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bhaumik</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Srivastava</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Jalali</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chandrasekaran</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Mindwatch: a smart cloud-based AI solution for suicide ideation detection leveraging large language models</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on September 26, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.09.25.23296062v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.09.25.23296062</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heston</surname>
              <given-names>TF</given-names>
            </name>
          </person-group>
          <article-title>Safety of large language models in addressing depression</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>12</issue>
          <fpage>e50729</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cureus.com/articles/213293-safety-of-large-language-models-in-addressing-depression.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.50729</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grabb</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The impact of prompt engineering in large language model performance: a psychiatric example</article-title>
          <source>J Med Artif Intell</source>
          <year>2023</year>
          <volume>6</volume>
          <fpage>20</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jmai.amegroups.org/article/view/8190/html"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/jmai-23-71</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Santos</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Paraboni</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Prompt-based mental health screening from social media text</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on May 11, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2401.05912"/>
          </comment>
          <pub-id pub-id-type="doi">10.5753/brasnam.2024.1879</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Kuang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Ananiadou</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Towards interpretable mental health analysis with large language models</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</conf-name>
          <conf-date>December 6-10, 2023</conf-date>
          <conf-loc>Singapore</conf-loc>
          <fpage>6056</fpage>
          <lpage>6077</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.emnlp-main.370</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gabriel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hendler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dey</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Mental-LLM: leveraging large language models for mental health prediction via online text data</article-title>
          <source>Proc ACM Interact Mob Wearable Ubiquitous Technol</source>
          <year>2024</year>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1145/3643540</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>KQ</given-names>
            </name>
            <name name-style="western">
              <surname>Lan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>LLM-empowered chatbots for psychiatrist and patient simulation: application and evaluation</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on May 23, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2305.13614"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2305.13614</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Qi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhai</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Dan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zou</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Supervised learning and large language model benchmarks on mental health datasets: cognitive distortions and suicidal risks in Chinese social media</article-title>
          <source>ResearchSquare</source>
          <comment>Preprint posted online on November 2, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchsquare.com/article/rs-3523508/latest"/>
          </comment>
          <pub-id pub-id-type="doi">10.21203/rs.3.rs-3523508/v1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sambath</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Advancements of artificial intelligence in mental health applications: A comparative analysis of ChatGPT 3.5 and ChatGPT 4</article-title>
          <source>ResearchGate</source>
          <comment>Preprint posted online in December 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/376517245_Advancements_of_Artificial_Intelligence_in_Mental_Health_Applications_A_Comparative_analysis_of_ChatGPT_35_and_ChatGPT_4"/>
          </comment>
          <pub-id pub-id-type="doi">10.13140/RG.2.2.28713.36961</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Jang</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Developing prompts from large language model for extracting clinical information from pathology and ultrasound reports in breast cancer</article-title>
          <source>Radiat Oncol J</source>
          <year>2023</year>
          <volume>41</volume>
          <issue>3</issue>
          <fpage>209</fpage>
          <lpage>216</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37793630"/>
          </comment>
          <pub-id pub-id-type="doi">10.3857/roj.2023.00633</pub-id>
          <pub-id pub-id-type="medline">37793630</pub-id>
          <pub-id pub-id-type="pii">roj.2023.00633</pub-id>
          <pub-id pub-id-type="pmcid">PMC10556835</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Vaid</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Menon</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Freeman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Matteson</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Marin</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Nadkarni</surname>
              <given-names>GN</given-names>
            </name>
          </person-group>
          <article-title>Development of a privacy preserving large language model for automated data extraction from thyroid cancer pathology reports</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on November 8, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.11.08.23298252v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.11.08.23298252</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dennstädt</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hastings</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Putora</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Vu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>GF</given-names>
            </name>
            <name name-style="western">
              <surname>Süveg</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Glatzer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Riggenbach</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Hà</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Cihoric</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Exploring capabilities of large language models such as ChatGPT in radiation oncology</article-title>
          <source>Adv Radiat Oncol</source>
          <year>2024</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>101400</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2452-1094(23)00228-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.adro.2023.101400</pub-id>
          <pub-id pub-id-type="medline">38304112</pub-id>
          <pub-id pub-id-type="pii">S2452-1094(23)00228-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC10831180</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gilbert</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ghanem</surname>
              <given-names>AI</given-names>
            </name>
            <name name-style="western">
              <surname>Siddiqui</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Thind</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Feasibility of using zero-shot learning in transformer-based natural language processing algorithm for key information extraction from head and neck tumor board notes</article-title>
          <source>Int J Radiat Oncol Biol Phys</source>
          <year>2023</year>
          <volume>117</volume>
          <issue>2</issue>
          <fpage>e500</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijrobp.2023.06.1743</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>HW-TSC at SemEval-2023 task 7: exploring the natural language inference capabilities of ChatGPT and pre-trained language model for clinical trial</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 17th International Workshop on Semantic Evaluation (SemEval-2023)</conf-name>
          <conf-date>July 10, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>1603</fpage>
          <lpage>1608</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.semeval-1.221</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nazary</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Deldjoo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Di Noia</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT-HealthPrompt. Harnessing the power of XAI in prompt-based healthcare decision support using ChatGPT</article-title>
          <year>2023</year>
          <conf-name>Artificial Intelligence. ECAI 2023 International Workshops</conf-name>
          <conf-date>September 30-October 4, 2023</conf-date>
          <conf-loc>Kraków, Poland</conf-loc>
          <fpage>382</fpage>
          <lpage>397</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-031-50396-2_22</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Enhancing real-world data extraction in clinical research: evaluating the impact of the implementation of large language models in hospital setting</article-title>
          <source>ResearchSquare</source>
          <comment>Preprint posted online on November 29, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.21203/rs.3.rs-3644810/v2"/>
          </comment>
          <pub-id pub-id-type="doi">10.21203/rs.3.rs-3644810/v2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sarraju</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kalwani</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Dexter</surname>
              <given-names>JP</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of prompts to simplify cardiovascular disease information using a large language model</article-title>
          <source>MedRxiv</source>
          <comment>Preprint posted online on November 9, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.11.08.23298225v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.11.08.23298225</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Azizi</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Goyal</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pedron</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ganesan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ruiperez-Campillo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Deb</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Clopton</surname>
              <given-names>PL</given-names>
            </name>
            <name name-style="western">
              <surname>Baykaner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Narayan</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Optimizing ChatGPT to detect VT recurrence from complex medical notes</article-title>
          <source>Circulation</source>
          <year>2023</year>
          <volume>148</volume>
          <issue>Suppl 1</issue>
          <fpage>A16401</fpage>
          <pub-id pub-id-type="doi">10.1161/circ.148.suppl_1.16401</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chowdhury</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Higham</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McKinnon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ventoura</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>De Pennington</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Can large language models safely address patient questions following cataract surgery?</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 5th Clinical Natural Language Processing Workshop</conf-name>
          <conf-date>June 10, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>131</fpage>
          <lpage>137</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.clinicalnlp-1.17</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kleinig</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kovoor</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Bacchi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>WO</given-names>
            </name>
          </person-group>
          <article-title>How to use large language models in ophthalmology: from prompt engineering to protecting confidentiality</article-title>
          <source>Eye (Lond)</source>
          <year>2024</year>
          <volume>38</volume>
          <issue>4</issue>
          <fpage>649</fpage>
          <lpage>653</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37798360"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41433-023-02772-w</pub-id>
          <pub-id pub-id-type="medline">37798360</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41433-023-02772-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC10920651</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arsenyan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bughdaryan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Shaya</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Small</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shahnazaryan</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Large language models for biomedical knowledge graph construction: information extraction from EMR notes</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on December 9, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2301.12473"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2301.12473</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kwon</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ong</surname>
              <given-names>KT</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Moon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sohn</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sim</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yeo</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Large language models are clinical reasoners: reasoning-aware diagnosis framework with prompt-generated rationales</article-title>
          <year>2024</year>
          <conf-name>Proceedings of the Association for the Advancement of Artificial Intelligence Conference on Artificial Intelligence</conf-name>
          <conf-date>February 20, 2024</conf-date>
          <conf-loc>Vancouver</conf-loc>
          <fpage>18417</fpage>
          <lpage>18425</lpage>
          <pub-id pub-id-type="doi">10.1609/aaai.v38i16.29802</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Text dialogue analysis for primary screening of mild cognitive impairment: development and validation study</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e51501</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e51501/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/51501</pub-id>
          <pub-id pub-id-type="medline">38157230</pub-id>
          <pub-id pub-id-type="pii">v25i1e51501</pub-id>
          <pub-id pub-id-type="pmcid">PMC10787336</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>You</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Are you asking GPT-4 medical questions properly?—Prompt engineering in consistency and reliability with evidence-based guidelines for ChatGPT-4: a pilot study</article-title>
          <source>ResearchSquare</source>
          <comment>Preprint posted online on October 3, 2023</comment>
          <year>2023</year>
          <fpage>1</fpage>
          <lpage>20</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://web.archive.org/web/20240307033415id_/https://assets.researchsquare.com/files/rs-3336823/v1/7c3c8e95-14ee-4a13-b7c0-0cc27487e651.pdf?c=1708503116"/>
          </comment>
          <pub-id pub-id-type="doi">10.21203/rs.3.rs-3336823/v1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zaidat</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lahoti</surname>
              <given-names>YS</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mohamed</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>JS</given-names>
            </name>
          </person-group>
          <article-title>Artificially intelligent billing in spine surgery: an analysis of a large language model</article-title>
          <source>Global Spine J</source>
          <year>2023</year>
          <fpage>21925682231224753</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/abs/10.1177/21925682231224753?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/21925682231224753</pub-id>
          <pub-id pub-id-type="medline">38147047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Datta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Paek</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Manion</surname>
              <given-names>FJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ofoegbu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>AutoCriteria: a generalizable clinical trial eligibility criteria extraction system powered by large language models</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2024</year>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>375</fpage>
          <lpage>385</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37952206"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocad218</pub-id>
          <pub-id pub-id-type="medline">37952206</pub-id>
          <pub-id pub-id-type="pii">7413158</pub-id>
          <pub-id pub-id-type="pmcid">PMC10797270</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>White</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sripitak</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenberg Johansen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Snyder</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>CliniDigest: a case study in large language model based large-scale summarization of clinical trial descriptions</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 2023 ACM Conference on Information Technology for Social Good</conf-name>
          <conf-date>September 6-8, 2023</conf-date>
          <conf-loc>Lisbon, Portugal</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3582515.3609559</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherr</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Halaseh</surname>
              <given-names>FF</given-names>
            </name>
            <name name-style="western">
              <surname>Spina</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Andalib</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rivera</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT interactive medical simulations for early clinical education: case study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e49877</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e49877/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/49877</pub-id>
          <pub-id pub-id-type="medline">37948112</pub-id>
          <pub-id pub-id-type="pii">v9i1e49877</pub-id>
          <pub-id pub-id-type="pmcid">PMC10674152</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akinci D'Antonoli</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Stanzione</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bluethgen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vernuccio</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ugga</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Klontzas</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Cuocolo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cannella</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Koçak</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Large language models in radiology: fundamentals, applications, ethical considerations, risks, and future directions</article-title>
          <source>Diagn Interv Radiol</source>
          <year>2024</year>
          <volume>30</volume>
          <issue>2</issue>
          <fpage>80</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37789676"/>
          </comment>
          <pub-id pub-id-type="doi">10.4274/dir.2023.232417</pub-id>
          <pub-id pub-id-type="medline">37789676</pub-id>
          <pub-id pub-id-type="pmcid">PMC10916534</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wiest</surname>
              <given-names>IC</given-names>
            </name>
            <name name-style="western">
              <surname>Ferber</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>van Treeck</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Juglan</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Carrero</surname>
              <given-names>ZI</given-names>
            </name>
            <name name-style="western">
              <surname>Paech</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kleesiek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ebert</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kather</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>From text to tables: a local privacy preserving large language model for structured information retrieval from medical documents</article-title>
          <source>medRxiv</source>
          <comment>Preprint posted online on December 8, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.medrxiv.org/content/10.1101/2023.12.07.23299648v1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.12.07.23299648</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hamed</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Eid</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alberry</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Exploring ChatGPT's potential in facilitating adaptation of clinical guidelines: a case study of diabetic ketoacidosis guidelines</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>e38784</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37303347"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.38784</pub-id>
          <pub-id pub-id-type="medline">37303347</pub-id>
          <pub-id pub-id-type="pmcid">PMC10249915</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leypold</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Schäfer</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Boos</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Beier</surname>
              <given-names>JP</given-names>
            </name>
          </person-group>
          <article-title>Can AI think like a plastic surgeon? Evaluating GPT-4's clinical judgment in reconstructive procedures of the upper extremity</article-title>
          <source>Plast Reconstr Surg Glob Open</source>
          <year>2023</year>
          <volume>11</volume>
          <issue>12</issue>
          <fpage>e5471</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38093728"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/GOX.0000000000005471</pub-id>
          <pub-id pub-id-type="medline">38093728</pub-id>
          <pub-id pub-id-type="pmcid">PMC10718352</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Weng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Enhancing phenotype recognition in clinical notes using large language models: PhenoBCBERT and PhenoGPT</article-title>
          <source>Patterns (N Y)</source>
          <year>2024</year>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>100887</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2666-3899(23)00288-X"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.patter.2023.100887</pub-id>
          <pub-id pub-id-type="medline">38264716</pub-id>
          <pub-id pub-id-type="pii">S2666-3899(23)00288-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC10801236</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xiong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A novel approach to nursing clinical intelligent decision-making: integration of large language models and local knowledge bases</article-title>
          <source>ResearchSquare</source>
          <comment>Preprint posted online on December 8, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://assets-eu.researchsquare.com/files/rs-3756467/v1/264ae798-f8f8-4953-9351-9f4a4c2cf87b.pdf?c=1703192974"/>
          </comment>
          <pub-id pub-id-type="doi">10.21203/rs.3.rs-3756467/v1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>A medical question classification approach based on prompt tuning and contrastive learning</article-title>
          <year>2023</year>
          <conf-name>The Thirty Fifth International Conference on Software Engineering and Knowledge Engineering (SEKE 2023)</conf-name>
          <conf-date>July 1-10, 2023</conf-date>
          <conf-loc>San Francisco, CA, United States</conf-loc>
          <fpage>632</fpage>
          <lpage>635</lpage>
          <pub-id pub-id-type="doi">10.18293/seke2023-025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Biomedical document relation extraction with prompt learning and KNN</article-title>
          <source>J Biomed Inform</source>
          <year>2023</year>
          <volume>145</volume>
          <fpage>104459</fpage>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2023.104459</pub-id>
          <pub-id pub-id-type="medline">37531999</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(23)00180-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Xiang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>BioPRO: context-infused prompt learning for biomedical entity linking</article-title>
          <source>IEEE/ACM Trans Audio Speech Lang Process</source>
          <year>2024</year>
          <volume>32</volume>
          <issue>2023</issue>
          <fpage>374</fpage>
          <lpage>385</lpage>
          <pub-id pub-id-type="doi">10.1109/taslp.2023.3331149</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>CPK-Adapter: infusing medical knowledge into K-adapter with continuous prompt</article-title>
          <year>2023</year>
          <conf-name>2023 8th International Conference on Intelligent Computing and Signal Processing (ICSP)</conf-name>
          <conf-date>April 21-23, 2023</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <fpage>1017</fpage>
          <lpage>1023</lpage>
          <pub-id pub-id-type="doi">10.1109/icsp58490.2023.10248750</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yeh</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Lavergne</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zweigenbaum</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Decorate the examples: a simple method of prompt design for biomedical relation extraction</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the Thirteenth Language Resources and Evaluation Conference</conf-name>
          <conf-date>June 20-25, 2022</conf-date>
          <conf-loc>Marseille, France</conf-loc>
          <fpage>3780</fpage>
          <lpage>3787</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aclanthology.org/2022.lrec-1.403"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Su</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>EPTQA: a Chinese medical prompt learning method based on entity pair type question answering</article-title>
          <source>SSRN</source>
          <year>2023</year>
          <fpage>24</fpage>
          <pub-id pub-id-type="doi">10.2139/ssrn.4563840</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bhalerao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>GraphPrompt: graph-based prompt templates for biomedical synonym prediction</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the Association for the Advancement of Artificial Intelligence Conference on Artificial Intelligence</conf-name>
          <conf-date>February 7, 2023</conf-date>
          <conf-loc>Washington, DC, United States</conf-loc>
          <fpage>10576</fpage>
          <lpage>10584</lpage>
          <pub-id pub-id-type="doi">10.1609/aaai.v37i9.26256</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Stefanidis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Improving biomedical claim detection using prompt learning approaches</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE 4th International Conference on Pattern Recognition and Machine Learning (PRML)</conf-name>
          <conf-date>August 4-6, 2023</conf-date>
          <conf-loc>Urumqi, China</conf-loc>
          <fpage>369</fpage>
          <lpage>376</lpage>
          <pub-id pub-id-type="doi">10.1109/prml59573.2023.10348317</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Improving biomedical entity linking with cross-entity interaction</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the Association for the Advancement of Artificial Intelligence Conference on Artificial Intelligence</conf-name>
          <conf-date>February 7, 2023</conf-date>
          <conf-loc>Washington, DC, United States</conf-loc>
          <fpage>13869</fpage>
          <lpage>13877</lpage>
          <pub-id pub-id-type="doi">10.1609/aaai.v37i11.26624</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Medical text classification based on the discriminative pre-training model and prompt-tuning</article-title>
          <source>Digit Health</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076231193213?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076231193213</pub-id>
          <pub-id pub-id-type="medline">37559830</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076231193213</pub-id>
          <pub-id pub-id-type="pmcid">PMC10408339</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mao</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Open-world biomedical knowledge probing and verification</article-title>
          <year>2023</year>
          <conf-name>Proceedings of The 12th International Joint Conference on Knowledge Graphs (IJCKG-23)</conf-name>
          <conf-date>December 8-9, 2023</conf-date>
          <conf-loc>Tokyo, Japan</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ijckg2023.knowledge-graph.jp/pages/proc/paper_3.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref99">
        <label>99</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Potash</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Naumann</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Prompt discriminative language models for domain adaptation</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 5th Clinical Natural Language Processing Workshop</conf-name>
          <conf-date>July 14, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>247</fpage>
          <lpage>258</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.clinicalnlp-1.30</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref100">
        <label>100</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Towards more generalizable and accurate sentence classification in medical abstracts with less data</article-title>
          <source>J Healthc Inform Res</source>
          <year>2023</year>
          <volume>7</volume>
          <issue>4</issue>
          <fpage>542</fpage>
          <lpage>556</lpage>
          <pub-id pub-id-type="doi">10.1007/s41666-023-00141-6</pub-id>
          <pub-id pub-id-type="medline">37927376</pub-id>
          <pub-id pub-id-type="pii">141</pub-id>
          <pub-id pub-id-type="pmcid">PMC10620359</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref101">
        <label>101</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Joyce</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Kormilitzin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nevado-Holgado</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Clinical prompt learning with frozen language models</article-title>
          <source>IEEE Trans Neural Netw Learn Syst</source>
          <year>2023</year>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1109/TNNLS.2023.3294633</pub-id>
          <pub-id pub-id-type="medline">37566498</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref102">
        <label>102</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Landi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Alleva</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Valentine</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Lepow</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Charney</surname>
              <given-names>AW</given-names>
            </name>
          </person-group>
          <article-title>Clinical text deduplication practices for efficient pretraining and improved clinical tasks</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on December 14, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2312.09469"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2312.09469</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref103">
        <label>103</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sivarajkumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>HealthPrompt: a zero-shot learning paradigm for clinical natural language processing</article-title>
          <source>AMIA Annu Symp Proc</source>
          <year>2022</year>
          <volume>2022</volume>
          <fpage>972</fpage>
          <lpage>981</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37128372"/>
          </comment>
          <pub-id pub-id-type="medline">37128372</pub-id>
          <pub-id pub-id-type="pii">123</pub-id>
          <pub-id pub-id-type="pmcid">PMC10148337</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref104">
        <label>104</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sivarajkumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of HealthPrompt for zero-shot clinical text classification</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE 11th International Conference on Healthcare Informatics (ICHI)</conf-name>
          <conf-date>June 26-29, 2023</conf-date>
          <conf-loc>Houston, TX, United States</conf-loc>
          <fpage>492</fpage>
          <lpage>494</lpage>
          <pub-id pub-id-type="doi">10.1109/ichi57859.2023.00081</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref105">
        <label>105</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Intent-aware prompt learning for medical question summarization</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>
          <conf-date>December 6-8, 2022</conf-date>
          <conf-loc>Las Vegas, NV, United States</conf-loc>
          <fpage>672</fpage>
          <lpage>679</lpage>
          <pub-id pub-id-type="doi">10.1109/bibm55620.2022.9995317</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref106">
        <label>106</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alleva</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Landi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Shaw</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Böttinger</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fuchs</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ensari</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Keyword-optimized template insertion for clinical information extraction via prompt-based learning</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on October 31, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2310.20089"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2310.20089</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref107">
        <label>107</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Language inference-based learning for low-resource Chinese clinical named entity recognition using language model</article-title>
          <source>J Biomed Inform</source>
          <year>2024</year>
          <volume>149</volume>
          <fpage>104559</fpage>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2023.104559</pub-id>
          <pub-id pub-id-type="medline">38056702</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(23)00280-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref108">
        <label>108</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>SA</given-names>
            </name>
          </person-group>
          <article-title>MED-Prompt: a novel prompt engineering framework for medicine prediction on free-text clinical notes</article-title>
          <source>J King Saud Univ Comput Inf Sci</source>
          <year>2024</year>
          <volume>36</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jksuci.2024.101933</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref109">
        <label>109</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>MedKPL: a heterogeneous knowledge enhanced prompt learning framework for transferable diagnosis</article-title>
          <source>J Biomed Inform</source>
          <year>2023</year>
          <volume>143</volume>
          <fpage>104417</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(23)00138-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2023.104417</pub-id>
          <pub-id pub-id-type="medline">37315832</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(23)00138-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref110">
        <label>110</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nenadic</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>MedTem2.0: prompt-based temporal classification of treatment events from discharge summaries</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)</conf-name>
          <conf-date>July 10-12, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>160</fpage>
          <lpage>183</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.acl-srw.27</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref111">
        <label>111</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>PromptEHR: conditional electronic healthcare records generation with prompt learning</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, Association for Computational Linguistics</conf-name>
          <conf-date>December 7-11, 2022</conf-date>
          <conf-loc>Abu Dhabi, United Arab Emirates</conf-loc>
          <fpage>2873</fpage>
          <lpage>2885</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2022.emnlp-main.185</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref112">
        <label>112</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Majety</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rousseau</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Shih</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Trustworthy assertion classification through prompting</article-title>
          <source>J Biomed Inform</source>
          <year>2022</year>
          <volume>132</volume>
          <fpage>104139</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(22)00153-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2022.104139</pub-id>
          <pub-id pub-id-type="medline">35811026</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(22)00153-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC9378721</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref113">
        <label>113</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Tsai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Druhl</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Reisman</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Automated identification of eviction status from electronic health record notes</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2023</year>
          <volume>30</volume>
          <issue>8</issue>
          <fpage>1429</fpage>
          <lpage>1437</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37203429"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocad081</pub-id>
          <pub-id pub-id-type="medline">37203429</pub-id>
          <pub-id pub-id-type="pii">7172838</pub-id>
          <pub-id pub-id-type="pmcid">PMC10354775</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref114">
        <label>114</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kwon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Druhl</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sung</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Reisman</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kerns</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Becker</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>ODD: a benchmark dataset for the natural language processing based opioid related aberrant behavior detection</article-title>
          <year>2024</year>
          <conf-name>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies</conf-name>
          <conf-date>June 16, 2024</conf-date>
          <conf-loc>Mexico City</conf-loc>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2024.naacl-long.244</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref115">
        <label>115</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Su</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>EGDE: a framework for bridging the gap in medical zero-shot relation triplet extraction</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>
          <conf-date>December 5-8, 2023</conf-date>
          <conf-loc>Istanbul, Türkiye</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bibm58861.2023.10385666</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref116">
        <label>116</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>You</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>BioKnowPrompt: incorporating imprecise knowledge into prompt-tuning verbalizer with biomedical text for relation extraction</article-title>
          <source>Inf Sci</source>
          <year>2022</year>
          <volume>617</volume>
          <fpage>346</fpage>
          <lpage>358</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ins.2022.10.063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref117">
        <label>117</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>KE</given-names>
            </name>
            <name name-style="western">
              <surname>Costa</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Bian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Generative large language models are all-purpose text analytics engines: text-to-text learning is all your need</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2024</year>
          <month>09</month>
          <day>01</day>
          <volume>31</volume>
          <issue>9</issue>
          <fpage>1892</fpage>
          <lpage>1903</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://academic.oup.com/jamia/advance-article-abstract/doi/10.1093/jamia/ocae078/7648661?redirectedFrom=fulltext"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocae078</pub-id>
          <pub-id pub-id-type="medline">38630580</pub-id>
          <pub-id pub-id-type="pii">7648661</pub-id>
          <pub-id pub-id-type="pmcid">PMC11339507</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref118">
        <label>118</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>KE</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Model tuning or prompt tuning? A study of large language models for clinical concept and relation extraction</article-title>
          <source>J Biomed Inform</source>
          <year>2024</year>
          <volume>153</volume>
          <fpage>104630</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38548007"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2024.104630</pub-id>
          <pub-id pub-id-type="medline">38548007</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(24)00048-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC11065560</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref119">
        <label>119</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>MVP: optimizing multi-view prompts for medical dialogue summarization</article-title>
          <year>2023</year>
          <conf-name>2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>
          <conf-date>December 5-8, 2023</conf-date>
          <conf-loc>Istanbul, Türkiye</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bibm58861.2023.10385916</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref120">
        <label>120</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rohanian</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Jauncey</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nouriborji</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Gonçalves</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Kartsonaki</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Merson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Clifton</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Using bottleneck adapters to identify cancer in clinical notes under low-resource constraints</article-title>
          <year>2023</year>
          <conf-name>The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks</conf-name>
          <conf-date>July 13, 2023</conf-date>
          <conf-loc>Toronto, Canada</conf-loc>
          <fpage>62</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2023.bionlp-1.5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref121">
        <label>121</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Elfrink</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vagliano</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Abu-Hanna</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Calixto</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes</article-title>
          <source>Artificial Intelligence in Medicine</source>
          <year>2023</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer Nature Switzerland</publisher-name>
          <fpage>193</fpage>
          <lpage>198</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref122">
        <label>122</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh Rawat</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Parameter efficient transfer learning for suicide attempt and ideation detection</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the 13th International Workshop on Health Text Mining and Information Analysis (LOUHI)</conf-name>
          <conf-date>December 7, 2022</conf-date>
          <conf-loc>Abu Dhabi, United Arab Emirates</conf-loc>
          <fpage>108</fpage>
          <lpage>115</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2022.louhi-1.13</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref123">
        <label>123</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mi</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>COSSUM: towards conversation-oriented structured summarization for automatic medical insurance assessment</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining</conf-name>
          <conf-date>August 14-18, 2022</conf-date>
          <conf-loc>Washington, DC, United States</conf-loc>
          <fpage>4248</fpage>
          <lpage>4256</lpage>
          <pub-id pub-id-type="doi">10.1145/3534678.3539116</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref124">
        <label>124</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shaitarova</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zaghir</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lavelli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Krauthammer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rinaldi</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Exploring the latest highlights in medical natural language processing across multiple languages: a survey</article-title>
          <source>Yearb Med Inform</source>
          <year>2023</year>
          <volume>32</volume>
          <issue>1</issue>
          <fpage>230</fpage>
          <lpage>243</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.1055/s-0043-1768726"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0043-1768726</pub-id>
          <pub-id pub-id-type="medline">38147865</pub-id>
          <pub-id pub-id-type="pmcid">PMC10751112</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref125">
        <label>125</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>HT</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>OpenPrompt: an open-source framework for prompt-learning</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on November 3, 2021</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2111.01998"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2111.01998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref126">
        <label>126</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ducel</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Fort</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lejeune</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lepage</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Do we name the languages we study? The #BenderRule in LREC and ACL articles</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the Thirteenth Language Resources and Evaluation Conference</conf-name>
          <conf-date>June 20-25, 2022</conf-date>
          <conf-loc>Marseille, France</conf-loc>
          <fpage>564</fpage>
          <lpage>573</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aclanthology.org/2022.lrec-1.60"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref127">
        <label>127</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>An empirical study of catastrophic forgetting in large language models during continual fine-tuning</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on April 2, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2308.08747"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2308.08747</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
