<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="letter" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v26i1e54607</article-id>
      <article-id pub-id-type="pmid">38764297</article-id>
      <article-id pub-id-type="doi">10.2196/54607</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Research Letter</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Research Letter</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Multimodal ChatGPT-4V for Electrocardiogram Interpretation: Promise and Limitations</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Puladi</surname>
            <given-names>Behrus</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chiu</surname>
            <given-names>Wan Hang Keith</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Amini-Salehi</surname>
            <given-names>Ehsan</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Zhu</surname>
            <given-names>Lingxuan</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0001-9077-408X</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Mou</surname>
            <given-names>Weiming</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0007-1089-6516</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Wu</surname>
            <given-names>Keren</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0000-2397-7808</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lai</surname>
            <given-names>Yancheng</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0004-8444-7535</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Lin</surname>
            <given-names>Anqi</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6324-0410</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Yang</surname>
            <given-names>Tao</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0007-5246-3284</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Zhang</surname>
            <given-names>Jian</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7217-0111</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Luo</surname>
            <given-names>Peng</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Oncology</institution>
            <institution>Zhujiang Hospital</institution>
            <institution>Southern Medical University</institution>
            <addr-line>253 Industrial Avenue</addr-line>
            <addr-line>Guangzhou, 510282</addr-line>
            <country>China</country>
            <phone>86 020 61643888</phone>
            <email>luopeng@smu.edu.cn</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8215-2045</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Oncology</institution>
        <institution>Zhujiang Hospital</institution>
        <institution>Southern Medical University</institution>
        <addr-line>Guangzhou</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Urology</institution>
        <institution>Shanghai General Hospital</institution>
        <institution>Shanghai Jiao Tong University School of Medicine</institution>
        <addr-line>Shanghai</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Medical Oncology</institution>
        <institution>National Cancer Center/National Clinical Research Center for Cancer/Cancer Hospital</institution>
        <institution>Chinese Academy of Medical Sciences and Peking Union Medical College</institution>
        <addr-line>Beijing</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Peng Luo <email>luopeng@smu.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>26</day>
        <month>6</month>
        <year>2024</year>
      </pub-date>
      <volume>26</volume>
      <elocation-id>e54607</elocation-id>
      <history>
        <date date-type="received">
          <day>16</day>
          <month>11</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>29</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>3</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>19</day>
          <month>4</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Lingxuan Zhu, Weiming Mou, Keren Wu, Yancheng Lai, Anqi Lin, Tao Yang, Jian Zhang, Peng Luo. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 26.06.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2024/1/e54607" xlink:type="simple"/>
      <abstract>
        <p>This study evaluated the capabilities of the newly released ChatGPT-4V, a large language model with visual recognition abilities, in interpreting electrocardiogram waveforms and answering related multiple-choice questions for assisting with cardiovascular care.</p>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT</kwd>
        <kwd>ECG</kwd>
        <kwd>electrocardiogram</kwd>
        <kwd>multimodal</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
        <kwd>large language model</kwd>
        <kwd>diagnostic</kwd>
        <kwd>quantitative analysis</kwd>
        <kwd>clinical</kwd>
        <kwd>clinicians</kwd>
        <kwd>ECG interpretation</kwd>
        <kwd>cardiovascular care</kwd>
        <kwd>cardiovascular</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Electrocardiogram (ECG) interpretation is an essential skill in cardiovascular medicine. The rise of artificial intelligence (AI) has led to many attempts to automate ECG interpretations [<xref ref-type="bibr" rid="ref1">1</xref>]. As a representative of generative AI, ChatGPT has shown promising potential in cardiovascular medicine [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. However, since early versions of ChatGPT cannot process graphical information, its ability for ECG interpretation is unclear. The newly released ChatGPT-4V(ision) model adds visual recognition capabilities [<xref ref-type="bibr" rid="ref4">4</xref>], which makes it possible to directly read and interpret ECG waveforms. Therefore, we evaluated the performance of ChatGPT-4V in ECG interpretations.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p>We gathered a set of multiple-choice questions related to ECG waveform interpretation from various question banks, including the American Heart Association Advanced Cardiovascular Life Support exam (February 2016), United States Medical Licensing Examination (USMLE) sample questions, USMLE practice questions available on the AMBOSS platform [<xref ref-type="bibr" rid="ref5">5</xref>], and the Certified EKG Technician practice exam. The 62 ECG-related questions included for analysis involved ECG diagnosis and the ability to determine further treatment plans based on ECG findings and corresponding clinical scenarios. </p>
      <p>ChatGPT was prompted to answer the questions by analyzing the accompanying ECG images; the prompt also stated that ChatGPT was undergoing a diagnostic challenge as a representative of AI to prevent it from refusing to make a diagnosis (see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p>
      <p>ChatGPT was asked each question 3 times to mitigate the effect of randomness in responses in the evaluation. Accuracy was then evaluated based on ChatGPT getting at least 1, 2, or 3 correct answers out of the 3 attempts. To further confirm whether ChatGPT could make accurate diagnoses without relying on options, 19 diagnostic-related questions that purely examined ECG interpretation without requiring integration of clinical history were converted to open-ended questions. ChatGPT was then prompted to provide a diagnosis after reading the ECG without options.</p>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>The 62 questions included 26 questions for diagnosis, 29 for treatment, and 7 for counting tasks such as QT-interval length calculation. The overall accuracy was 83.87%, 70.97%, and 53.23% for getting at least 1, 2, and 3 out of the 3 attempts correct (<xref rid="figure1" ref-type="fig">Figure 1</xref>). There were significant differences in accuracy across question types with 1 or 2 correct responses, whereas there was no significant difference when all 3 responses were required to be correct (<xref ref-type="table" rid="table1">Table 1</xref>). Accuracy at least 2 times was the highest for treatment recommendation questions, followed by diagnosis and counting questions. Subgroup analysis showed lower accuracy in counting-type than diagnostic- and treatment-related questions when requiring at least 1 or 2 correct responses. Treatment recommendation questions had higher accuracy than other types when at least 2 correct responses were needed (<xref ref-type="table" rid="table1">Table 1</xref>).</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Accuracy of the multimodal ChatGPT-4V model in answering multiple-choice questions related to electrocardiogram (ECG) interpretation. The number of correct responses among 3 attempts for each question is shown from left to right. The accuracy rates with at least 1, 2, and 3 correct responses are annotated on the right from the bottom to the top. Different shapes represent different question types. We evaluated ChatGPT-4V responses using the official reference answers as a standard for reliability. Any questions involving ECG image interpretations were included without additional exclusion criteria. Unedited ECG images were uploaded to ChatGPT at the original resolution and no additional information was provided to maintain consistency with the original test questions. The prompt we used did not contain any hints about the correct answer. ChatGPT's responses were collected from October 4 to 8, 2023. The ggplot2 R package was used for visualization.</p>
        </caption>
        <graphic xlink:href="jmir_v26i1e54607_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>Accuracy of the multimodal ChatGPT-4V model for different types of questions.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="30"/>
          <col width="390"/>
          <col width="0"/>
          <col width="190"/>
          <col width="0"/>
          <col width="260"/>
          <col width="0"/>
          <col width="0"/>
          <col width="130"/>
          <thead>
            <tr valign="top">
              <td colspan="3">Question type</td>
              <td colspan="2">Number of questions</td>
              <td colspan="2">Correct answers, n (%)</td>
              <td colspan="2"><italic>P</italic> value<sup>a</sup></td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td colspan="8">
                <bold>At least 1 correct</bold>
              </td>
              <td>.02</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Diagnosis</td>
              <td colspan="2">26</td>
              <td colspan="2">24 (92.31)</td>
              <td colspan="3">.17</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Treatment recommendation</td>
              <td colspan="2">29</td>
              <td colspan="2">25 (86.21)</td>
              <td colspan="3">.74</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Counting</td>
              <td colspan="2">7</td>
              <td colspan="2">3 (42.86)</td>
              <td colspan="3">.001</td>
            </tr>
            <tr valign="top">
              <td colspan="8">
                <bold>At least 2 correct</bold>
              </td>
              <td>.009</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Diagnosis</td>
              <td colspan="2">26</td>
              <td colspan="2">17 (65.38)</td>
              <td colspan="3">.57</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Treatment recommendation</td>
              <td colspan="2">29</td>
              <td colspan="2">25 (86.21)</td>
              <td colspan="3">.02</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Counting</td>
              <td colspan="2">7</td>
              <td colspan="2">2 (28.57)</td>
              <td colspan="3">.02</td>
            </tr>
            <tr valign="top">
              <td colspan="8">
                <bold>All 3 correct</bold>
              </td>
              <td>.09</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Diagnosis</td>
              <td colspan="2">26</td>
              <td colspan="2">14 (53.85)</td>
              <td colspan="3">—<sup>b</sup></td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Treatment recommendation</td>
              <td colspan="2">29</td>
              <td colspan="2">18 (62.07)</td>
              <td colspan="3">—</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Counting</td>
              <td colspan="2">7</td>
              <td colspan="2">1 (14.29)</td>
              <td colspan="3">—</td>
            </tr>
          </tbody>
        </table>
        <table-wrap-foot>
          <fn id="table1fn1">
            <p><sup>a</sup>The Fisher exact test was used to compare the accuracy of ChatGPT in answering different types of questions with the <italic>fisher.test</italic> function in R (version 4.2.3). If there was a statistically significant difference, subgroup analysis using the Fisher exact test was further performed to respectively compare the accuracy of each type with the other two types.</p>
          </fn>
          <fn id="table1fn2">
            <p><sup>b</sup>Not applicable; subgroup analysis was not performed since there was no significant difference among the three question types overall.</p>
          </fn>
        </table-wrap-foot>
      </table-wrap>
      <p>ChatGPT performed poorly in diagnosing ECGs without options, making the correct ECG diagnosis in only 7 out of 57 responses, which suggests that the ECG-based diagnostic ability of the current version is only possible with a limited range of options provided. Incorrect responses were related to specific functionalities of ChatGPT-4V. The insufficient ability of ChatGPT-4V to count parameters such as PR intervals could lead to errors in diagnostic and therapeutic questions, and its inadequacy in integrating ECG parameters could result in nonspecific diagnoses. For example, ChatGPT-4V could diagnose myocardial infarction but fail to combine various parameters to determine the specific location of the infarction.</p>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>Although ChatGPT-4V can analyze ECGs to some extent and can even make treatment decisions based on the ECG, its diagnostic stability and reliability need further improvement for clinical application. ChatGPT-4V had significantly lower accuracy on counting-based questions than treatment- or diagnostic-related questions, suggesting its limitations in precise quantitative ECG measurements.</p>
      <p>Notably, the model was not specifically trained on ECG data. Thus, we expect ChatGPT-4V to perform better on ECG interpretation as it accumulates more data and training. As a general-purpose model, ChatGPT-4V’s capabilities are not limited to correctly diagnosing ECGs; however, its good performance on ECG-based treatment recommendation questions highlights its potential application in medical decision-making. By leveraging ChatGPT-4V’s abilities to analyze free text and images, management recommendations can be directly generated based on patient data and ECG waveforms to improve health care efficiency. While current bedside cardiac monitors can only offer a warning for issues such as abnormal heart rhythms or atrial fibrillation, models such as ChatGPT-4V could be positioned to serve as 24/7 “attending physicians” that monitor and analyze ECGs of patients with critical illness, capturing low-frequency but important ECG abnormalities and promptly detecting condition changes to recommend timely interventions. ChatGPT can also be used to train medical trainees about ECG interpretation and act as an automated second reader to identify high-risk diagnoses.</p>
      <p>Our study provides a first look at the state-of-the-art ChatGPT-4V model’s capabilities in ECG interpretation. While these early results are promising, they also highlight current limitations of the model. With further technological developments, multimodal generative AI tools such as ChatGPT may eventually play an important role in clinical ECG interpretation and cardiovascular care. Larger-scale validation is needed to fully evaluate this ability. Rapid development of large language models is expected to contribute exciting progress in the cardiovascular field.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Prompts used for this study.</p>
        <media xlink:href="jmir_v26i1e54607_app1.docx" xlink:title="DOCX File , 23 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">ECG</term>
          <def>
            <p>electrocardiogram</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">USMLE</term>
          <def>
            <p>United States Medical Licensing Examination</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The data that support the findings of this study are available on request from the corresponding author.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siontis</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Noseworthy</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Attia</surname>
              <given-names>ZI</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>PA</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence-enhanced electrocardiography in cardiovascular disease management</article-title>
          <source>Nat Rev Cardiol</source>
          <year>2021</year>
          <month>07</month>
          <volume>18</volume>
          <issue>7</issue>
          <fpage>465</fpage>
          <lpage>478</lpage>
          <pub-id pub-id-type="doi">10.1038/s41569-020-00503-2</pub-id>
          <pub-id pub-id-type="medline">33526938</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41569-020-00503-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC7848866</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sarraju</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bruemmer</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Van Iterson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Rodriguez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Laffin</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Appropriateness of cardiovascular disease prevention recommendations obtained from a popular online chat-based artificial intelligence model</article-title>
          <source>JAMA</source>
          <year>2023</year>
          <month>03</month>
          <day>14</day>
          <volume>329</volume>
          <issue>10</issue>
          <fpage>842</fpage>
          <lpage>844</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2023.1044</pub-id>
          <pub-id pub-id-type="medline">36735264</pub-id>
          <pub-id pub-id-type="pii">2801244</pub-id>
          <pub-id pub-id-type="pmcid">PMC10015303</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mou</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT can pass the AHA exams: open-ended questions outperform multiple-choice format</article-title>
          <source>Resuscitation</source>
          <year>2023</year>
          <month>07</month>
          <volume>188</volume>
          <fpage>109783</fpage>
          <pub-id pub-id-type="doi">10.1016/j.resuscitation.2023.109783</pub-id>
          <pub-id pub-id-type="medline">37349064</pub-id>
          <pub-id pub-id-type="pii">S0300-9572(23)00096-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The dawn of LMMs: preliminary explorations with GPT-4V(ision)</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on October 11, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2309.17421v2"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2309.17421</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <source>AMBOSS: medical knowledge platform for doctors and students</source>
          <access-date>2023-10-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.amboss.com/us">https://www.amboss.com/us</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
