<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="letter" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v26i1e54948</article-id>
      <article-id pub-id-type="pmid">38691404</article-id>
      <article-id pub-id-type="doi">10.2196/54948</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Research Letter</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Research Letter</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Integrating Text and Image Analysis: Exploring GPT-4V’s Capabilities in Advanced Radiological Applications Across Subspecialties</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhu</surname>
            <given-names>Lingxuan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Kommireddy</surname>
            <given-names>Shreeven</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Younes</surname>
            <given-names>Hadi</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Busch</surname>
            <given-names>Felix</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Neuroradiology</institution>
            <institution>Charité – Universitätsmedizin Berlin, corporate member of Freie Universität Berlin and Humboldt Universität zu Berlin</institution>
            <addr-line>Charitépl. 1</addr-line>
            <addr-line>Berlin, 10117</addr-line>
            <country>Germany</country>
            <phone>49 3045050</phone>
            <email>felix.busch@charite.de</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9770-8555</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Han</surname>
            <given-names>Tianyu</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8636-6462</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Makowski</surname>
            <given-names>Marcus R</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8778-647X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Truhn</surname>
            <given-names>Daniel</given-names>
          </name>
          <degrees>MSc, MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9605-0728</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Bressem</surname>
            <given-names>Keno K</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9249-8624</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Adams</surname>
            <given-names>Lisa</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5836-4542</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Neuroradiology</institution>
        <institution>Charité – Universitätsmedizin Berlin, corporate member of Freie Universität Berlin and Humboldt Universität zu Berlin</institution>
        <addr-line>Berlin</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Diagnostic and Interventional Radiology</institution>
        <institution>University Hospital Aachen</institution>
        <addr-line>Aachen</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Diagnostic and Interventional Radiology</institution>
        <institution>Klinikum rechts der Isar, Technical University Munich</institution>
        <addr-line>Munich</addr-line>
        <country>Germany</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Institute for Radiology and Nuclear Medicine</institution>
        <institution>German Heart Center Munich</institution>
        <institution>Technical University of Munich</institution>
        <addr-line>Munich</addr-line>
        <country>Germany</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Felix Busch <email>felix.busch@charite.de</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>1</day>
        <month>5</month>
        <year>2024</year>
      </pub-date>
      <volume>26</volume>
      <elocation-id>e54948</elocation-id>
      <history>
        <date date-type="received">
          <day>28</day>
          <month>11</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>6</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>10</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>20</day>
          <month>3</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Felix Busch, Tianyu Han, Marcus R Makowski, Daniel Truhn, Keno K Bressem, Lisa Adams. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 01.05.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2024/1/e54948" xlink:type="simple"/>
      <related-article related-article-type="correction-forward" xlink:title="This is a corrected version. See correction statement in:" xlink:href="https://www.jmir.org/2024/1/e64411" vol="26" page="e64411"> </related-article>
      <related-article related-article-type="correction-forward" xlink:title="This is a corrected version. See correction statement in:" xlink:href="https://www.jmir.org/2026/1/e91415" vol="28" page="e91415"> </related-article>
      <abstract>
        <p>This study demonstrates that GPT-4V outperforms GPT-4 across radiology subspecialties in analyzing 207 cases with 1312 images from the Radiological Society of North America Case Collection.</p>
      </abstract>
      <kwd-group>
        <kwd>GPT-4</kwd>
        <kwd>ChatGPT</kwd>
        <kwd>Generative Pre-Trained Transformer</kwd>
        <kwd>multimodal large language models</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>AI applications in medicine</kwd>
        <kwd>diagnostic radiology</kwd>
        <kwd>clinical decision support systems</kwd>
        <kwd>generative AI</kwd>
        <kwd>medical image analysis</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The launch of GPT-4 has generated significant interest in the scientific and medical communities, demonstrating its potential in medicine with notable achievements such as an 83.76% zero-shot accuracy on the United States Medical Licensing Examination (USMLE) [<xref ref-type="bibr" rid="ref1">1</xref>]. In radiology, GPT has spanned text-based tasks, including board exam question scoring, data mining, and report structuring [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. The recent release of GPT-4’s visual capabilities (GPT-4V) enables the combined analysis of text and visual data [<xref ref-type="bibr" rid="ref4">4</xref>]. Our study focuses on evaluating the diagnostic capabilities of GPT-4V by comparing it to GPT-4 in advanced radiological tasks, benchmarking the potential of this multimodal large language model in the medical imaging field.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p>We sourced 207 cases with 1312 images from the Radiological Society of North America (RSNA) Case Collection (accessible for RSNA members on the RSNA Case Collection website [<xref ref-type="bibr" rid="ref5">5</xref>]), aiming to cover at least 10 cases for each of the 22 presented subspecialties. The cases within each subspecialty were chosen to present different pathologies. Each case had varying numbers of images and was usually labeled for more than 1 subspecialty, so that the total number of cases per subspecialty varied between 1 (for “Physics and Basic Science,” no more than 1 case was available) and 43 (for “Gastrointestinal,” 10 cases in this category were chosen, with 33 additional cases from other subspecialties that were also labeled for “Gastrointestinal”).</p>
      <p>GPT-4 and GPT-4V were accessed between November 6, 2023, and November 17, 2023. We utilized an application programming interface (API) account, which allowed us to use the models programmatically and ensure a consistent environment for each test. This access level was crucial, as it provided stable and repeatable interactions with the models, unlike what might be experienced with fluctuating conditions of regular account usage. The ground truth was established based on the final diagnoses stated in the RSNA case entries. We prompted each model 3 times via the API for the following two tasks: first, the models were asked to identify the diagnosis and 2 differentials (providing the patient history only for GPT-4 or patient history with images for GPT-4V); second, the models were asked to answer corresponding multiple-choice questions from the RSNA Case Collection. The GPT-4V assessment used a “chain-of-thought” prompt that guided the model through diagnostic reasoning (<xref rid="figure1" ref-type="fig">Figure 1</xref>), in contrast to the text-only assessment of GPT-4. For both tasks, a case was considered correctly diagnosed if the same correct result appeared for at least 2 of 3 prompts. Cases with no repeated correct diagnoses and cases with only false diagnoses across the 3 prompts were marked as incorrectly diagnosed. Mean accuracies and bootstrapped 95% CIs were calculated, and statistical significance was determined by using the McNemar test (<italic>P</italic>&lt;.001).</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>An example conversation with GPT-4V showcasing the prompting style that was used if the question contained more than 4 images. Notably, GPT-4V often disregards the initial textual case description when additional image prompts are introduced, necessitating the description’s repetition for accurate responses. As the context window, image resolution limit, and embedding size limit of the web application version of GPT-4V are unknown, definitive conclusions regarding the cause of this forgetfulness could not be drawn. Nevertheless, the model’s consistent ability to recognize and recall images from the initial prompt (eg, axial FLAIR [fluid-attenuated inversion recovery] images) suggests that running out of context length is an unlikely explanation. Reproduced with permission from the Radiological Society of North America. Link to the displayed case: https://cases.rsna.org/take-quiz/07c4b917-80fb-43c0-8b3b-59a0d8ceb203 (accessed 14th January 2026).</p>
        </caption>
        <graphic xlink:href="jmir_v26i1e54948_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>GPT-4 accurately identified the primary diagnosis in 18% (95% CI 12%-25%) of cases (first task). When including differential diagnoses, this accuracy increased to 28% (95% CI 22%-33%). In contrast, GPT-4V achieved a 27% (95% CI 21%-34%) accuracy rate for primary diagnosis, which increased to 35% (95% CI 29%-40%) when differential diagnoses were accounted for. After being presented with multiple-choice questions, including information about clinical history and presentation (second task), GPT-4 achieved an accuracy of 47% (95% CI 42%-56%). Again, GPT-4V demonstrated a higher accuracy of 64% (95% CI 59%-72%). The observed difference in performance was statistically significant (<italic>P</italic>&lt;.001). Across 15 subspecialties, GPT-4V outperformed GPT-4, with the sole exception being in “Cardiac Imaging.” <xref rid="figure2" ref-type="fig">Figure 2</xref> summarizes the accuracies across all subspecialties.</p>
      <fig id="figure2" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Comparison of GPT-4 and GPT-4V in various radiology subspecialties. Many cases spanned multiple subspecialties, and some subspecialties had very few cases. The number of images for individual cases ranged from 2 to 30, and the overall accuracy across all subspecialties, as shown in the bar plot, showed that GPT-4V performed significantly better than GPT-4. Error bars represent the 95% CIs. The radar plot shows the accuracy of GPT-4 (green line) and GPT-4V (purple line) across different radiology subspecialties. Each axis represents a specific radiology subspecialty, with the percentages indicating the accuracy of the model in that domain. Both models show varying levels of performance across subspecialties, with GPT-4V consistently performing better than GPT-4, except in “Cardiac Imaging” (cases: n=14; GPT-4V accuracy: 36%; GPT-4 accuracy: 57%). For “Physics and Basic Science” (cases: n=1), “Breast Imaging” (cases: n=10), and “Obstetrics/Gynecology” (cases: n=12), GPT-4V and GPT-4 showed on-par performance (accuracy: 100%, 50%, and 58%, respectively). Due to the small sample sizes in some categories, which ranged from 1 to 43 cases, these results should primarily be viewed as indicative trends rather than definitive conclusions about the models’ performance in these specific areas.</p>
        </caption>
        <graphic xlink:href="jmir_v26i1e54948_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>Our study shows that GPT-4V has improved performance over GPT-4 in solving complex radiological problems, indicating its potential to detect pathological features in medical images and thus its radiological domain knowledge. The RSNA Case Collection, which is aimed at expert-level professional radiologists, highlights the promise of GPT-4V in specialized medical contexts.</p>
      <p>However, the use of GPT-4V warrants a cautious approach. At this time, it should be considered, at best, as a supplemental tool to augment—not replace—the comprehensive analyses performed by trained medical professionals.</p>
      <p>Extending the initial research by Yang et al [<xref ref-type="bibr" rid="ref6">6</xref>], our study explores the medical image analysis capabilities of GPT-4V in more complex scenarios and with a wider range of cases. The ongoing development of multimodal models, such as Med-Flamingo, for medical applications signals a growing interest in this area [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      <p>One challenge is the scarcity of specialized medical data sets. As our study used RSNA member–exclusive cases, it was unlikely that these cases were in GPT-4V’s training data; thus, the risk of data contamination was minimized. However, the corresponding images for each case were intended to highlight specific pathologies, and this does not fully replicate clinical practice, where one would have to analyze each separate image to identify potential pathologies—a task that specialized deep learning models would be better suited to perform.</p>
      <p>Future efforts should focus on detailed performance comparisons between generalist models (like GPT-4V) and emerging, radiological domain–specialized, artificial intelligence diagnostic models to clarify the clinical relevance and applicability of generalist models in clinical practice.</p>
      <p>Our results encourage conducting further performance evaluations of multimodal models in different radiologic subdisciplines, as well as using larger data sets, to gain a more holistic understanding of their role in radiology.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">API</term>
          <def>
            <p>application programming interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">RSNA</term>
          <def>
            <p>Radiological Society of North America</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">USMLE</term>
          <def>
            <p>United States Medical Licensing Examination</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The cases analyzed in this study are available from the Radiological Society of North America (RSNA) Case Collection. This repository can be accessed by RSNA members on the RSNA Case Collection website [<xref ref-type="bibr" rid="ref5">5</xref>], where each case is presented with detailed clinical information, imaging data, questions, multiple-choice answers, and diagnostic conclusions. The cases from the RSNA Case Collection were reproduced with permission from the Radiological Society of North America. These cases were not used for model training, nor were they retained by any tools or systems employed in this study. No additional unpublished data from these cases were utilized in this study. Researchers and readers are encouraged to directly access the RSNA Case Collection for further information.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nori</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>McKinney</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Carignan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Capabilities of GPT-4 on medical challenge problems</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on Apr 12, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/2303.13375.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2303.13375</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bhayana</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Krishna</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bleakney</surname>
              <given-names>RR</given-names>
            </name>
          </person-group>
          <article-title>Performance of ChatGPT on a radiology board-style examination: insights into current strengths and limitations</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <month>06</month>
          <volume>307</volume>
          <issue>5</issue>
          <fpage>e230582</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.230582</pub-id>
          <pub-id pub-id-type="medline">37191485</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Busch</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kader</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Niehues</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Makowski</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Bressem</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Leveraging GPT-4 for post hoc transformation of free-text radiology reports into structured reporting: a multilingual feasibility study</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <month>05</month>
          <volume>307</volume>
          <issue>4</issue>
          <fpage>e230725</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.230725</pub-id>
          <pub-id pub-id-type="medline">37014240</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="web">
          <article-title>GPT-4V(ision) system card</article-title>
          <source>OpenAI</source>
          <year>2023</year>
          <month>09</month>
          <day>25</day>
          <access-date>2023-10-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/research/gpt-4v-system-card">https://openai.com/research/gpt-4v-system-card</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <article-title>RSNA Case Collection</article-title>
          <source>Radiological Society of North America</source>
          <access-date>2024-04-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cases.rsna.org/">https://cases.rsna.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The dawn of LMMs: preliminary explorations with GPT-4V(ision)</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on Oct 11, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/2309.17421.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2309.17421</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moor</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yasunaga</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zakka</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dalmia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Reis</surname>
              <given-names>EP</given-names>
            </name>
            <name name-style="western">
              <surname>Rajpurkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Leskovec</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Med-Flamingo: a multimodal medical few-shot learner</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on Jul 27, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/2307.15189.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2307.15189</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
