<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="letter"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id><journal-id journal-id-type="publisher-id">jmir</journal-id><journal-id journal-id-type="index">1</journal-id><journal-title>Journal of Medical Internet Research</journal-title><abbrev-journal-title>J Med Internet Res</abbrev-journal-title><issn pub-type="epub">1438-8871</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v27i1e73144</article-id><article-id pub-id-type="doi">10.2196/73144</article-id><article-categories><subj-group subj-group-type="heading"><subject>Letter to the Editor</subject></subj-group></article-categories><title-group><article-title>Author&#x2019;s Reply: Large Language Models Could Revolutionize Health Care, but Technical Hurdles May Limit Their Applications</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Ji</surname><given-names>Jiaming</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Meng</surname><given-names>Xiangbin</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Yan</surname><given-names>Xiangyu</given-names></name><degrees>PhD</degrees><xref ref-type="aff" 
rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Institute for Artificial Intelligence, Peking University</institution><addr-line>Beijing</addr-line><country>China</country></aff><aff id="aff2"><institution>Peng Cheng Laboratory</institution><addr-line>12 Xili Road</addr-line><addr-line>Shenzhen</addr-line><country>China</country></aff><aff id="aff3"><institution>Institute of Disaster and Emergency Medicine, Tianjin University</institution><addr-line>Tianjin</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Leung</surname><given-names>Tiffany</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Xiangbin Meng, MD, PhD, Peng Cheng Laboratory, 12 Xili Road, Shenzhen, 518055, China, 86 15896850171; <email>15896850171@163.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>25</day><month>6</month><year>2025</year></pub-date><volume>27</volume><elocation-id>e73144</elocation-id><history><date date-type="received"><day>26</day><month>02</month><year>2025</year></date><date date-type="rev-recd"><day>17</day><month>05</month><year>2025</year></date><date date-type="accepted"><day>23</day><month>05</month><year>2025</year></date></history><copyright-statement>&#x00A9; Jiaming Ji, Xiangbin Meng, Xiangyu Yan. Originally published in the Journal of Medical Internet Research (<ext-link ext-link-type="uri" xlink:href="https://www.jmir.org">https://www.jmir.org</ext-link>), 25.6.2025. 
</copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/">https://www.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.jmir.org/2025/1/e73144"/><related-article related-article-type="commentary article" id="v27e71618" ext-link-type="doi" xlink:href="10.2196/71618" xlink:title="Comment on" vol="27" page="e71618" xlink:type="simple">https://www.jmir.org/2025/1/e71618</related-article><related-article related-article-type="commentary article" id="v27" ext-link-type="doi" xlink:href="10.2196/59069" xlink:title="Comment on" vol="27" xlink:type="simple">https://www.jmir.org/2025/1/e59069</related-article><kwd-group><kwd>large language models</kwd><kwd>LLMs</kwd><kwd>digital health</kwd><kwd>medical diagnosis</kwd><kwd>treatment</kwd><kwd>multimodal data integration</kwd><kwd>technological fairness</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>natural language processing</kwd><kwd>NLP</kwd></kwd-group></article-meta></front><body><p>We thank Beltramin et al [<xref ref-type="bibr" rid="ref1">1</xref>] for the valuable feedback and the opportunity to address the insightful comments on our Viewpoint article, &#x201C;Revolutionizing Health Care: The Transformative Impact of 
Large Language Models in Medicine&#x201D; [<xref ref-type="bibr" rid="ref2">2</xref>]. We appreciate their thoughtful input, which strengthens our discussion on the role of large language models (LLMs) in health care.</p><p>Our article aimed to provide a forward-looking perspective on LLMs&#x2019; potential in medicine, prioritizing conceptual insights over granular technical details. The reviewers&#x2019; points regarding multimodal data integration, image analysis, and resource allocation align with emerging research and underscore LLMs&#x2019; transformative capabilities. For example, multimodal frameworks like Med-Gemini demonstrate LLMs&#x2019; ability to process 2D and 3D medical images, extending their utility beyond conventional deep learning approaches [<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>On health care resource optimization, LLM-based methods have shown promise in enhancing operational efficiency. Techniques leveraging natural language processing can generate optimization models to improve medical resource allocation with greater accuracy [<xref ref-type="bibr" rid="ref4">4</xref>]. Furthermore, LLMs have achieved over 90% accuracy in transforming clinical text into Fast Healthcare Interoperability Resources (FHIR) resources, facilitating streamlined data extraction and decision support [<xref ref-type="bibr" rid="ref5">5</xref>]. While these advancements are promising, we acknowledge the need for rigorous validation and seamless integration with electronic health record systems to ensure practical adoption [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>Regarding the second figure in our paper, our intent was to depict a generalized transformer-based framework, highlighting shared design principles across models like bidirectional encoder representations from transformers (BERT) and generative pretrained transformers (GPTs), rather than delineating their architectural differences. 
This schematic was meant to illustrate the broader impact of transformer-based models on medical artificial intelligence development.</p><p>Finally, our Viewpoint article does not contain factual inaccuracies, but rather provides general schematic representations of LLM architectures.</p></body><back><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">BERT</term><def><p>bidirectional encoder representations from transformers</p></def></def-item><def-item><term id="abb2">FHIR</term><def><p>Fast Healthcare Interoperability Resources</p></def></def-item><def-item><term id="abb3">GPT</term><def><p>generative pretrained transformer</p></def></def-item><def-item><term id="abb4">LLM</term><def><p>large language model</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beltramin</surname><given-names>D</given-names> </name><name name-style="western"><surname>Bousquet</surname><given-names>C</given-names> </name><name name-style="western"><surname>Tiffet</surname><given-names>T</given-names> </name></person-group><article-title>Large language models could revolutionize health care, but technical hurdles may limit their applications (preprint)</article-title><source>J Med Internet Res</source><comment><ext-link ext-link-type="uri" xlink:href="https://www.jmir.org/2025/1/e71618">https://www.jmir.org/2025/1/e71618</ext-link></comment><pub-id pub-id-type="doi">10.2196/71618</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>K</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Yan</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Revolutionizing health care: the transformative impact of large language models in medicine</article-title><source>J Med Internet Res</source><year>2025</year><month>01</month><day>7</day><volume>27</volume><fpage>e59069</fpage><pub-id pub-id-type="doi">10.2196/59069</pub-id><pub-id pub-id-type="medline">39773666</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sellergren</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Advancing multimodal medical capabilities of Gemini</article-title><source>arXiv</source><comment>Preprint posted online on  May 6, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2405.03162</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Tang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>X</given-names> </name><etal/></person-group><article-title>ORLM: training large language models for optimization modeling</article-title><source>arXiv</source><access-date>2025-06-18</access-date><comment>Preprint posted online on  May 30, 2024</comment><comment><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/html/2405.17743v2">https://arxiv.org/html/2405.17743v2</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name 
name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yerebakan</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Enhancing health data interoperability with large language models: a FHIR study</article-title><source>arXiv</source><comment>Preprint posted online on  Sep 19, 2023</comment><pub-id pub-id-type="doi">10.1056/AIcs2300301</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahsan</surname><given-names>H</given-names> </name><name name-style="western"><surname>McInerney</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Retrieving evidence from EHRs with LLMs: possibilities and challenges</article-title><source>Proc Mach Learn Res</source><year>2024</year><month>06</month><volume>248</volume><fpage>489</fpage><lpage>505</lpage><pub-id pub-id-type="medline">39224857</pub-id></nlm-citation></ref></ref-list></back></article>