<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="editorial" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v25i1e51584</article-id>
      <article-id pub-id-type="pmid">37651164</article-id>
      <article-id pub-id-type="doi">10.2196/51584</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Editorial</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Editorial</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Best Practices for Using AI Tools as an Author, Peer Reviewer, or Editor</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Leung</surname>
            <given-names>Tiffany</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Leung</surname>
            <given-names>Tiffany I</given-names>
          </name>
          <degrees>MD, MPH</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>JMIR Publications, Inc</institution>
            <addr-line>130 Queens Quay East</addr-line>
            <addr-line>Unit 1100</addr-line>
            <addr-line>Toronto, ON, M5A 0P6</addr-line>
            <country>Canada</country>
            <phone>1 416 583 2040</phone>
            <email>tiffany.leung@jmir.org</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6007-4023</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>de Azevedo Cardoso</surname>
            <given-names>Taiane</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1925-8709</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6106-0873</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
          <degrees>MD, MPH</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6479-5330</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>JMIR Publications, Inc</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Internal Medicine (adjunct)</institution>
        <institution>Southern Illinois University School of Medicine</institution>
        <addr-line>Springfield, IL</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>University of Victoria</institution>
        <addr-line>Victoria, BC</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Tiffany I Leung <email>tiffany.leung@jmir.org</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>31</day>
        <month>8</month>
        <year>2023</year>
      </pub-date>
      <volume>25</volume>
      <elocation-id>e51584</elocation-id>
      <history>
        <date date-type="received">
          <day>28</day>
          <month>8</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>28</day>
          <month>8</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Tiffany I Leung, Taiane de Azevedo Cardoso, Amaryllis Mavragani, Gunther Eysenbach. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 31.08.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2023/1/e51584" xlink:type="simple"/>
      <abstract>
        <p>The ethics of generative artificial intelligence (AI) use in scientific manuscript content creation has become a serious matter of concern in the scientific publishing community. Generative AI has computationally become capable of elaborating research questions; refining programming code; generating text in scientific language; and generating images, graphics, or figures. However, this technology should be used with caution. In this editorial, we outline the current state of editorial policies on generative AI or chatbot use in authorship, peer review, and editorial processing of scientific and scholarly manuscripts. Additionally, we provide JMIR Publications’ editorial policies on these issues. We further detail JMIR Publications’ approach to the applications of AI in the editorial process for manuscripts in review in a JMIR Publications journal.</p>
      </abstract>
      <kwd-group>
        <kwd>publishing</kwd>
        <kwd>open access publishing</kwd>
        <kwd>open science</kwd>
        <kwd>publication policy</kwd>
        <kwd>science editing</kwd>
        <kwd>scholarly publishing</kwd>
        <kwd>scientific publishing</kwd>
        <kwd>research</kwd>
        <kwd>scientific research</kwd>
        <kwd>editorial</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Technology tools are useful for making the scientific writing process more timely and effective. Many advances have been made in terms of the tools available to help conduct more sophisticated statistical analysis, manage references, and check grammar. Among these advances, large language models (LLMs) are neural networks trained on large corpora of textual information that can be fine-tuned to respond to natural language queries in a conversational fashion. In late 2022, OpenAI released ChatGPT, an artificial intelligence (AI) chatbot [<xref ref-type="bibr" rid="ref1">1</xref>] that uses an LLM, which has become enormously popular and a focal point for regulatory debate in a matter of months. Since then, countless LLMs have been developed and launched for research, commercial, and other applications.</p>
      <p>The ethics of generative AI use in scientific manuscript content creation has become a serious matter of concern in the scientific publishing community [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. More generally, there are already broader calls for the regulation of AI, and LLMs in particular, in general public use [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. This is because generative AI has computationally become capable of elaborating research questions; refining programming code; generating text in scientific language; and generating images, graphics, or figures. However, this technology should be used with caution. For instance, LLMs may produce errors and misleading information, especially when dealing with technical topics that they may have had limited data to train on. In the technical report released by OpenAI, it is acknowledged that Generative Pre-trained Transformer (GPT)–4 can produce biased and unreliable content [<xref ref-type="bibr" rid="ref6">6</xref>]. Such biased output can result from inherent biases in the data on which they were trained. A recent study published in the <italic>Journal of Medical Internet Research</italic> showed that ChatGPT was able to generate a highly convincing, fraudulent scientific manuscript article in approximately 1 hour [<xref ref-type="bibr" rid="ref7">7</xref>]. The authors used tools to detect AI-generated text (AI Detector and AI Text Classifier), and the results were inconclusive, indicating that these tools were unable to determine that the manuscript was generated by ChatGPT. Finally, the authors were able to detect mistakes in the generated article, specifically in the references, as ChatGPT generated fictitious citations. These findings reinforce the importance of having well-established regulations around the use of ChatGPT in the scientific field.</p>
      <p>For authors of academic manuscripts, key issues of concern include the need to fact-check AI-generated content of any form (including but not limited to textual information or graphics); assign accountability for AI-generated information; and disclose transparently the use of generative AI in producing any scholarly or scientific work, especially when it impacts the meaning and content of the information submitted for potential publication [<xref ref-type="bibr" rid="ref8">8</xref>]. For peer reviewers, additional issues pertain to the typical processing of manuscripts, wherein humans traditionally have generated peer review reports and issued editorial decisions on revising, rejecting, or accepting manuscripts. Currently, it is possible to prompt generative AI to facilitate these processes when given specific inputs and prompts as well. For editors, receiving AI-generated material in manuscripts (from authors) or in peer review reports (from peer reviewers) also warrants additional considerations.</p>
      <p>In this editorial, we outline the current state of editorial policies on generative AI or chatbot use in authorship, peer review, and editorial processing of scientific and scholarly manuscripts. Additionally, we provide JMIR Publications’ editorial policies on these issues, with the goal of ensuring the integrity of the science published and the publishing process. We further detail JMIR Publications’ approach to the applications of AI in the editorial process for manuscripts in review in a JMIR Publications journal.</p>
    </sec>
    <sec>
      <title>For Authors</title>
      <p>In scientific publishing, there is already historical precedent that the transparency of authorship is essential to the integrity of scientific publication [<xref ref-type="bibr" rid="ref9">9</xref>]. Regarding AI, general consensus already states that AI cannot be a listed coauthor on a manuscript because of the inability of the AI to be accountable for the content written [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref13">13</xref>]. The lack of accountability and ability to give consent to be published as a coauthor would be consistent with not listing an AI tool as a coauthor [<xref ref-type="bibr" rid="ref14">14</xref>]. According to Committee on Publication Ethics (COPE) guidance, “AI tools cannot meet the requirements for authorship as they cannot take responsibility for the submitted work. As non-legal entities, they cannot assert the presence or absence of conflicts of interest nor manage copyright and license agreements” [<xref ref-type="bibr" rid="ref2">2</xref>]. The World Association of Medical Editors (WAME) states in their <italic>Recommendations on Chatbots and Generative Artificial Intelligence in Relation to Scholarly Publication</italic> that “Chatbots cannot be authors” [<xref ref-type="bibr" rid="ref11">11</xref>]. One examination of ChatGPT (the free version of GPT-3) against the Contributor Roles Taxonomy (CRediT) authorship criteria [<xref ref-type="bibr" rid="ref15">15</xref>] noted that the chatbot meets only 3 of 14 criteria for authorship [<xref ref-type="bibr" rid="ref16">16</xref>]. Unfortunately, before such widespread publisher policies and recommendations became the norm, some manuscripts and preprints have already been published that identified ChatGPT as a coauthor [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
      <p>At JMIR Publications, early guidance in our knowledge base of editorial policies explained that authors must appropriately include a description of the use of generative AI in the conduct or reporting of scientific work; otherwise, if this information is not a part of the study design (eg, in the Methods section of a manuscript), then providing acknowledgment of the use of generative AI in writing or creating text, figures, or other content for scientific publication is required [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. We welcome authors to submit relevant work to the flagship journal of JMIR Publications, the <italic>Journal of Medical Internet Research</italic>, which now has a section on generative language models (including ChatGPT), where it may be appropriate to submit work that uses such technology as a core component of the work (<xref ref-type="table" rid="table1">Table 1</xref>). If an author does not use AI to generate any portions of a submitted manuscript, it would be appropriate for the author also to provide a pertinent attestation in their cover letter on submission.</p>
      <p>Such acknowledgements must be fully transparent, precise, and complete throughout the submission, editorial, and production processes and will be disclosed upon the publication of a manuscript, if accepted for publication after the disclosure has been provided [<xref ref-type="bibr" rid="ref19">19</xref>]. In addition, we strongly recommend authors to supply their transcripts, including complete prompts and responses, in supplementary files (whether or not it is published) as exemplified in Eysenbach [<xref ref-type="bibr" rid="ref20">20</xref>], as this serves as additional information for the peer reviewers or editor to consider in their evaluation of the manuscript.</p>
      <p>Authors must also be cautious of the use of generative AI because of its predisposition to hallucinating information and references [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. Because generative AI cannot be accountable for the outputs and possible hallucinations that they generate in response to a prompt, authors are accountable for fact- and reference-checking any references suggested by a generative AI tool. Authors must also be cautious of the potential for <italic>unintentional plagiarism</italic> (because the AI may not be able to properly source or cite literature) [<xref ref-type="bibr" rid="ref23">23</xref>] or overt <italic>AI plagiarism</italic> (the authors passing off or taking credit for the production of statements that were generated by AI). Either form of plagiarism is deemed not acceptable and would be examined carefully in accordance with COPE guidance [<xref ref-type="bibr" rid="ref24">24</xref>]. Authors may wish to adhere to the WAME recommendation that they “specify what they have done to mitigate the risk of plagiarism, provide a balanced view, and ensure the accuracy of all their references” [<xref ref-type="bibr" rid="ref11">11</xref>]. Furthermore, instances of suspected or potential scientific misconduct or violations of publication ethics principles, regardless of the involvement or use of generative AI, would be investigated in accordance with JMIR Publications policies, which adhere to COPE guidance.</p>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>Author’s responsibilities when using generative artificial intelligence (AI) in preparing a manuscript.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="180"/>
          <col width="820"/>
          <thead>
            <tr valign="top">
              <td>Guiding principle</td>
              <td>Author’s responsibilities</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Accountability</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Be accountable for the content of AI-generated comments submitted in the manuscript. For example, AI-generated statements should have accompanying citations where appropriate and be fact-checked for accuracy, and generated references should be checked to ensure that they have not been hallucinated.</p>
                  </list-item>
                </list>
                <list list-type="bullet">
                  <list-item>
                    <p>Do not list generative AI as a coauthor.</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Transparency</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>If generative AI was a part of the study design, include appropriate methodological detail in the Methods section of a manuscript. Describe how generative AI was used in the conduct of the scientific work in sufficient detail for a peer-reviewed publication.</p>
                  </list-item>
                </list>
                <list list-type="bullet">
                  <list-item>
                    <p>If generative AI was used to generate manuscript content, then state clearly in the Acknowledgments section how and where generative AI was used. This may include but is not limited to writing or creating text, figures, or other content for scientific publication. Disclose which generative AI tool was used by attesting to its use, such as stating, “I conducted this review with the assistance of [ProductName, Version, from CompanyName, Year].”</p>
                  </list-item>
                  <list-item>
                    <p>If no generative AI was used, state in the cover letter of the submission the following: “The author(s) attest that there was no use of generative artificial intelligence (AI) technology in the generation of text, figures, or other informational content of this manuscript.”</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Confidentiality</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Authors use generative AI at their own risk. Understanding the terms of use of any generative AI is recommended to understand how the content of prompts may be reused by the generative AI and the company that created it.</p>
                  </list-item>
                </list>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec>
      <title>For Peer Reviewers</title>
      <p>For peer reviewers, JMIR Publications adheres to expectations similar to those for authors: specifically, peer reviewers are accountable for the content of AI-generated comments submitted in a peer review. Consequently, peer reviewers are strongly advised to still ensure that the quality and content of the peer review meet the recommended standards described elsewhere in JMIR Publications policies [<xref ref-type="bibr" rid="ref25">25</xref>]. However, peer reviewers must remain cautious about the risks of such use, including but not limited to the perpetuation of bias and nonneutral language in AI use (eg, gender, racial, political, or other biases based on individual characteristics) [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>] and information leakage or breaches of confidentiality [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>] (<xref ref-type="table" rid="table2">Table 2</xref>). The latter point on the confidentiality of manuscript information warrants a more extended clarification: when authors agree to open peer review of their JMIR Publications manuscript (ie, on <italic>JMIR Preprints</italic> [<xref ref-type="bibr" rid="ref29">29</xref>]), information leakage is of lesser concern because authors have already consented to an open peer review process, and their manuscript is publicly viewable. JMIR Publications encourages open peer review [<xref ref-type="bibr" rid="ref30">30</xref>]. However, in some instances, authors wish to maintain a traditional, closed peer review process; in such cases, peer reviewers may risk information leakage by engaging generative AI in assisting them in the process of peer review report generation.</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Peer reviewer’s responsibilities when using generative artificial intelligence (AI) in peer review.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="180"/>
          <col width="820"/>
          <thead>
            <tr valign="top">
              <td>Guiding principle</td>
              <td>Peer reviewer’s responsibilities</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Accountability</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Be accountable for the content of AI-generated comments submitted in their peer review. The quality and content of the peer review meet the recommended standards in JMIR Publications policies [<xref ref-type="bibr" rid="ref31">31</xref>].</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Transparency</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Disclose which generative AI tool was used by attesting to its use at the end of a peer review report (in Comments to Authors), such as stating, “I conducted this review with the assistance of [ProductName, Version, from CompanyName, Year].”</p>
                  </list-item>
                  <list-item>
                    <p>Describe in detail how it was used in supporting peer review generation (in Confidential Comments to the Editor). Sufficient detail must be provided so that an editor has a clear and complete understanding of the role of AI in peer review report generation. The handling editor may request the peer reviewer to provide more detail, for example, the prompts used and the responses generated by AI.</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Confidentiality</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Carefully and thoroughly review the terms of use of any generative AI. If the peer reviewer’s relationship to the content (manuscript) does not adhere to the terms of use, or the peer reviewer doubts that the generative AI maintains the confidentiality of content, do not engage in its use for this task.</p>
                  </list-item>
                </list>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <p>In addition to accountability and confidentiality, transparency is essential to ensure the integrity of the peer review process. Agencies such as the US National Institutes of Health (NIH) have issued clear guidance that the use of AI in assisting a review with the grant peer review process is prohibited due to a breach of their confidentiality and nondisclosure agreements [<xref ref-type="bibr" rid="ref32">32</xref>]. Some publishers have opted to ban generative AI use or restrict use to in-house or licensed technologies [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. The WAME states that “peer reviewers should specify, to authors and each other, any use of chatbots in the evaluation of the manuscript and generation of reviews” [<xref ref-type="bibr" rid="ref11">11</xref>].</p>
      <p>At JMIR Publications, we adhere to this guidance of transparency and disclosure; we do not endorse a ban on generative AI in peer review, which can be counterproductive in various ways [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Peer reviewers are expected to disclose and describe their use of generative AI (<xref ref-type="table" rid="table2">Table 2</xref>). As JMIR Publications follows single-blind peer review with unblinding only upon publication, the publisher may include a comment (Editorial Notice) at their discretion, which would accompany the publication history of a manuscript regarding a peer reviewer’s disclosure of AI use during the peer review process. Here, we further elaborate on some of the detailed considerations a peer reviewer must account for when considering generative AI use to support their personal peer review process.</p>
      <p>Importantly, when peer reviewers use generative AI to support their peer review, they are accountable for ensuring the confidentiality of the peer review process. Detailed and careful review of the terms of use of any generative AI is strongly advised, if not required. Furthermore, if the peer reviewer has any doubts about potential information leakage after a careful review of the terms of use of a generative AI tool, then they should not engage in its use for this task. For example, in the free version of OpenAI’s ChatGPT, their March 14, 2023, Terms of Use (<xref rid="figure1" ref-type="fig">Figure 1</xref> and <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) do not exclude the potential for secondary use or reuse of provided information (“Input”), although the use of their application programming interface (API) suggests that they would exclude the reuse of input: “We do not use Content that you provide to or receive from our API to develop or improve our Services. We may use Content from Services other than our API to help develop and improve our Services” [<xref ref-type="bibr" rid="ref36">36</xref>]. Because there is potential for the input to be reused, JMIR Publications would <italic>not</italic> permit the use of the free version of ChatGPT for assisting with peer review comment generation.</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>(A) Screenshot of 3(c) from OpenAI’s ChatGPT Terms of Use (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). (B) Screenshot of 6(a) from Anthropic’s Claude Terms of Service (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p>
        </caption>
        <graphic xlink:href="jmir_v25i1e51584_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>In another example, Anthropic’s Claude also has clearly stated language in their July 8, 2023, Terms of Service (<xref rid="figure1" ref-type="fig">Figure 1</xref> and <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>): “You represent and warrant that you have all rights, and have provided any notices and obtained any consents that are necessary for us to process any Prompts you submit to the Services in accordance with our Terms. You also represent and warrant that your submission of Prompts to us will not violate our Terms...including intellectual property laws and any privacy or data protection laws governing personal information contained in your Prompts” [<xref ref-type="bibr" rid="ref37">37</xref>]. Because peer reviewers do not have “all rights” or have not “obtained any consents” with regard to a manuscript they may review, JMIR Publications would <italic>not</italic> permit the use of the free version of Claude for assisting with peer review comment generation.</p>
      <p>Peer reviewers for JMIR Publications journals are advised to carefully review the content of the Peer Reviewer Hub for guidance [<xref ref-type="bibr" rid="ref25">25</xref>], including guidance on writing a high-quality peer review [<xref ref-type="bibr" rid="ref31">31</xref>]. Instances of suspected or potential peer review manipulation, fraud, scientific misconduct, or violations of publication ethics principles during the peer review process would be investigated in accordance with JMIR Publications policies, which adhere to COPE guidance.</p>
    </sec>
    <sec>
      <title>For Editors</title>
      <p>AI is already in use by some publishers, as an attempt to optimize the editorial workflow. For instance, some publishers have publicly available tools where the authors can add the title, keywords, and abstract of their manuscript, and the AI tool will list the journals that this work is more suitable for. This approach could be time-saving for both the editors and the authors.</p>
      <p>Similar to peer reviewers and authors, editors evaluating and issuing decisions about manuscripts are accountable for the content of their decisions and the final decision on the manuscript, whether it is accepted or rejected (<xref ref-type="table" rid="table3">Table 3</xref>). This includes whether the editor may choose to use generative AI to assist in the summarization of peer review reports or the generation of text for an editorial decision [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. The transparency and maintenance of confidentiality again remain essential, in precisely the same ways as noted for peer reviewers: the editor is accountable for ensuring the confidentiality of the peer review process where it is required (ie, when authors choose not to engage in open peer review).</p>
      <p>When editors evaluate peer reviews of a manuscript that they are assigned to, the editor should follow JMIR Publications policies in evaluating the quality, validity, relevance, and professional language use of a peer review. In a recommendation from the WAME, similar to peer reviewers, editors are also accountable for the generated content, the transparency of the disclosure of use, and maintaining confidentiality during the peer review process [<xref ref-type="bibr" rid="ref11">11</xref>]. Routinely, plagiarism is a serious concern in scientific publishing, and existing tools are able to identify writing that is plagiarized from existing published literature. AI plagiarism occurs when a person generates extensive material using AI and claims it as their own work [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Plagiarism detection tools now must encompass AI plagiarism as well [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. To avoid AI plagiarism, authors must disclose the use of generative AI as detailed above. Peer reviewers may electively opt to use plagiarism detection tools when performing a peer review and would be required to adhere to appropriate disclosures as previously detailed. Editors (or the publisher) may use tools to detect whether a manuscript presents content written by generative AI, although all users of any AI plagiarism detection tools must again adhere to the principles of transparency and confidentiality. For example, although GPTZero may seem to be a promising option, there is a risk of information leakage or loss of confidentiality, based upon a review of its terms of use [<xref ref-type="bibr" rid="ref41">41</xref>] (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>). If an editor identifies issues with research integrity regarding any of the above guidance for authors or peer reviewers, then these would be investigated according to JMIR Publications policies.</p>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Editor’s responsibilities when using generative artificial intelligence (AI) in peer review.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="180"/>
          <col width="820"/>
          <thead>
            <tr valign="top">
              <td>Guiding principle</td>
              <td>Editor’s responsibilities</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Accountability</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Be accountable for the content of their decisions, including AI-generated content, and the final decision on the manuscript, whether it is accepted or rejected. Follow JMIR Publications policies in evaluating the quality, validity, relevance, and professional language use of a peer review.</p>
                  </list-item>
                  <list-item>
                    <p>Optionally request peer reviewers who have disclosed generative AI use to provide more detail, for example, the prompts used and the responses generated by AI.</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Transparency</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Disclose which generative AI tool was used by attesting to its use at the end of a decision, if necessary, such as stating, “I conducted this review with the assistance of [ProductName, Version, from CompanyName, Year].”</p>
                  </list-item>
                  <list-item>
                    <p>The publisher may include a comment (Editorial Notice) at their discretion, which would accompany the publication history of a manuscript regarding peer reviewers’ or handling editors’ disclosure of generative AI use during the peer review process.</p>
                  </list-item>
                </list>
              </td>
            </tr>
            <tr valign="top">
              <td>Confidentiality</td>
              <td>
                <list list-type="bullet">
                  <list-item>
                    <p>Carefully and thoroughly review the terms of use of any generative AI. If the editor’s relationship to the content (manuscript and peer reviews) does not adhere to the terms of use, or the editor doubts that the generative AI maintains the confidentiality of the content, do not engage in its use for this task.</p>
                  </list-item>
                </list>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec>
      <title>Closing Comments</title>
      <p>The accountability of parties using generative AI, transparency regarding complete disclosure, and the maintenance of confidentiality are fundamental in maintaining the integrity of the scientific record and are key components of JMIR Publications’ editorial policies. Because of the rapidly evolving nature of AI technologies, related policies, regulations [<xref ref-type="bibr" rid="ref42">42</xref>], investigations [<xref ref-type="bibr" rid="ref43">43</xref>], and best practices [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>], JMIR Publications looks forward to continuing to lead and evolve as an innovator in scientific publishing.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>OpenAI Terms of Use, updated March 14, 2023.</p>
        <media xlink:href="jmir_v25i1e51584_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 689 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Anthropic Terms of Service, version 3.0, updated July 8, 2023.</p>
        <media xlink:href="jmir_v25i1e51584_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 185 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>GPTZero Terms of Use, updated January 22, 2023.</p>
        <media xlink:href="jmir_v25i1e51584_app3.pdf" xlink:title="PDF File  (Adobe PDF File), 176 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">API</term>
          <def>
            <p>application programming interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ChatGPT</term>
          <def>
            <p>Chat Generative Pre-trained Transformer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">COPE</term>
          <def>
            <p>Committee on Publication Ethics</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">CRediT</term>
          <def>
            <p>Contributor Roles Taxonomy</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">GPT</term>
          <def>
            <p>Generative Pre-trained Transformer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">NIH</term>
          <def>
            <p>National Institutes of Health</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">WAME</term>
          <def>
            <p>World Association of Medical Editors</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This manuscript was produced as a result of discussion among JMIR Publications staff and managers.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>TIL and TdAC contributed to writing the original draft. TIL, TdAC, AM, and GE contributed to conceptualization, writing, review, and editing of the manuscript. TIL contributed to project administration. GE contributed to supervision.</p>
      </fn>
      <fn fn-type="conflict">
        <p>TIL is the scientific editorial director at JMIR Publications. TdAC and AM are scientific editors at JMIR Publications. GE is the founder, chief executive officer, and executive editor of JMIR Publications, and receives a salary and owns equity.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <source>OpenAI</source>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/">https://openai.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Landis</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Baskin</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Hadsell</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>English</surname>
              <given-names>M</given-names>
            </name>
            <collab>CSE Editorial Policy Committee</collab>
          </person-group>
          <article-title>CSE guidance on machine learning and artificial intelligence tools</article-title>
          <source>Science Editor</source>
          <year>2023</year>
          <month>5</month>
          <day>1</day>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.csescienceeditor.org/article/cse-guidance-on-machine-learning-and-artificial-intelligence-tools/">https://www.csescienceeditor.org/article/cse-guidance-on-machine-learning-and-artificial-intelligence-tools/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Vines</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>SSP conference debate: AI and the integrity of scholarly publishing</article-title>
          <source>The Scholarly Kitchen</source>
          <year>2023</year>
          <month>6</month>
          <day>27</day>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/yxy9w2ah">https://tinyurl.com/yxy9w2ah</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meskó</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>The imperative for regulatory oversight of large language models (or generative AI) in healthcare</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <month>07</month>
          <day>06</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>120</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00873-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00873-0</pub-id>
          <pub-id pub-id-type="medline">37414860</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00873-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10326069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <article-title>AI Act: a step closer to the first rules on artificial intelligence</article-title>
          <source>European Parliament</source>
          <year>2023</year>
          <month>5</month>
          <day>11</day>
          <access-date>2023-07-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence">https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="web">
          <article-title>GPT-4 technical report</article-title>
          <source>OpenAI</source>
          <year>2023</year>
          <month>3</month>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cdn.openai.com/papers/gpt-4.pdf">https://cdn.openai.com/papers/gpt-4.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Májovský</surname>
              <given-names>Martin</given-names>
            </name>
            <name name-style="western">
              <surname>Černý</surname>
              <given-names>Martin</given-names>
            </name>
            <name name-style="western">
              <surname>Kasal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Komarc</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Netuka</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence can generate fraudulent but authentic-looking scientific medical articles: Pandora's box has been opened</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <month>05</month>
          <day>31</day>
          <volume>25</volume>
          <fpage>e46924</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e46924/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46924</pub-id>
          <pub-id pub-id-type="medline">37256685</pub-id>
          <pub-id pub-id-type="pii">v25i1e46924</pub-id>
          <pub-id pub-id-type="pmcid">PMC10267787</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosseini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rasmussen</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Resnik</surname>
              <given-names>DB</given-names>
            </name>
          </person-group>
          <article-title>Using AI to write scholarly publications</article-title>
          <source>Account Res</source>
          <year>2023</year>
          <month>01</month>
          <day>25</day>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1080/08989621.2023.2168535</pub-id>
          <pub-id pub-id-type="medline">36697395</pub-id>
          <pub-id pub-id-type="pmcid">PMC10366336</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McNutt</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Bradford</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Drazen</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Hanson</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Jamieson</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Kiermer</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Marcus</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Pope</surname>
              <given-names>BK</given-names>
            </name>
            <name name-style="western">
              <surname>Schekman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Swaminathan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stang</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Verma</surname>
              <given-names>IM</given-names>
            </name>
          </person-group>
          <article-title>Transparency in authors' contributions and responsibilities to promote integrity in scientific publication</article-title>
          <source>Proc Natl Acad Sci U S A</source>
          <year>2018</year>
          <month>03</month>
          <day>13</day>
          <volume>115</volume>
          <issue>11</issue>
          <fpage>2557</fpage>
          <lpage>2560</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.pnas.org/doi/abs/10.1073/pnas.1715374115?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1073/pnas.1715374115</pub-id>
          <pub-id pub-id-type="medline">29487213</pub-id>
          <pub-id pub-id-type="pii">1715374115</pub-id>
          <pub-id pub-id-type="pmcid">PMC5856527</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="web">
          <article-title>Authorship and AI tools</article-title>
          <source>COPE: Committee on Publication Ethics</source>
          <year>2023</year>
          <month>2</month>
          <day>13</day>
          <access-date>2023-07-03</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://publicationethics.org/cope-position-statements/ai-author">https://publicationethics.org/cope-position-statements/ai-author</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="web">
          <article-title>Chatbots, generative AI, and scholarly manuscripts</article-title>
          <source>WAME</source>
          <year>2023</year>
          <month>5</month>
          <day>31</day>
          <access-date>2023-07-03</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://wame.org/page3.php?id=106">https://wame.org/page3.php?id=106</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Flanagin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bibbins-Domingo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Berkwits</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Christiansen</surname>
              <given-names>SL</given-names>
            </name>
          </person-group>
          <article-title>Nonhuman "authors" and implications for the integrity of scientific publication and medical knowledge</article-title>
          <source>JAMA</source>
          <year>2023</year>
          <month>03</month>
          <day>28</day>
          <volume>329</volume>
          <issue>8</issue>
          <fpage>637</fpage>
          <lpage>639</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2023.1344</pub-id>
          <pub-id pub-id-type="medline">36719674</pub-id>
          <pub-id pub-id-type="pii">2801170</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Stokel-Walker</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT listed as author on research papers: many scientists disapprove</article-title>
          <source>Nature</source>
          <year>2023</year>
          <month>01</month>
          <volume>613</volume>
          <issue>7945</issue>
          <fpage>620</fpage>
          <lpage>621</lpage>
          <pub-id pub-id-type="doi">10.1038/d41586-023-00107-z</pub-id>
          <pub-id pub-id-type="medline">36653617</pub-id>
          <pub-id pub-id-type="pii">10.1038/d41586-023-00107-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosseini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Resnik</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The ethics of disclosing the use of artificial intelligence tools in writing scholarly manuscripts</article-title>
          <source>Research Ethics</source>
          <year>2023</year>
          <month>06</month>
          <day>15</day>
          <pub-id pub-id-type="doi">10.1177/17470161231180449</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <article-title>CRediT – Contributor Roles Taxonomy</article-title>
          <source>CRediT</source>
          <access-date>2023-07-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://credit.niso.org/">https://credit.niso.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Teixeira da Silva</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Tsigaris</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Human‐ and AI‐based authorship: principles and ethics</article-title>
          <source>Learn Publ</source>
          <year>2023</year>
          <month>06</month>
          <day>1</day>
          <volume>36</volume>
          <issue>3</issue>
          <fpage>453</fpage>
          <lpage>462</lpage>
          <pub-id pub-id-type="doi">10.1002/leap.1547</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Editorial Director</collab>
          </person-group>
          <article-title>Do you allow the use of ChatGPT or other generative language models and how should this be reported? 2023</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/3t32zuvk">https://tinyurl.com/3t32zuvk</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Editorial Director</collab>
          </person-group>
          <article-title>Copyright, licensing, attribution of TOC images</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/articles/115001352708-Copyright-Licensing-Attribution-of-TOC-images">https://support.jmir.org/hc/en-us/articles/115001352708-Copyright-Licensing-Attribution-of-TOC-images</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>JMIR Copyediting Team</collab>
          </person-group>
          <article-title>How should the "Acknowledgments" section be formatted?</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/articles/360015982471-How-should-the-Acknowledgments-section-be-formatted-">https://support.jmir.org/hc/en-us/articles/360015982471-How-should-the-Acknowledgments-section-be-formatted-</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>The role of ChatGPT, generative language models, and artificial intelligence in medical education: a conversation with ChatGPT and a call for papers</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>03</month>
          <day>06</day>
          <volume>9</volume>
          <fpage>e46885</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023//e46885/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46885</pub-id>
          <pub-id pub-id-type="medline">36863937</pub-id>
          <pub-id pub-id-type="pii">v9i1e46885</pub-id>
          <pub-id pub-id-type="pmcid">PMC10028514</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ji</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Frieske</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ishii</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Bang</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Madotto</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fung</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Survey of hallucination in natural language generation</article-title>
          <source>ACM Comput Surv</source>
          <year>2023</year>
          <month>03</month>
          <day>03</day>
          <volume>55</volume>
          <issue>12</issue>
          <fpage>1</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1145/3571730</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Merz</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT just makes stuff up: a conversation on a controversial topic</article-title>
          <source>The Hastings Center</source>
          <year>2023</year>
          <month>4</month>
          <day>4</day>
          <access-date>2023-07-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.thehastingscenter.org/chatgpt-just-makes-stuff-up-a-conversation-on-a-controversial-topic/">https://www.thehastingscenter.org/chatgpt-just-makes-stuff-up-a-conversation-on-a-controversial-topic/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dada</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kleesiek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Egger</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT in healthcare: a taxonomy and systematic review</article-title>
          <source>medRxiv.</source>
          <comment>Preprint posted online on March 30, 2023.</comment>
          <pub-id pub-id-type="doi">10.1101/2023.03.30.23287899</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>COPE Council</collab>
          </person-group>
          <article-title>COPE flowcharts and infographics — plagiarism in a published article — English</article-title>
          <source>Committee on Publication Ethics</source>
          <year>2006</year>
          <access-date>2023-08-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.24318/cope.2019.2.2">https://doi.org/10.24318/cope.2019.2.2</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="web">
          <article-title>Peer-review (FAQs for reviewers)</article-title>
          <source>JMIR Publications</source>
          <access-date>2023-08-04</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/sections/115000390167-Peer-Review-FAQs-for-Reviewers-">https://support.jmir.org/hc/en-us/sections/115000390167-Peer-Review-FAQs-for-Reviewers-</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Parsons</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Baglini</surname>
              <given-names>RB</given-names>
            </name>
          </person-group>
          <article-title>Peer review: the case for neutral language</article-title>
          <source>Trends Cogn Sci</source>
          <year>2021</year>
          <month>08</month>
          <volume>25</volume>
          <issue>8</issue>
          <fpage>639</fpage>
          <lpage>641</lpage>
          <pub-id pub-id-type="doi">10.1016/j.tics.2021.05.003</pub-id>
          <pub-id pub-id-type="medline">34090797</pub-id>
          <pub-id pub-id-type="pii">S1364-6613(21)00124-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosseini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Horbach</surname>
              <given-names>SPJM</given-names>
            </name>
          </person-group>
          <article-title>Fighting reviewer fatigue or amplifying bias? considerations and recommendations for use of ChatGPT and other large language models in scholarly peer review</article-title>
          <source>Res Integr Peer Rev</source>
          <year>2023</year>
          <month>05</month>
          <day>18</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>4</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://researchintegrityjournal.biomedcentral.com/articles/10.1186/s41073-023-00133-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s41073-023-00133-5</pub-id>
          <pub-id pub-id-type="medline">37198671</pub-id>
          <pub-id pub-id-type="pii">10.1186/s41073-023-00133-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC10191680</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Addington</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: cyber security threats and countermeasures</article-title>
          <source>SSRN Journal</source>
          <comment>Preprint posted online on May 9, 2023.</comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.4425678</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Editorial Director</collab>
          </person-group>
          <article-title>What are JMIR Preprints?</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/articles/115001350367-What-are-JMIR-Preprints-">https://support.jmir.org/hc/en-us/articles/115001350367-What-are-JMIR-Preprints-</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Editorial Director</collab>
          </person-group>
          <article-title>What is open peer-review?</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/articles/115001908868-What-is-open-peer-review-">https://support.jmir.org/hc/en-us/articles/115001908868-What-is-open-peer-review-</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>JMIR Editorial Team</collab>
          </person-group>
          <article-title>(For reviewers) how to write a high-quality peer review</article-title>
          <source>JMIR Publications</source>
          <year>2023</year>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://support.jmir.org/hc/en-us/articles/16470162812827">https://support.jmir.org/hc/en-us/articles/16470162812827</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="web">
          <article-title>The use of generative artificial intelligence technologies is prohibited for the NIH peer review process</article-title>
          <source>National Institutes of Health</source>
          <year>2023</year>
          <month>6</month>
          <day>23</day>
          <access-date>2023-07-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://grants.nih.gov/grants/guide/notice-files/NOT-OD-23-149.html">https://grants.nih.gov/grants/guide/notice-files/NOT-OD-23-149.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <article-title>Publishing ethics</article-title>
          <source>Elsevier</source>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://beta.elsevier.com/about/policies-and-standards/publishing-ethics">https://beta.elsevier.com/about/policies-and-standards/publishing-ethics</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="web">
          <source>Taylor &#38; Francis Editor Resources</source>
          <year>2020</year>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/4dwu7p4s">https://tinyurl.com/4dwu7p4s</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Urbanowicz</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>PCN</given-names>
            </name>
            <name name-style="western">
              <surname>O'Connor</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bright</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Tatonetti</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Won</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez-Hernandez</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and large language models in academia: opportunities and challenges</article-title>
          <source>BioData Min</source>
          <year>2023</year>
          <month>07</month>
          <day>13</day>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>20</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://biodatamining.biomedcentral.com/articles/10.1186/s13040-023-00339-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13040-023-00339-9</pub-id>
          <pub-id pub-id-type="medline">37443040</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13040-023-00339-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC10339472</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <article-title>Terms of use</article-title>
          <source>OpenAI</source>
          <year>2023</year>
          <month>3</month>
          <day>14</day>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/policies/terms-of-use">https://openai.com/policies/terms-of-use</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>Terms of service</article-title>
          <source>Anthropic Console</source>
          <year>2023</year>
          <month>7</month>
          <day>8</day>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://console.anthropic.com/legal/terms">https://console.anthropic.com/legal/terms</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abd-Alrazaq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>AlSaad</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alhuwail</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Healy</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Latifi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aziz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Damseh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alabed Alrazak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Large language models in medical education: opportunities, challenges, and future directions</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>9</volume>
          <fpage>e48291</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e48291/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48291</pub-id>
          <pub-id pub-id-type="medline">37261894</pub-id>
          <pub-id pub-id-type="pii">v9i1e48291</pub-id>
          <pub-id pub-id-type="pmcid">PMC10273039</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sallam</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT utility in healthcare education, research, and practice: systematic review on the promising perspectives and valid concerns</article-title>
          <source>Healthcare (Basel)</source>
          <year>2023</year>
          <month>03</month>
          <day>19</day>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>887</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=healthcare11060887"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/healthcare11060887</pub-id>
          <pub-id pub-id-type="medline">36981544</pub-id>
          <pub-id pub-id-type="pii">healthcare11060887</pub-id>
          <pub-id pub-id-type="pmcid">PMC10048148</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>FM</given-names>
            </name>
            <name name-style="western">
              <surname>Markov</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Dyer</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Ramesh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Pearson</surname>
              <given-names>AT</given-names>
            </name>
          </person-group>
          <article-title>Comparing scientific abstracts generated by ChatGPT to real abstracts with detectors and blinded human reviewers</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <month>04</month>
          <day>26</day>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>75</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00819-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00819-6</pub-id>
          <pub-id pub-id-type="medline">37100871</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00819-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC10133283</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <source>GPTZero</source>
          <access-date>2023-07-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://gptzero.me/">https://gptzero.me/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <article-title>EU AI Act: first regulation on artificial intelligence</article-title>
          <source>European Parliament</source>
          <year>2023</year>
          <month>6</month>
          <day>8</day>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.europarl.europa.eu/news/en/headlines/society/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence">https://www.europarl.europa.eu/news/en/headlines/society/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zakrzewski</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>FTC investigates OpenAI over data leak and ChatGPT's inaccuracy</article-title>
          <source>The Washington Post</source>
          <year>2023</year>
          <month>7</month>
          <day>13</day>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.washingtonpost.com/technology/2023/07/13/ftc-openai-chatgpt-sam-altman-lina-khan/">https://www.washingtonpost.com/technology/2023/07/13/ftc-openai-chatgpt-sam-altman-lina-khan/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coiera</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Verspoor</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>We need to chat about artificial intelligence</article-title>
          <source>Med J Aust</source>
          <year>2023</year>
          <month>08</month>
          <day>07</day>
          <volume>219</volume>
          <issue>3</issue>
          <fpage>98</fpage>
          <lpage>100</lpage>
          <pub-id pub-id-type="doi">10.5694/mja2.51992</pub-id>
          <pub-id pub-id-type="medline">37302124</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hira</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>NAM leadership consortium collaborates with leading health, tech, research, and bioethics organizations to develop health care AI code of conduct</article-title>
          <source>National Academy of Medicine</source>
          <year>2023</year>
          <month>6</month>
          <day>20</day>
          <access-date>2023-07-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/3c6hy4rh">https://tinyurl.com/3c6hy4rh</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
