<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "http://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd">
<article article-type="research-article" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<processing-meta>
<custom-meta-group content-type="composition">
<custom-meta specific-use="newgen" xlink:href="https://www.newgen.co/">
<meta-name>Composition Vendor</meta-name>
<meta-value>Newgen KnowledgeWorks (P) Ltd.</meta-value>
</custom-meta>
</custom-meta-group>
</processing-meta>
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">PLoS One</journal-id>
<journal-id journal-id-type="publisher-id">plos</journal-id>
<journal-id journal-id-type="pmc">plosone</journal-id>
<journal-title-group>
<journal-title>PLOS One</journal-title>
</journal-title-group>
<issn pub-type="epub">1932-6203</issn>
<publisher>
<publisher-name>Public Library of Science</publisher-name>
<publisher-loc>San Francisco, CA USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.1371/journal.pone.0331368</article-id>
<article-id pub-id-type="publisher-id">PONE-D-25-02363</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Research Article</subject>
</subj-group>
<subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Radiology and imaging</subject></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>People and places</subject><subj-group><subject>Population groupings</subject><subj-group><subject>Professions</subject><subj-group><subject>Medical personnel</subject><subj-group><subject>Radiologists</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Health care</subject><subj-group><subject>Health education and awareness</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Health care</subject><subj-group><subject>Patients</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Diagnostic medicine</subject><subj-group><subject>Diagnostic radiology</subject><subj-group><subject>Magnetic resonance imaging</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Imaging techniques</subject><subj-group><subject>Diagnostic radiology</subject><subj-group><subject>Magnetic resonance imaging</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Radiology and imaging</subject><subj-group><subject>Diagnostic radiology</subject><subj-group><subject>Magnetic resonance imaging</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Computer and information sciences</subject><subj-group><subject>Information technology</subject><subj-group><subject>Natural language processing</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Cognitive science</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Language</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Language</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Social sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Language</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Health care</subject><subj-group><subject>Health care providers</subject><subj-group><subject>Physicians</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>People and places</subject><subj-group><subject>Population groupings</subject><subj-group><subject>Professions</subject><subj-group><subject>Medical personnel</subject><subj-group><subject>Physicians</subject></subj-group></subj-group></subj-group></subj-group></subj-group></article-categories>
<title-group>
<article-title>Development, optimization, and preliminary evaluation of a novel artificial intelligence tool to promote patient health literacy in radiology reports: The Rads-Lit tool</article-title>
<alt-title alt-title-type="running-head">Development of Rads-Lit, an artificial-intelligence radiology health literacy tool</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes" xlink:type="simple">
<contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0002-2692-8195</contrib-id>
<name name-style="western">
<surname>Doshi</surname>
<given-names>Rushabh H.</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role content-type="http://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
<xref ref-type="aff" rid="aff001"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor001">*</xref>
</contrib>
<contrib contrib-type="author" corresp="yes" xlink:type="simple">
<name name-style="western">
<surname>Amin</surname>
<given-names>Kanhai</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
<xref ref-type="aff" rid="aff002"><sup>2</sup></xref>
<xref ref-type="fn" rid="econtrib001"><sup>☯</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Chan</surname>
<given-names>Shin Mei</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<xref ref-type="aff" rid="aff003"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Kaur</surname>
<given-names>Manroop</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<xref ref-type="aff" rid="aff004"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Bajaj</surname>
<given-names>Simar S.</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/writing-review-editing/">Writing – review &amp; editing</role>
<xref ref-type="aff" rid="aff005"><sup>5</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<contrib-id authenticated="true" contrib-id-type="orcid">https://orcid.org/0000-0002-6966-6084</contrib-id>
<name name-style="western">
<surname>Khosla</surname>
<given-names>Pavan</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/software/">Software</role>
<xref ref-type="aff" rid="aff001"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Kothari</surname>
<given-names>Veer T.</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role content-type="http://credit.niso.org/contributor-roles/software/">Software</role>
<xref ref-type="aff" rid="aff001"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Mozayan</surname>
<given-names>Ali</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<xref ref-type="aff" rid="aff004"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Tocino</surname>
<given-names>Irena</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<xref ref-type="aff" rid="aff004"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author" xlink:type="simple">
<name name-style="western">
<surname>Chheang</surname>
<given-names>Sophie</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role content-type="http://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<xref ref-type="aff" rid="aff004"><sup>4</sup></xref>
</contrib>
</contrib-group>
<aff id="aff001"><label>1</label> <addr-line>Yale School of Medicine, New Haven, Connecticut, United States of America</addr-line></aff>
<aff id="aff002"><label>2</label> <addr-line>Yale College, New Haven, Connecticut, United States of America</addr-line></aff>
<aff id="aff003"><label>3</label> <addr-line>UCSF Department of Radiology &amp; Biomedical Imaging, San Francisco, California, United States of America</addr-line></aff>
<aff id="aff004"><label>4</label> <addr-line>Department of Radiology and Biomedical Imaging, Yale School of Medicine, New Haven, Connecticut, United States of America</addr-line></aff>
<aff id="aff005"><label>5</label> <addr-line>Harvard College, Cambridge, Massachusetts, United States of America</addr-line></aff>
<contrib-group>
<contrib contrib-type="editor" xlink:type="simple">
<name name-style="western">
<surname>Mubuuke</surname>
<given-names>Aloysius Gonzaga</given-names>
</name>
<role>Editor</role>
<xref ref-type="aff" rid="edit1"/></contrib>
</contrib-group>
<aff id="edit1"><addr-line>Makerere University College of Health Sciences, UGANDA</addr-line></aff>
<author-notes>
<fn fn-type="conflict" id="coi001">
<p>No authors have competing interests.</p>
</fn>
<fn fn-type="other" id="econtrib001">
<p>☯ equal contribution, co-first authors</p>
</fn>
<corresp id="cor001">* E-mail: <email xlink:type="simple">r.doshi@yale.edu</email></corresp>
</author-notes>
<pub-date pub-type="epub"><day>3</day><month>9</month><year>2025</year></pub-date>
<pub-date pub-type="collection"><year>2025</year></pub-date>
<volume>20</volume>
<issue>9</issue>
<elocation-id>e0331368</elocation-id>
<history>
<date date-type="received"><day>15</day><month>1</month><year>2025</year></date>
<date date-type="accepted"><day>13</day><month>8</month><year>2025</year></date>
</history>
<permissions>
<copyright-year>2025</copyright-year>
<copyright-holder>Doshi et al</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">
<license-p>This is an open access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">Creative Commons Attribution License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.</license-p></license>
</permissions>
<self-uri content-type="pdf" xlink:href="info:doi/10.1371/journal.pone.0331368">
</self-uri>
<abstract>
<p>Radiology reports are an integral part of patient medical records; however, these reports often contain complex medical terminology that are difficult for patients to comprehend, potentially leading to anxiety, misunderstanding, and misinterpretation. The development of user-friendly instruments to improve understanding is thus critically important to enhance health literacy and empower patients. In this study, we introduce a novel artificial intelligence (AI) interface, the Rads-Lit Tool, which can simplify radiology reports for patients using natural language processing (NLP) techniques. This manuscript presents the development process, methodology, and results of the Rads-Lit Tool, demonstrating its potential to simplify radiology reports across various examination types and complexity levels. Our findings highlight that patient-facing AI-driven tools can enhance patient health literacy and foster improved patient-provider communication in radiology.</p>
</abstract>
<funding-group>
<funding-statement>The author(s) received no specific funding for this work.</funding-statement>
</funding-group>
<counts>
<fig-count count="4"/>
<table-count count="2"/>
<page-count count="14"/>
</counts>
<custom-meta-group>
<custom-meta id="data-availability">
<meta-name>Data Availability</meta-name>
<meta-value>All relevant data are within the manuscript and the figures (in the main text and supplementary files).</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="sec001" sec-type="intro">
<title>1. Introduction</title>
<p>A cornerstone of modern medicine, imaging has revolutionized diagnosis, treatment planning, and disease monitoring, with radiology reports an indispensable component of patient medical records. Traditionally, these reports have been accessible only to radiologists and the referring providers, who then interpret findings for patients [<xref ref-type="bibr" rid="pone.0331368.ref001">1</xref>]. However, the rise of electronic portals has led to an increasing number of patients directly accessing their medical information, with the 21st Century Cures Act mandating access to all parts of the electronic health record (EHR) [<xref ref-type="bibr" rid="pone.0331368.ref002">2</xref>]. While this shift empowers patients to play a more active role in their care, the complexity of medical jargon and acronyms in radiology reports often leads to significant confusion, anxiety, and potential misinterpretation [<xref ref-type="bibr" rid="pone.0331368.ref003">3</xref>,<xref ref-type="bibr" rid="pone.0331368.ref004">4</xref>]. Simply looking up individual terms often fails to provide a cohesive understanding of the report’s narrative and its implications. This underscores a critical need for tools that can translate the entire report findings into plain language, thereby enhancing patient comprehension and facilitating more meaningful patient-provider communication. Moreover, advanced imaging modalities continue to evolve rapidly, reinforcing the need for clear, transparent patient-facing reports.</p>
<p>Correspondingly, there has been growing attention toward health literacy and its associations with patient engagement, treatment adherence, and care disparities [<xref ref-type="bibr" rid="pone.0331368.ref005">5</xref>]. Advanced natural language processing (NLP), in turn, has emerged as one promising tool to help bridge the gap between complex medical information and ease of understanding [<xref ref-type="bibr" rid="pone.0331368.ref006">6</xref>–<xref ref-type="bibr" rid="pone.0331368.ref008">8</xref>]. In radiology in particular, NLP techniques have been used in everything from observation detection for diagnostic surveillance to quality assessment to clinical support services [<xref ref-type="bibr" rid="pone.0331368.ref009">9</xref>,<xref ref-type="bibr" rid="pone.0331368.ref010">10</xref>]. A few studies have sought to use NLP to simplify radiology reports, namely through linking terms to the consumer health vocabulary system [<xref ref-type="bibr" rid="pone.0331368.ref011">11</xref>] and the French lexical network [<xref ref-type="bibr" rid="pone.0331368.ref012">12</xref>].</p>
<p>With the rise of large language models (LLMs), OpenAI’s ChatGPT, Google Bard, and Microsoft Bing have also been explored for simplifying radiology reports [<xref ref-type="bibr" rid="pone.0331368.ref013">13</xref>,<xref ref-type="bibr" rid="pone.0331368.ref014">14</xref>]. The main advantage of these tools is their accessibility and comprehensiveness — freely available to anyone with an internet connection and able to simplify an entire radiology report, rather than simply appending a summary, linking to a glossary, or altering the structure. However, there are limitations. While general-purpose LLMs like ChatGPT are accessible, their effectiveness for specialized tasks like simplifying radiology reports heavily depends on user-crafted prompts, which can lead to variable quality and reliability [<xref ref-type="bibr" rid="pone.0331368.ref015">15</xref>,<xref ref-type="bibr" rid="pone.0331368.ref016">16</xref>]. Patients may not possess the expertise to formulate optimal prompts. Rads-Lit addresses this by embedding a systematically optimized prompt within a user-friendly interface, specifically designed for radiology reports. This aims to provide more consistent, reliable, and appropriately simplified outputs compared to ad-hoc use of general LLMs, thereby offering a more dependable solution for enhancing patient health literacy in this domain.</p>
<p>We have used a variety of prompts to assess these LLMs for accuracy and fidelity in simplifying radiology reports [<xref ref-type="bibr" rid="pone.0331368.ref015">15</xref>,<xref ref-type="bibr" rid="pone.0331368.ref016">16</xref>]; however, we have this data only for specific prompts and remain unsure if the LLMs would be inaccurate or inadequate if asked different prompts. Given the infinite possibility of prompts and the corresponding variable quality of responses, patients may not be able to take full advantage of these chatbots to improve their own health literacy. While general LLMs show promise, their direct application by patients for simplifying complex medical texts like radiology reports is fraught with challenges, including prompt variability and inconsistent output quality [<xref ref-type="bibr" rid="pone.0331368.ref013">13</xref>–<xref ref-type="bibr" rid="pone.0331368.ref016">16</xref>]. This highlights a critical gap: the need for specialized, optimized tools that can reliably simplify radiology reports to an appropriate health literacy level while maintaining clinical accuracy. Therefore, this study aimed to address the following research questions:</p>
<p>1.) Can a systematic prompt engineering process identify an optimal LLM prompt to simplify radiology reports to a target 5th-7th grade reading level?</p>
<p>2.) How does a specialized tool (Rads-Lit), utilizing an optimized prompt, perform in terms of readability improvement across various imaging modalities compared to original reports and a basic simplification prompt?</p>
<p>3.) What is the accuracy, completeness, and perceived safety (by radiologists) of the simplified reports generated by such a tool?</p>
<p>We present the development, optimization, and preliminary evaluation of the Rads-Lit Tool, an AI interface designed to address these questions. This work includes a novel methodology for LLM prompt assessment for health literacy, aiming to empower patients with understandable medical information, thereby fostering improved patient-provider communication and informed decision-making.</p>
</sec>
<sec id="sec002" sec-type="materials|methods">
<title>2. Methods</title>
<sec id="sec003">
<title>2.1. Development of interface</title>
<p>We developed a proof-of-concept user interface for patients to input their radiology findings and receive a simplified version of their findings (<ext-link ext-link-type="uri" xlink:href="http://radiologyliteracy.org/" xlink:type="simple">http://radiologyliteracy.org/</ext-link>), utilizing OpenAI’s Davinci application programming interface (API). We undertook an optimization process, as detailed below, to simplify patients’ imaging reports at a reading level recommended by the American Medical Association and National Institutes of Health while maintaining accuracy [<xref ref-type="bibr" rid="pone.0331368.ref017">17</xref>,<xref ref-type="bibr" rid="pone.0331368.ref018">18</xref>].</p>
</sec>
<sec id="sec004">
<title>2.2. Dataset selection and modification</title>
<p>We sourced a random selection of 750 radiology reports across diverse examination types (150 MRI, CT, US [ultrasound], X-ray, and Mammogram each) from the MIMIC-III database, a comprehensive dataset available from Beth Israel Deaconess Medical Center [<xref ref-type="bibr" rid="pone.0331368.ref019">19</xref>,<xref ref-type="bibr" rid="pone.0331368.ref020">20</xref>]. A random sub-selection of 25 reports was initially chosen to test our prompts. Redacted physician names in the reports were changed to “Dr. Smith” and redacted dates were changed to “prior.” As this study used only de-identified, publicly available data, our specific analysis was deemed exempt from further institutional IRB review, as our institution’s IRB exempts analysis of publicly available de-identified data. Given the de-identified nature of the data, patient consent for this specific retrospective study was waived.</p>
</sec>
<sec id="sec005">
<title>2.3. Readability scores</title>
<p>To assess readability, we used the validated Gunning Fog (GF), Flesch-Kincaid Grade Level (FK), Automated Readability Index (ARI), and Coleman-Liau (CL) indices, as previous studies have done [<xref ref-type="bibr" rid="pone.0331368.ref016">16</xref>,<xref ref-type="bibr" rid="pone.0331368.ref021">21</xref>–<xref ref-type="bibr" rid="pone.0331368.ref023">23</xref>]. Each of these reading indices output a value related to a reading grade level (RGL) (i.e., output of 7 represents the 7<sup>th</sup> grade reading level.) Further, in line with previous studies, we averaged the scores across all 4 indices to get an average RGL, aRGL [<xref ref-type="bibr" rid="pone.0331368.ref024">24</xref>].</p>
</sec>
<sec id="sec006">
<title>2.4. Prompt engineering and optimization</title>
<p>Our prompt engineering involved a multi-stage iterative process (<xref ref-type="fig" rid="pone.0331368.g001">Fig 1</xref>). Stage 1 began with identifying five core simplifying stems (‘simplify’, ‘explain’, etc.) via a Delphi technique, tested with two modifiers for 15 initial prompts. Stage 2 focused on prompts that achieved below the median Stage 1 aRGL, adding grade level specifications and contextual phrases (e.g., ‘so I can understand’), resulting in 56 further prompts. Stage 3 took the top four stem/grade combinations and added two persona-based contexts (‘I am a patient,’ ‘you are a health literacy tool’) for 8 additional prompts. The full list of 79 prompts is detailed in <xref ref-type="supplementary-material" rid="pone.0331368.s001">S1 Fig</xref>. The five best-performing prompts (lowest median aRGLs) from these stages were then extensively tested on 750 reports, alongside a basic ‘simplify’ prompt, to select the final prompt for the Rads-Lit tool, again using a Delphi method for the final choice based on readability, fidelity and accuracy.</p>
<fig id="pone.0331368.g001" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.g001</object-id><label>Fig 1</label><caption><title>Prompt optimization process.</title></caption>
<graphic mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.g001" xlink:type="simple"/></fig>
</sec>
<sec id="sec007">
<title>2.5. Accuracy, completeness, and comprehension of Rads-lit tool</title>
<p>After selecting the best-performing prompt, three radiologists—2 attendings and 1 resident—assessed 62 of these reports and the corresponding simplified versions. Specifically, these radiologists evaluated the output for accuracy, completeness, and extraneous information using single-item Likert scales, ranging from 1 (Strongly Disagree, 0–20% agreement) to 5 (Strongly Agree, 80–100% agreement).</p>
</sec>
<sec id="sec008">
<title>2.6. Statistical analysis</title>
<p>The non-parametric Wilcoxon signed-rank and rank-sum tests were used for statistical analysis.</p>
</sec>
</sec>
<sec id="sec009" sec-type="results">
<title>3. Results</title>
<p>The prompt optimization process is summarized in <xref ref-type="fig" rid="pone.0331368.g001">Fig 1</xref> and <xref ref-type="table" rid="pone.0331368.t001">Table 1</xref>.</p>
<table-wrap id="pone.0331368.t001" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.t001</object-id><label>Table 1</label><caption><title>Prompt optimization process and corresponding average readability grade level (aRGL) scores.</title></caption>
<alternatives><graphic id="pone.0331368.t001g" mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.t001" xlink:type="simple"/><table><colgroup>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
</colgroup>
<thead>
<tr>
<th align="left">Stage</th>
<th align="left">Prompt</th>
<th align="left">Round 1</th>
<th align="left">Average Reading Grade Level</th>
<th align="left">Round 2</th>
<th align="left">Average Reading Grade Level</th>
<th align="left">Round 3</th>
<th align="left">Average Reading Grade Level</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">1</td>
<td align="left">Explain. ----</td>
<td align="left">X</td>
<td align="left">12.3277</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Simplify. ----</td>
<td align="left"/>
<td align="left">17.0265</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify. ----</td>
<td align="left"/>
<td align="left">14.0026</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe. ----</td>
<td align="left">X</td>
<td align="left">13.3636</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret. ----</td>
<td align="left"/>
<td align="left">14.2223</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this. ----</td>
<td align="left"/>
<td align="left">15.5284</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Simplify this. ----</td>
<td align="left"/>
<td align="left">15.4535</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this. ----</td>
<td align="left">X</td>
<td align="left">11.8043</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this. ----</td>
<td align="left">X</td>
<td align="left">13.0073</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this. ----</td>
<td align="left"/>
<td align="left">14.1201</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding. ----</td>
<td align="left">X</td>
<td align="left">13.6338</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Simplify this radiology finding. ----</td>
<td align="left"/>
<td align="left">17.875</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding. ----</td>
<td align="left">X</td>
<td align="left">13.6909</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this radiology finding. ----</td>
<td align="left"/>
<td align="left">14.3011</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding. ----</td>
<td align="left">X</td>
<td align="left">13.9849</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left">2</td>
<td align="left">Explain this radiology finding so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.9838</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.7333</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.1195</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.0755</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.6691</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.0847</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.3682</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain this radiology finding at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.323</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">14.8914</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.3825</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.5219</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.2292</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.4175</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.379</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.264</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this radiology finding at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.2593</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">14.3113</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.6786</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.0223</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">7.9129</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.2156</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.4567</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">7.5413</td>
<td align="left">X</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Interpret this radiology finding at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">6.805</td>
<td align="left">X</td>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.1571</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.7876</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.3177</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.2255</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.396</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.369</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.1338</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Explain at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.124</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">15.0518</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">13.3335</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.7826</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.0025</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.0836</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.4504</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.8969</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.6372</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">11.8043</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.4277</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.425</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.6958</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.7717</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.481</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.8041</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Clarify this at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.1064</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this so I can understand. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">14.308</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this in layman terms. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.1574</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 9th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.4075</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 8th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.8597</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 7th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.0875</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 6th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.2578</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">8.4589</td>
<td align="left"/>
<td align="left"/>
</tr>
<tr>
<td align="left"/>
<td align="left">Describe this at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">7.3842</td>
<td align="left">X</td>
<td align="left"/>
</tr>
<tr>
<td align="left">3</td>
<td align="left">I am a patient. Interpret this radiology finding at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.7724</td>
</tr>
<tr>
<td align="left"/>
<td align="left">You are a health literacy tool. Interpret this radiology finding at the 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">7.3258</td>
</tr>
<tr>
<td align="left"/>
<td align="left">I am a patient. Interpret this radiology finding at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">8.8212</td>
</tr>
<tr>
<td align="left"/>
<td align="left">You are a health literacy tool. Interpret this radiology finding at the 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">X</td>
<td align="left">7.7558</td>
</tr>
<tr>
<td align="left"/>
<td align="left">I am a patient. Describe this at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">12.3456</td>
</tr>
<tr>
<td align="left"/>
<td align="left">You are a health literacy tool. Describe this at a 5th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">10.3601</td>
</tr>
<tr>
<td align="left"/>
<td align="left">I am a patient. Describe this at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.423</td>
</tr>
<tr>
<td align="left"/>
<td align="left">You are a health literacy tool. Describe this at a 4th grade level. ----</td>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left"/>
<td align="left">9.5407</td>
</tr>
</tbody>
</table>
</alternatives></table-wrap>
<p>After determining the 5 best-performing prompts in the pilot analysis across Stages 1–3 (<xref ref-type="table" rid="pone.0331368.t001">Table 1</xref>), we re-calculated the average readability scores of these prompts across 750 radiology reports (<xref ref-type="table" rid="pone.0331368.t002">Table 2</xref>). These prompts, namely Prompts A-E, demonstrated significantly improved readability scores compared to the original radiology report and the common prompt, “Simplify:” across all four readability indexes tested (p &lt; 0.0001) (<xref ref-type="table" rid="pone.0331368.t001">Table 1</xref>, <xref ref-type="supplementary-material" rid="pone.0331368.s002">S2 Fig</xref>). The final five prompts had comparable readability scores, with aRGLs of 6.1 to 6.3, and the differences among them were not statistically significant.</p>
<table-wrap id="pone.0331368.t002" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.t002</object-id><label>Table 2</label><caption><title>Average readability grade level (aRGL) scores of the original radiology report, and outputs from the basic prompt “simplify” and the 5 best prompts. <italic>Average represents aRGL. Medians and Quartile 1 – Quartile 3 are depicted for each prompt tested on 255 radiology reports. Prompt A-E are significantly lower than both the Report and Simplify for all scales, p &lt; 0.0001. Simplify is significantly lower than the Report for all modalities, p &lt; 0.0001.</italic></title></caption>
<alternatives><graphic id="pone.0331368.t002g" mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.t002" xlink:type="simple"/><table><colgroup>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
<col align="left" valign="middle"/>
</colgroup>
<thead>
<tr>
<th align="left"/>
<th align="left">Overall</th>
<th align="left">CT</th>
<th align="left">Mammogram</th>
<th align="left">MRI</th>
<th align="left">Ultrasound</th>
<th align="left">X-ray</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Raw Report</td>
<td align="left">13.7 (11.9-15.7)</td>
<td align="left">13.9 (12.0-15.9)</td>
<td align="left">13.2 (11.7-15.5)</td>
<td align="left">13.7 (11.7-15.5)</td>
<td align="left">13.6 (11.2-16.4)</td>
<td align="left">14.1 (12.1-15.8)</td>
</tr>
<tr>
<td align="left">Simplify</td>
<td align="left">11.3 (9.5-13.4)</td>
<td align="left">11.3 (9.4-14.3)</td>
<td align="left">10.8 (9.2-12.2)</td>
<td align="left">11.8 (10.1-13.0)</td>
<td align="left">11.3 (9.3-13.5)</td>
<td align="left">11.8 (9.8-14.2)</td>
</tr>
<tr>
<td align="left">Prompt A</td>
<td align="left">6.3 (5.2-7.4)</td>
<td align="left">6.3 (5.3-7.6)</td>
<td align="left">6.4 (5.4-7.5)</td>
<td align="left">6.3 (5.4-7.2)</td>
<td align="left">6.0 (4.6-7.6)</td>
<td align="left">6.2 (5.0-7.3)</td>
</tr>
<tr>
<td align="left">Prompt B</td>
<td align="left">6.1 (4.9-7.3)</td>
<td align="left">6.2 (4.9-7.2)</td>
<td align="left">6.4 (5.0-7.6)</td>
<td align="left">6.1 (5.1-7.1)</td>
<td align="left">5.9 (4.5-7.2)</td>
<td align="left">6.0 (4.7-7.5)</td>
</tr>
<tr>
<td align="left">Prompt C</td>
<td align="left">6.0 (4.8-7.1)</td>
<td align="left">6.0 (4.7-7.1)</td>
<td align="left">6.2 (5.1-7.5)</td>
<td align="left">5.9 (4.9-6.8)</td>
<td align="left">5.6 (4.4-6.9)</td>
<td align="left">6.2 (4.8-7.8)</td>
</tr>
<tr>
<td align="left">Prompt D</td>
<td align="left">6.2 (5.2-7.5)</td>
<td align="left">6.3 (5.5-7.4)</td>
<td align="left">5.9 (5.2-7.1)</td>
<td align="left">6.4 (5.6-7.7)</td>
<td align="left">6.2 (4.9-7.6)</td>
<td align="left">6.3 (4.7-7.4)</td>
</tr>
<tr>
<td align="left">Prompt E</td>
<td align="left">6.1 (4.9-7.3)</td>
<td align="left">6.1 (5.1-7.3)</td>
<td align="left">6.3 (5.0-7.2)</td>
<td align="left">6.2 (5.2-7.3)</td>
<td align="left">6.0 (4.7-7.4)</td>
<td align="left">5.8 (4.6-7.1)</td>
</tr>
</tbody>
</table>
</alternatives></table-wrap>
<p>Given that there were no statistical differences between the five prompts, we utilized the Delphi method to decide our prompt. 2 reviewers blindly assessed 5 outputs from each of the 5 prompts on fidelity and accuracy and chose prompt D, “You are a health literacy tool. Interpret this radiology finding at the 5<sup>th</sup> grade level:”. Despite not having the lowest readability score, we chose this prompt because it best retained fidelity while clearly defining the role of the LLM and providing a goal grade level (<xref ref-type="table" rid="pone.0331368.t002">Table 2</xref>).</p>
<p>In simplifying 750 radiology reports with our selected prompt (Prompt D), all four readability indices tested showed statistically significant improvement in readability scores of findings compared to raw radiologist reports (N = 750, p &lt; 0.0001) (<xref ref-type="table" rid="pone.0331368.t001">Table 1</xref>, <xref ref-type="fig" rid="pone.0331368.g002">Fig 2</xref>, <xref ref-type="supplementary-material" rid="pone.0331368.s003">S3 Fig</xref>). The chosen prompt for our interface produced output with a median of 55 [38–72.5] words.</p>
<fig id="pone.0331368.g002" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.g002</object-id><label>Fig 2</label><caption><title>Stratification between included modalities for the main primary prompt: Prompt D.</title><p>*, **, ***, **** correspond to p &lt; 0.05, p &lt; 0.01, p &lt; 0.001, and p &lt; 0.0001, respectively. aRGLs of the final prompt and report were used.</p></caption>
<graphic mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.g002" xlink:type="simple"/></fig>
<p>Across all imaging types, our tool (utilizing prompt D) simplified radiology reports from an aRGL of 13.7 to an aRGL of 6.2 (<xref ref-type="fig" rid="pone.0331368.g002">Fig 2</xref>). CT radiology reports had an aRGL of 13.9, and our tool provided simplified outputs with an aRGL of 6.3. This simplification held across different examination types, including US (13.6 to 6.2), Mammogram (13.2 to 5.9), MRI (13.7 to 6.4), and X-ray (14.1 to 6.3; <xref ref-type="table" rid="pone.0331368.t001">Table 1</xref>).</p>
<p>With the optimized prompt, we assessed our tool for accuracy, completeness of information, inclusion of beneficial supplementary information not found in the original report, appropriate urgency, and comfort providing output without supervision using a 5-level likert-type evaluation (<xref ref-type="fig" rid="pone.0331368.g003">Fig 3</xref>). For accuracy of the information, both radiologists predominantly found most simplified reports to be free of inaccuracies or misleading details, with combined agreement or strong agreement recorded 255 times out of 300 outputs (85%). However, the radiologists indicated disagreement or strong disagreement with this sentiment for 24 outputs (8%). Regarding the inclusion of all pertinent or actionable details from the original reports in the simplified versions, the radiologists conveyed agreement or strong agreement for 249 outputs (83%). They expressed disagreement or strong disagreement with 18 (6%) outputs. The provision of beneficial supplementary information in the simplified reports not found in the original impression was found in 81 outputs (27%), while the radiologists disagreed or strongly disagreed for 189 outputs (63%). On the matter of comfortably sharing the simplified reports directly with patients without additional oversight, the radiologists expressed agreement or strong agreement to comfortably sharing reports without additional supervision in 230 outputs (76.7%), while they expressed reservations for 39 outputs (13%). The radiologists felt, in 261 of the outputs (87%), that the simplified reports adeptly communicated the required urgency. Disagreement or strong disagreement was recorded in only 8 instances (2.7%).</p>
<fig id="pone.0331368.g003" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.g003</object-id><label>Fig 3</label><caption><title>Accuracy, completeness, and inclusion of extraneous information of Rads-Lit tool.</title></caption>
<graphic mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.g003" xlink:type="simple"/></fig>
</sec>
<sec id="sec010" sec-type="conclusions">
<title>4. Discussion</title>
<p>Our proof-of-concept study presents the development and evaluation of the Rads-Lit patient interface. Through an iterative comparative assessment, we evaluated 79 distinct prompts to determine which five best simplified radiologist reports, achieving the lowest RGLs, relative to the common prompt (“Simplify”). Ultimately, we selected the prompt: “You are a health literacy tool. Interpret this radiology finding at the 5<sup>th</sup> grade level” for further testing based on our team’s previous study on the importance of context for OpenAI’s LLM [<xref ref-type="bibr" rid="pone.0331368.ref025">25</xref>]. Evaluations by 2 radiologists confirmed that the output from this prompt was accurate 85% of the time with little to no extraneous content relative to the original radiology report. While our findings suggest there is room for model refinement, they also underscore the importance of balancing the clarity of simplified reports with preserving nuanced clinical details integral to patient care. Importantly, simplified language does not equate to clinical guidance. Without proper framing, patients may misinterpret these outputs as standalone diagnostic conclusions.</p>
<p>Given the critical need to promote radiological literacy, approaches such as summary statements, [<xref ref-type="bibr" rid="pone.0331368.ref026">26</xref>] language glossaries, [<xref ref-type="bibr" rid="pone.0331368.ref027">27</xref>] structured templates with standardized lexicon [<xref ref-type="bibr" rid="pone.0331368.ref028">28</xref>,<xref ref-type="bibr" rid="pone.0331368.ref029">29</xref>], video radiology reports [<xref ref-type="bibr" rid="pone.0331368.ref030">30</xref>,<xref ref-type="bibr" rid="pone.0331368.ref031">31</xref>], and listing the radiologist’s phone number have been proposed [<xref ref-type="bibr" rid="pone.0331368.ref032">32</xref>]. This study highlights the utility of using NLP and LLMs to simplify radiology reports, as well as several areas of further research. For example, radiologists’ opinions diverged regarding the inclusion of supplementary information, with only 27% of the outputs found beneficial and a notable 63% disagreement, suggesting that a one-size-fits-all approach may not be ideal. This variability reinforces the need for clear communication that simplified reports are meant to enhance, rather than replace clinical consultation. Some patients might benefit from additional context, while others might find this supplementary information extraneous or confusing. Additionally, the collective comfort of the radiologists in sharing 76.7% of the simplified reports directly with patients indicates the tool’s potential in promoting patient autonomy and comprehension. Still, given the novelty of this technology and risk of hallucinations [<xref ref-type="bibr" rid="pone.0331368.ref033">33</xref>,<xref ref-type="bibr" rid="pone.0331368.ref034">34</xref>], it is unlikely that radiologists would trust the Rads-Lit patient interface to operate autonomously. 
Indeed, in a previous single-center survey, 76.9% of radiologists reported that they would not support AI-generated simplifications of reports without a manual check [<xref ref-type="bibr" rid="pone.0331368.ref035">35</xref>]. A safety net, possibly in the form of a reviewing radiologist, will likely still be necessary. Hospitals and health systems must consider embedding controls within such tools, such as requiring clinician review before release, clear disclaimers on patient-facing outputs, and integration with the electronic health record to ensure traceability. These institutional guardrails are critical to prevent misuse, including inappropriate self-management or misinformed clinical decision-making by patients.</p>
<p>Patients are already using LLMs like OpenAI’s ChatGPT, Google Bard, and Microsoft Bing to better understand their medical care; however, given the infinite possibility of prompts, most people are not providing additional context that they are a patient, that the chatbot should act as a health literacy tool, or that simplification should happen at a specific grade level [<xref ref-type="bibr" rid="pone.0331368.ref036">36</xref>–<xref ref-type="bibr" rid="pone.0331368.ref038">38</xref>]. Indeed, our previous work has shown that the prompt “Simplify” on ChatGPT-3.5 consistently produces excessively complicated outputs for the average American’s 7<sup>th</sup> grade reading level [<xref ref-type="bibr" rid="pone.0331368.ref013">13</xref>,<xref ref-type="bibr" rid="pone.0331368.ref017">17</xref>,<xref ref-type="bibr" rid="pone.0331368.ref018">18</xref>]. Other research from our group suggests that the level of simplification differs based on racial context: Open AI’s ChatGPT-3.5 and ChatGPT-4 simplified radiology reports at a higher reading level for those who self-identified as White or Asian, when compared to those who self-identified as Black or American Indian/Alaska Native [<xref ref-type="bibr" rid="pone.0331368.ref039">39</xref>]. These findings raise important concerns about equity and implicit bias in LLM-generated outputs. While the reasons for these disparities remain unclear, they may reflect systemic inequities embedded in the training data or variation in how different demographic identifiers interact with the model [<xref ref-type="bibr" rid="pone.0331368.ref040">40</xref>].</p>
<p>Some small studies have explored the accuracy of LLM-simplified radiology reports with variable results [<xref ref-type="bibr" rid="pone.0331368.ref041">41</xref>]. For example, using 3 fictitious radiology reports, Jeblick et al. created 15 simplified reports and found that one-third of these reports had incorrect statements, missing key medical information, or potentially misleading passages [<xref ref-type="bibr" rid="pone.0331368.ref042">42</xref>]. Tepe et al. analyzed 30 simplified radiology reports and found that their readability and understandability was significantly improved, although their accuracy in assessing the urgency of medical conditions was inadequate [<xref ref-type="bibr" rid="pone.0331368.ref043">43</xref>]. For contrast, analysis of simplified reports from 20 cardiovascular MRIs and 60 shoulder, knee, and lumbar spine MRIs found that GPT-4 produced highly accurate reports with minimal confusing or inaccurate output [<xref ref-type="bibr" rid="pone.0331368.ref044">44</xref>,<xref ref-type="bibr" rid="pone.0331368.ref045">45</xref>]. Our group has similarly shown that, in analysis of 150 mammography, X-ray, CT, MRI, and ultrasound scans, radiologists found that 83–86% of radiology reports simplified by GPT 3.5 and 4 had no errors and all essential information [<xref ref-type="bibr" rid="pone.0331368.ref016">16</xref>]. However, to our knowledge, there has been no previous work that has comprehensively assessed the accuracy, completeness of information, appropriate urgency, and comfort providing output without supervision, especially in such a large sample size with 750 radiology reports across diverse examination types. Our results suggest the importance of implementing standard prompts and guidelines for LLM-based patient education in order to maximize the utility of these tools and to improve equity in health communication.</p>
<p>Our proof-of-concept tool explores the capabilities of LLMs and suggests that these tools can be safely incorporated into radiology practice. With a median count of 55 words in this study, a radiologist should be able to review the simplified output quickly, but the benefits to patients must be weighed against these disruptions to providers, as RVUs may not accommodate for this additional review time and a separate billing code is unlikely. An example of an implementation of such a technology is in a speech recognition and reporting platform, where LLMs and NLP could readily generate a simplified summary, which the radiologist can proof, and if needed, edit. Or, this technology could be implemented in EHRs allowing a radiologist to review simplified output before signing a report. There is evidence a simplified report or summary may benefit patients and improve patient satisfaction scores, but adoption by providers and impact on providers must also be assessed. Importantly, we must address concerns about the unintended consequences. What if, for instance, these simplified reports lead to heightened anxiety? And does it truly offer time-saving benefits to referring physicians when they discuss findings with their patients? Although our work is foundational, it’s essential to be proactive in our discussion and think about the potential implications [<xref ref-type="bibr" rid="pone.0331368.ref046">46</xref>].</p>
<p>This study is not without its limitations. For one, we relied primarily on readability metrics to guide our prompt engineering. Although we utilized the Delphi method to look at the outputs of the 5 prompts with the lowest readability scores to identify the best prompt and ensure that the prompts retained fidelity and accuracy, we did not use accuracy to identify our prompt until that point. Second, the readability metrics used in this study are language and structure-focused, so these measures may not necessarily capture comprehensibility from a medical perspective. Additionally, readability metrics may not adequately capture patient literacy needs. This highlights the need for further research to refine the interface and ensure its applicability across diverse patient populations and radiology subspecialties, as well as explore the potential for personalized approaches to simplifying radiology reports, considering individual patient characteristics, such as age, education, and prior medical knowledge. Moreover, future iterations must prioritize clear guardrails and user education to ensure that patients understand simplified reports are adjunctive, not directive, and that clinical follow-up remains essential. Finally, we have utilized 2 attendings and a resident with the Likert scale to assess these prompts when in reality the incorporation of patient feedback and physician input on the simplified reports is crucial to evaluate the tool’s real-world usability and its potential for enhancing patient-provider communication. Future multi-site studies would validate these readability metrics in real-world settings, ensuring their reliability and relevance to patient outcomes.</p>
<p>It is crucial to reiterate that the Rads-Lit Tool is designed as a health literacy aid to simplify existing radiology report text, not as a diagnostic or clinical decision support system. As rightly noted by challenges in the broader AI field, these models, including the one underpinning Rads-Lit, currently lack the ‘Gestalt’ understanding required to interpret findings within the full, complex clinical context of an individual patient, especially concerning comorbidities or the relative intensity of multiple pathologies. While the tool improves access to comprehension, it does not obviate the need for follow-up with a referring clinician. Misinterpretation, such as assuming a non-urgent finding requires no action or self-treating based on the simplified language, poses real risks if safeguards are not in place. Our tool does not attempt this; its purpose is solely to make the concluded report more comprehensible to patients after it has been finalized by a radiologist.</p>
<p>We believe that this proof-of-concept tool can help begin a discussion of how to utilize such LLMs for patient-centered care in radiology. In our tool, we also showcase preliminary features (that remain untested) to allow patients to press “explain more” if they don’t understand or want to learn more about a particular sentence of the generated output and allow physicians to edit responses if they are using the tool to provide patients a better understanding of their report findings (<xref ref-type="fig" rid="pone.0331368.g004">Fig 4</xref>). As LLM solutions become more common, discussion regarding the implementation of such tools is of utmost importance.</p>
<fig id="pone.0331368.g004" position="float"><object-id pub-id-type="doi">10.1371/journal.pone.0331368.g004</object-id><label>Fig 4</label><caption><title>Rads-Lit interface.</title></caption>
<graphic mimetype="image" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.g004" xlink:type="simple"/></fig>
<p>In conclusion, this study outlines the development and preliminary evaluation of Rads-Lit, demonstrating that an AI tool with optimized prompting can significantly improve the readability of radiology reports. Our findings suggest its potential as a specialty-specific health literacy aid, shifting the onus of prompt engineering from the patient to a systematically designed interface (<xref ref-type="fig" rid="pone.0331368.g004">Fig 4</xref>). While improved patient-provider communication and patient-centered care are key goals, this study represents an initial step. Extensive further research, including direct patient feedback, validation across diverse populations and clinical settings, and assessment of real-world impact on patient understanding and outcomes, is crucial before such tools can be broadly incorporated into clinical practice [<xref ref-type="bibr" rid="pone.0331368.ref047">47</xref>]. Most importantly, any implementation of such tools must prioritize patient safety by incorporating robust safeguards to prevent misinterpretation and inappropriate self-management, ensuring that simplified reports enhance rather than replace essential clinical relationships. Continued collaboration among patients, clinicians, developers, and policymakers will be essential to responsibly harness this technology to support, rather than replace, human-centered care.</p>
</sec>
<sec id="sec011" sec-type="supplementary-material">
<title>Supporting information</title>
<supplementary-material id="pone.0331368.s001" mimetype="image/png" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.s001" xlink:type="simple">
<label>S1 Fig</label>
<caption>
<title>All prompts tested are depicted.</title>
<p>Best prompts after each stage are denoted.</p>
<p>(PNG)</p>
</caption>
</supplementary-material>
<supplementary-material id="pone.0331368.s002" mimetype="image/png" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.s002" xlink:type="simple">
<label>S2 Fig</label>
<caption>
<title>5 best prompts compared to a basic prompt “simplify.”</title>
<p>*, **, ***, **** correspond to p &lt; 0.05, p &lt; 0.01, p &lt; 0.001, and p &lt; 0.0001, respectively. Dashed line depicts 8<sup>th</sup> grade level.</p>
<p>(PNG)</p>
</caption>
</supplementary-material>
<supplementary-material id="pone.0331368.s003" mimetype="image/png" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.s003" xlink:type="simple">
<label>S3 Fig</label>
<caption>
<title>Rads-Lit vs radiologist report.</title>
<p>*, **, ***, **** correspond to p &lt; 0.05, p &lt; 0.01, p &lt; 0.001, and p &lt; 0.0001, respectively.</p>
<p>(PNG)</p>
</caption>
</supplementary-material>
<supplementary-material id="pone.0331368.s004" mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" position="float" xlink:href="info:doi/10.1371/journal.pone.0331368.s004" xlink:type="simple">
<label>S1 Data</label>
<caption>
<title>Data Plos.</title>
<p>(XLSX)</p>
</caption>
</supplementary-material>
</sec>
</body>
<back>
<ack>
<p>We thank Yale Department of Radiology for their support. We thank Arav Doshi for his help with data visualization and analysis.</p>
</ack>
<ref-list>
<title>References</title>
<ref id="pone.0331368.ref001"><label>1</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Olthof</surname> <given-names>AW</given-names></name>, <name name-style="western"><surname>de Groot</surname> <given-names>JC</given-names></name>, <name name-style="western"><surname>Zorgdrager</surname> <given-names>AN</given-names></name>, <name name-style="western"><surname>Callenbach</surname> <given-names>PMC</given-names></name>, <name name-style="western"><surname>van Ooijen</surname> <given-names>PMA</given-names></name>. <article-title>Perception of radiology reporting efficacy by neurologists in general and university hospitals</article-title>. <source>Clin Radiol</source>. <year>2018</year>;<volume>73</volume>(<issue>7</issue>):675.e1–<lpage>675.e7</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.crad.2018.03.001" xlink:type="simple">10.1016/j.crad.2018.03.001</ext-link></comment> <object-id pub-id-type="pmid">29622361</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref002"><label>2</label><mixed-citation publication-type="journal" xlink:type="simple"><article-title>Provider Obligations For Patient Portals Under The 21st Century Cures Act. Forefront Group</article-title>. <source>Health Affairs (Project Hope)</source>. <year>2022</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1377/forefront.20220513.923426" xlink:type="simple">10.1377/forefront.20220513.923426</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref003"><label>3</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Bruno</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Steele</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Carbone</surname> <given-names>J</given-names></name>, <name name-style="western"><surname>Schneider</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Posk</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Rose</surname> <given-names>SL</given-names></name>. <article-title>Informed or anxious: patient preferences for release of test results of increasing sensitivity on electronic patient portals</article-title>. <source>Health Technol (Berl)</source>. <year>2022</year>;<volume>12</volume>(<issue>1</issue>):<fpage>59</fpage>–<lpage>67</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s12553-021-00628-5" xlink:type="simple">10.1007/s12553-021-00628-5</ext-link></comment> <object-id pub-id-type="pmid">35036280</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref004"><label>4</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Mehan</surname> <given-names>WA</given-names> <suffix>Jr</suffix></name>, <name name-style="western"><surname>Gee</surname> <given-names>MS</given-names></name>, <name name-style="western"><surname>Egan</surname> <given-names>N</given-names></name>, <name name-style="western"><surname>Jones</surname> <given-names>PE</given-names></name>, <name name-style="western"><surname>Brink</surname> <given-names>JA</given-names></name>, <name name-style="western"><surname>Hirsch</surname> <given-names>JA</given-names></name>. <article-title>Immediate Radiology Report Access: A Burden to the Ordering Provider</article-title>. <source>Curr Probl Diagn Radiol</source>. <year>2022</year>;<volume>51</volume>(<issue>5</issue>):<fpage>712</fpage>–<lpage>6</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1067/j.cpradiol.2022.01.012" xlink:type="simple">10.1067/j.cpradiol.2022.01.012</ext-link></comment> <object-id pub-id-type="pmid">35193795</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref005"><label>5</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Berkman</surname> <given-names>ND</given-names></name>, <name name-style="western"><surname>Sheridan</surname> <given-names>SL</given-names></name>, <name name-style="western"><surname>Donahue</surname> <given-names>KE</given-names></name>. <article-title>Health literacy interventions and outcomes: an updated systematic review</article-title>. <source>Evid Rep Technol Assess (Full Rep)</source>. <year>2011</year>;<volume>199</volume>:<fpage>1</fpage>–<lpage>941</lpage>.</mixed-citation></ref>
<ref id="pone.0331368.ref006"><label>6</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Doshi</surname> <given-names>RH</given-names></name>, <name name-style="western"><surname>Bajaj</surname> <given-names>SS</given-names></name>, <name name-style="western"><surname>Krumholz</surname> <given-names>HM</given-names></name>. <article-title>ChatGPT: Temptations of Progress</article-title>. <source>Am J Bioeth</source>. <year>2023</year>;<volume>23</volume>(<issue>4</issue>):<fpage>6</fpage>–<lpage>8</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1080/15265161.2023.2180110" xlink:type="simple">10.1080/15265161.2023.2180110</ext-link></comment> <object-id pub-id-type="pmid">36853242</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref007"><label>7</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Sabet</surname> <given-names>CJ</given-names></name>, <name name-style="western"><surname>Bajaj</surname> <given-names>SS</given-names></name>, <name name-style="western"><surname>Stanford</surname> <given-names>FC</given-names></name>, <name name-style="western"><surname>Celi</surname> <given-names>LA</given-names></name>. <article-title>Equity in Scientific Publishing: Can Artificial Intelligence Transform the Peer Review Process?</article-title> <source>Mayo Clin Proc Digit Health</source>. <year>2023</year>;<volume>1</volume>(<issue>4</issue>):<fpage>596</fpage>–<lpage>600</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.mcpdig.2023.10.002" xlink:type="simple">10.1016/j.mcpdig.2023.10.002</ext-link></comment> <object-id pub-id-type="pmid">40206303</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref008"><label>8</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Patel</surname> <given-names>AV</given-names></name>, <name name-style="western"><surname>Jasani</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>AlAshqar</surname> <given-names>A</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>RH</given-names></name>, <name name-style="western"><surname>Amin</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Panakam</surname> <given-names>A</given-names></name>, <etal>et al</etal>. <article-title>Comparative Evaluation of Artificial Intelligence Models for Contraceptive Counseling</article-title>. <source>Digital</source>. <year>2025</year>;<volume>5</volume>(<issue>2</issue>):<fpage>10</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/digital5020010" xlink:type="simple">10.3390/digital5020010</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref009"><label>9</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Chheang</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <article-title>Artificial Intelligence to Improve Patient Understanding of Radiology Reports</article-title>. <source>Yale J Biol Med</source>. <year>2023</year>;<volume>96</volume>(<issue>3</issue>):<fpage>407</fpage>–<lpage>17</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.59249/NKOY5498" xlink:type="simple">10.59249/NKOY5498</ext-link></comment> <object-id pub-id-type="pmid">37780992</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref010"><label>10</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Lakhani</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Langlotz</surname> <given-names>CP</given-names></name>. <article-title>Automated detection of radiology reports that document non-routine communication of critical or significant results</article-title>. <source>J Digit Imaging</source>. <year>2010</year>;<volume>23</volume>(<issue>6</issue>):<fpage>647</fpage>–<lpage>57</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10278-009-9237-1" xlink:type="simple">10.1007/s10278-009-9237-1</ext-link></comment> <object-id pub-id-type="pmid">19826871</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref011"><label>11</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Qenam</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Kim</surname> <given-names>TY</given-names></name>, <name name-style="western"><surname>Carroll</surname> <given-names>MJ</given-names></name>, <name name-style="western"><surname>Hogarth</surname> <given-names>M</given-names></name>. <article-title>Text Simplification Using Consumer Health Vocabulary to Generate Patient-Centered Radiology Reporting: Translation and Evaluation</article-title>. <source>J Med Internet Res</source>. <year>2017</year>;<volume>19</volume>(<issue>12</issue>):e417. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2196/jmir.8536" xlink:type="simple">10.2196/jmir.8536</ext-link></comment> <object-id pub-id-type="pmid">29254915</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref012"><label>12</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Ramadier</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Lafourcade</surname> <given-names>M.</given-names></name> <article-title>Radiological Text Simplification Using a General Knowledge Base.</article-title> In: <name name-style="western"><surname>Gelbukh</surname> <given-names>A</given-names></name>, ed. <source>Computational Linguistics and Intelligent Text Processing. Lecture Notes in Computer Science. Springer International Publishing</source>; <year>2018</year>:<fpage>617</fpage>–<lpage>27</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-319-77116-8_46" xlink:type="simple">10.1007/978-3-319-77116-8_46</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref013"><label>13</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Amin</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Bajaj</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Chheang</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <source>Utilizing Large Language Models to Simplify Radiology Reports: a comparative analysis of ChatGPT3.5, ChatGPT4.0, Google Bard, and Microsoft Bing. Cold Spring Harbor Laboratory</source>. <year>2023</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1101/2023.06.04.23290786" xlink:type="simple">10.1101/2023.06.04.23290786</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref014"><label>14</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Sun</surname> <given-names>Z</given-names></name>, <name name-style="western"><surname>Ong</surname> <given-names>H</given-names></name>, <name name-style="western"><surname>Kennedy</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Tang</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Chen</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Elias</surname> <given-names>J</given-names></name>, <etal>et al</etal>. <article-title>Evaluating GPT4 on Impressions Generation in Radiology Reports</article-title>. <source>Radiology</source>. <year>2023</year>;<volume>307</volume>(<issue>5</issue>):e231259. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1148/radiol.231259" xlink:type="simple">10.1148/radiol.231259</ext-link></comment> <object-id pub-id-type="pmid">37367439</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref015"><label>15</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Mayes</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>. <article-title>ChatGPT-3.5, ChatGPT-4, Google Bard, and Microsoft Bing to improve health literacy and communication in pediatric populations and beyond</article-title>. <year>2023</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.2311.10075" xlink:type="simple">10.48550/arXiv.2311.10075</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref016"><label>16</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Davis</surname> <given-names>MA</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Haims</surname> <given-names>AH</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <article-title>Accuracy of ChatGPT, Google Bard, and Microsoft Bing for Simplifying Radiology Reports</article-title>. <source>Radiology</source>. <year>2023</year>;<volume>309</volume>(<issue>2</issue>):e232561. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1148/radiol.232561" xlink:type="simple">10.1148/radiol.232561</ext-link></comment> <object-id pub-id-type="pmid">37987662</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref017"><label>17</label><mixed-citation publication-type="book" xlink:type="simple"><name name-style="western"><surname>Weiss</surname> <given-names>BD</given-names></name>. <source>Health Literacy and Patient Safety: Help Patients Understand: Manual for Clinicians</source>. <publisher-name>AMA Foundation</publisher-name>. <year>2007</year>.</mixed-citation></ref>
<ref id="pone.0331368.ref018"><label>18</label><mixed-citation publication-type="book" xlink:type="simple">How to Write Easy-to-Read Health Materials. <publisher-name>U.S. National Library of Medicine</publisher-name>. <year>2016</year>. <ext-link ext-link-type="uri" xlink:href="https://www.nlm.nih.gov/medlineplus/etr.html" xlink:type="simple">https://www.nlm.nih.gov/medlineplus/etr.html</ext-link></mixed-citation></ref>
<ref id="pone.0331368.ref019"><label>19</label><mixed-citation publication-type="other" xlink:type="simple">Johnson A, Pollard T, Mark R. MIMIC-III Clinical Database. 2016. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.13026/C2XW26" xlink:type="simple">https://doi.org/10.13026/C2XW26</ext-link></mixed-citation></ref>
<ref id="pone.0331368.ref020"><label>20</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Johnson</surname> <given-names>AEW</given-names></name>, <name name-style="western"><surname>Pollard</surname> <given-names>TJ</given-names></name>, <name name-style="western"><surname>Shen</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Lehman</surname> <given-names>L-WH</given-names></name>, <name name-style="western"><surname>Feng</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Ghassemi</surname> <given-names>M</given-names></name>, <etal>et al</etal>. <article-title>MIMIC-III, a freely accessible critical care database</article-title>. <source>Sci Data</source>. <year>2016</year>;<volume>3</volume>:<fpage>160035</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/sdata.2016.35" xlink:type="simple">10.1038/sdata.2016.35</ext-link></comment> <object-id pub-id-type="pmid">27219127</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref021"><label>21</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Chen</surname> <given-names>W</given-names></name>, <name name-style="western"><surname>Durkin</surname> <given-names>C</given-names></name>, <name name-style="western"><surname>Huang</surname> <given-names>Y</given-names></name>, <name name-style="western"><surname>Adler</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Rust</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Lin</surname> <given-names>S</given-names></name>. <article-title>Simplified Readability Metric Drives Improvement of Radiology Reports: an Experiment on Ultrasound Reports at a Pediatric Hospital</article-title>. <source>J Digit Imaging</source>. <year>2017</year>;<volume>30</volume>(<issue>6</issue>):<fpage>710</fpage>–<lpage>7</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10278-017-9972-7" xlink:type="simple">10.1007/s10278-017-9972-7</ext-link></comment> <object-id pub-id-type="pmid">28484918</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref022"><label>22</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Mayes</surname> <given-names>LC</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>RH</given-names></name>. <article-title>Assessing the Efficacy of Large Language Models in Health Literacy: A Comprehensive Cross-Sectional Study</article-title>. <source>Yale J Biol Med</source>. <year>2024</year>;<volume>97</volume>(<issue>1</issue>):<fpage>17</fpage>–<lpage>27</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.59249/ZTOZ1966" xlink:type="simple">10.59249/ZTOZ1966</ext-link></comment> <object-id pub-id-type="pmid">38559461</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref023"><label>23</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Doshi</surname> <given-names>RH</given-names></name>, <name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Kapadia</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>McKenzie</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Preda-Naumescu</surname> <given-names>A</given-names></name>, <name name-style="western"><surname>Tkachenko</surname> <given-names>E</given-names></name>, <etal>et al</etal>. <article-title>Characteristics of information on inflammatory skin diseases produced by four large language models</article-title>. <source>Int J Dermatol</source>. <year>2025</year>;<volume>64</volume>(<issue>4</issue>):<fpage>773</fpage>–<lpage>5</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1111/ijd.17560" xlink:type="simple">10.1111/ijd.17560</ext-link></comment> <object-id pub-id-type="pmid">39523532</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref024"><label>24</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Pearson</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Ngo</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Ekpo</surname> <given-names>E</given-names></name>, <name name-style="western"><surname>Sarraju</surname> <given-names>A</given-names></name>, <name name-style="western"><surname>Baird</surname> <given-names>G</given-names></name>, <name name-style="western"><surname>Knowles</surname> <given-names>J</given-names></name>, <etal>et al</etal>. <article-title>Online Patient Education Materials Related to Lipoprotein(a): Readability Assessment</article-title>. <source>J Med Internet Res</source>. <year>2022</year>;<volume>24</volume>(<issue>1</issue>):e31284. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2196/31284" xlink:type="simple">10.2196/31284</ext-link></comment> <object-id pub-id-type="pmid">35014955</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref025"><label>25</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Bajaj</surname> <given-names>SS</given-names></name>, <name name-style="western"><surname>Chheang</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <article-title>Quantitative Evaluation of Large Language Models to Streamline Radiology Report Impressions: A Multimodal Retrospective Analysis</article-title>. <source>Radiology</source>. <year>2024</year>;<volume>310</volume>(<issue>3</issue>):e231593. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1148/radiol.231593" xlink:type="simple">10.1148/radiol.231593</ext-link></comment> <object-id pub-id-type="pmid">38530171</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref026"><label>26</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Kadom</surname> <given-names>N</given-names></name>, <name name-style="western"><surname>Tamasi</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Vey</surname> <given-names>BL</given-names></name>, <name name-style="western"><surname>Safdar</surname> <given-names>N</given-names></name>, <name name-style="western"><surname>Applegate</surname> <given-names>KE</given-names></name>, <name name-style="western"><surname>Sadigh</surname> <given-names>G</given-names></name>, <etal>et al</etal>. <article-title>Info-RADS: Adding a Message for Patients in Radiology Reports</article-title>. <source>J Am Coll Radiol</source>. <year>2021</year>;<volume>18</volume>(1 Pt A):<fpage>128</fpage>–<lpage>32</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jacr.2020.09.049" xlink:type="simple">10.1016/j.jacr.2020.09.049</ext-link></comment> <object-id pub-id-type="pmid">33068534</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref027"><label>27</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Martin-Carreras</surname> <given-names>T</given-names></name>, <name name-style="western"><surname>Kahn</surname> <given-names>CE</given-names> <suffix>Jr</suffix></name>. <article-title>Coverage and Readability of Information Resources to Help Patients Understand Radiology Reports</article-title>. <source>J Am Coll Radiol</source>. <year>2018</year>;<volume>15</volume>(<issue>12</issue>):<fpage>1681</fpage>–<lpage>6</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jacr.2017.11.019" xlink:type="simple">10.1016/j.jacr.2017.11.019</ext-link></comment> <object-id pub-id-type="pmid">29310924</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref028"><label>28</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Panicek</surname> <given-names>DM</given-names></name>, <name name-style="western"><surname>Hricak</surname> <given-names>H</given-names></name>. <article-title>How Sure Are You, Doctor? A Standardized Lexicon to Describe the Radiologist’s Level of Certainty</article-title>. <source>AJR Am J Roentgenol</source>. <year>2016</year>;<volume>207</volume>(<issue>1</issue>):<fpage>2</fpage>–<lpage>3</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2214/AJR.15.15895" xlink:type="simple">10.2214/AJR.15.15895</ext-link></comment> <object-id pub-id-type="pmid">27065212</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref029"><label>29</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Vincoff</surname> <given-names>NS</given-names></name>, <name name-style="western"><surname>Barish</surname> <given-names>MA</given-names></name>, <name name-style="western"><surname>Grimaldi</surname> <given-names>G</given-names></name>. <article-title>The patient-friendly radiology report: history, evolution, challenges and opportunities</article-title>. <source>Clin Imaging</source>. <year>2022</year>;<volume>89</volume>:<fpage>128</fpage>–<lpage>35</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.clinimag.2022.06.018" xlink:type="simple">10.1016/j.clinimag.2022.06.018</ext-link></comment> <object-id pub-id-type="pmid">35803159</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref030"><label>30</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Cook</surname> <given-names>TS</given-names></name>, <name name-style="western"><surname>Oh</surname> <given-names>SC</given-names></name>, <name name-style="western"><surname>Kahn</surname> <given-names>CE</given-names> <suffix>Jr</suffix></name>. <article-title>Patients’ Use and Evaluation of an Online System to Annotate Radiology Reports with Lay Language Definitions</article-title>. <source>Acad Radiol</source>. <year>2017</year>;<volume>24</volume>(<issue>9</issue>):<fpage>1169</fpage>–<lpage>74</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.acra.2017.03.005" xlink:type="simple">10.1016/j.acra.2017.03.005</ext-link></comment> <object-id pub-id-type="pmid">28433519</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref031"><label>31</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Recht</surname> <given-names>MP</given-names></name>, <name name-style="western"><surname>Westerhoff</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>AM</given-names></name>, <name name-style="western"><surname>Young</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Ostrow</surname> <given-names>D</given-names></name>, <name name-style="western"><surname>Swahn</surname> <given-names>D-M</given-names></name>, <etal>et al</etal>. <article-title>Video Radiology Reports: A Valuable Tool to Improve Patient-Centered Radiology</article-title>. <source>AJR Am J Roentgenol</source>. <year>2022</year>;<volume>219</volume>(<issue>3</issue>):<fpage>509</fpage>–<lpage>19</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2214/AJR.22.27512" xlink:type="simple">10.2214/AJR.22.27512</ext-link></comment> <object-id pub-id-type="pmid">35441532</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref032"><label>32</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Cross</surname> <given-names>NM</given-names></name>, <name name-style="western"><surname>Wildenberg</surname> <given-names>J</given-names></name>, <name name-style="western"><surname>Liao</surname> <given-names>G</given-names></name>, <name name-style="western"><surname>Novak</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Bevilacqua</surname> <given-names>T</given-names></name>, <name name-style="western"><surname>Chen</surname> <given-names>J</given-names></name>, <etal>et al</etal>. <article-title>The voice of the radiologist: Enabling patients to speak directly to radiologists</article-title>. <source>Clin Imaging</source>. <year>2020</year>;<volume>61</volume>:<fpage>84</fpage>–<lpage>9</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.clinimag.2019.09.014" xlink:type="simple">10.1016/j.clinimag.2019.09.014</ext-link></comment> <object-id pub-id-type="pmid">31986355</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref033"><label>33</label><mixed-citation publication-type="preprint" xlink:type="simple"><name name-style="western"><surname>Sallam</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Al-Mahzoum</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Alaraji</surname> <given-names>H</given-names></name>, <name name-style="western"><surname>Albayati</surname> <given-names>N</given-names></name>, <name name-style="western"><surname>Alenzei</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>AlFarhan</surname> <given-names>F</given-names></name>, <etal>et al</etal>. <article-title>Apprehension Toward Generative Artificial Intelligence in Healthcare: A Multinational Study among Health Sciences Students</article-title>. <source>MDPI AG</source>. <year>2024</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.20944/preprints202412.0340.v1" xlink:type="simple">10.20944/preprints202412.0340.v1</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref034"><label>34</label><mixed-citation publication-type="preprint" xlink:type="simple"><name name-style="western"><surname>Kim</surname> <given-names>Y</given-names></name>, <name name-style="western"><surname>Jeong</surname> <given-names>H</given-names></name>, <name name-style="western"><surname>Chen</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Li</surname> <given-names>SS</given-names></name>, <name name-style="western"><surname>Lu</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Alhamoud</surname> <given-names>K</given-names></name>, <etal>et al</etal>. <article-title>Medical Hallucination in Foundation Models and Their Impact on Healthcare</article-title>. <source>Cold Spring Harbor Laboratory</source>. <year>2025</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1101/2025.02.28.25323115" xlink:type="simple">10.1101/2025.02.28.25323115</ext-link></comment></mixed-citation></ref>
<ref id="pone.0331368.ref035"><label>35</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Davis</surname> <given-names>MA</given-names></name>, <name name-style="western"><surname>Naderi</surname> <given-names>A</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <article-title>Release of complex imaging reports to patients, do radiologists trust AI to help?</article-title> <source>Curr Probl Diagn Radiol</source>. <year>2025</year>;<volume>54</volume>(<issue>2</issue>):<fpage>147</fpage>–<lpage>50</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1067/j.cpradiol.2024.12.008" xlink:type="simple">10.1067/j.cpradiol.2024.12.008</ext-link></comment> <object-id pub-id-type="pmid">39676024</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref036"><label>36</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>. <article-title>Large language models as a source of health information: Are they patient-centered? A longitudinal analysis</article-title>. <source>Healthc (Amst)</source>. <year>2024</year>;<volume>12</volume>(<issue>1</issue>):<fpage>100731</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.hjdsi.2023.100731" xlink:type="simple">10.1016/j.hjdsi.2023.100731</ext-link></comment> <object-id pub-id-type="pmid">38141269</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref037"><label>37</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Tandar</surname> <given-names>CE</given-names></name>, <name name-style="western"><surname>Bajaj</surname> <given-names>SS</given-names></name>, <name name-style="western"><surname>Stanford</surname> <given-names>FC</given-names></name>. <article-title>Social Media and Artificial Intelligence&#8212;Understanding Medical Misinformation Through Snapchat’s New Artificial Intelligence Chatbot</article-title>. <source>Mayo Clin Proc Digit Health</source>. <year>2024</year>;<volume>2</volume>(<issue>2</issue>):<fpage>252</fpage>–<lpage>4</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.mcpdig.2024.04.004" xlink:type="simple">10.1016/j.mcpdig.2024.04.004</ext-link></comment> <object-id pub-id-type="pmid">38962215</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref038"><label>38</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Stokel-Walker</surname> <given-names>C</given-names></name>. <article-title>How patients are using AI</article-title>. <source>BMJ</source>. <year>2024</year>;<volume>387</volume>:<elocation-id>q2393</elocation-id>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj.q2393" xlink:type="simple">10.1136/bmj.q2393</ext-link></comment> <object-id pub-id-type="pmid">39562011</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref039"><label>39</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Amin</surname> <given-names>KS</given-names></name>, <name name-style="western"><surname>Forman</surname> <given-names>HP</given-names></name>, <name name-style="western"><surname>Davis</surname> <given-names>MA</given-names></name>. <article-title>Even with ChatGPT, race matters</article-title>. <source>Clin Imaging</source>. <year>2024</year>;<volume>109</volume>:<fpage>110113</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.clinimag.2024.110113" xlink:type="simple">10.1016/j.clinimag.2024.110113</ext-link></comment> <object-id pub-id-type="pmid">38552383</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref040"><label>40</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Jain</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Nundy</surname> <given-names>S</given-names></name>. <article-title>Leveraging Artificial Intelligence to Advance Health Equity in America’s Safety Net</article-title>. <source>J Gen Intern Med</source>. <year>2025</year>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11606-025-09606-3" xlink:type="simple">10.1007/s11606-025-09606-3</ext-link></comment> <object-id pub-id-type="pmid">40375041</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref041"><label>41</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Parillo</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Vaccarino</surname> <given-names>F</given-names></name>, <name name-style="western"><surname>Beomonte Zobel</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Mallio</surname> <given-names>CA</given-names></name>. <article-title>ChatGPT and radiology report: potential applications and limitations</article-title>. <source>Radiol Med</source>. <year>2024</year>;<volume>129</volume>(<issue>12</issue>):<fpage>1849</fpage>–<lpage>63</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11547-024-01915-7" xlink:type="simple">10.1007/s11547-024-01915-7</ext-link></comment> <object-id pub-id-type="pmid">39508933</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref042"><label>42</label><mixed-citation publication-type="other" xlink:type="simple">Jeblick K, Schachtner B, Dexl J. ChatGPT Makes Medicine Easy to Swallow: An Exploratory Case Study on Simplified Radiology Reports. 2022.</mixed-citation></ref>
<ref id="pone.0331368.ref043"><label>43</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Tepe</surname> <given-names>M</given-names></name>, <name name-style="western"><surname>Emekli</surname> <given-names>E</given-names></name>. <article-title>Decoding medical jargon: The use of AI language models (ChatGPT-4, BARD, microsoft copilot) in radiology reports</article-title>. <source>Patient Educ Couns</source>. <year>2024</year>;<volume>126</volume>:<fpage>108307</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.pec.2024.108307" xlink:type="simple">10.1016/j.pec.2024.108307</ext-link></comment> <object-id pub-id-type="pmid">38743965</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref044"><label>44</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Salam</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Kravchenko</surname> <given-names>D</given-names></name>, <name name-style="western"><surname>Nowak</surname> <given-names>S</given-names></name>, <name name-style="western"><surname>Sprinkart</surname> <given-names>AM</given-names></name>, <name name-style="western"><surname>Weinhold</surname> <given-names>L</given-names></name>, <name name-style="western"><surname>Odenthal</surname> <given-names>A</given-names></name>, <etal>et al</etal>. <article-title>Generative Pre-trained Transformer 4 makes cardiovascular magnetic resonance reports easy to understand</article-title>. <source>J Cardiovasc Magn Reson</source>. <year>2024</year>;<volume>26</volume>(<issue>1</issue>):<fpage>101035</fpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jocmr.2024.101035" xlink:type="simple">10.1016/j.jocmr.2024.101035</ext-link></comment> <object-id pub-id-type="pmid">38460841</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref045"><label>45</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Kuckelman</surname> <given-names>IJ</given-names></name>, <name name-style="western"><surname>Wetley</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Yi</surname> <given-names>PH</given-names></name>, <name name-style="western"><surname>Ross</surname> <given-names>AB</given-names></name>. <article-title>Translating musculoskeletal radiology reports into patient-friendly summaries using ChatGPT-4</article-title>. <source>Skeletal Radiol</source>. <year>2024</year>;<volume>53</volume>(<issue>8</issue>):<fpage>1621</fpage>–<lpage>4</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00256-024-04599-2" xlink:type="simple">10.1007/s00256-024-04599-2</ext-link></comment> <object-id pub-id-type="pmid">38270616</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref046"><label>46</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Khosla</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Amin</surname> <given-names>K</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>. <article-title>Combating Chronic Disease with Barbershop Health Interventions: A Review of Current Knowledge and Potential for Big Data</article-title>. <source>Yale J Biol Med</source>. <year>2024</year>;<volume>97</volume>(<issue>2</issue>):<fpage>239</fpage>–<lpage>45</lpage>. <comment>doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.59249/OTFP5065" xlink:type="simple">10.59249/OTFP5065</ext-link></comment> <object-id pub-id-type="pmid">38947107</object-id></mixed-citation></ref>
<ref id="pone.0331368.ref047"><label>47</label><mixed-citation publication-type="journal" xlink:type="simple"><name name-style="western"><surname>Jain</surname> <given-names>P</given-names></name>, <name name-style="western"><surname>Jain</surname> <given-names>B</given-names></name>, <name name-style="western"><surname>Doshi</surname> <given-names>R</given-names></name>, <name name-style="western"><surname>Jain</surname> <given-names>U</given-names></name>, <name name-style="western"><surname>Claypool</surname> <given-names>H</given-names></name>, <name name-style="western"><surname>Aboulatta</surname> <given-names>A</given-names></name>, <etal>et al</etal>. <article-title>Digital Health: An Opportunity to Advance Health Equity for People with Disabilities</article-title>. <source>Milbank Quarterly</source>. <year>2025</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>