<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "http://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd">
<article article-type="review-article" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">PJS</journal-id>
<journal-id journal-id-type="publisher-id">Premier Journal of Science</journal-id>
<journal-id journal-id-type="pmc">PJS</journal-id>
<journal-title-group>
<journal-title>PJ Science</journal-title>
</journal-title-group>
<issn pub-type="epub">3049-9011</issn>
<publisher>
<publisher-name>Premier Science</publisher-name>
<publisher-loc>London, UK</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.70389/PJS.100268</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>REVIEW</subject>
</subj-group>
<subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Oncology</subject><subj-group><subject>Cancers and neoplasms</subject><subj-group><subject>Gastrointestinal tumors</subject><subj-group><subject>Pancreatic cancer</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Computer and information sciences</subject><subj-group><subject>Artificial intelligence</subject><subj-group><subject>Machine learning</subject><subj-group><subject>Deep learning</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Computer and information sciences</subject><subj-group><subject>Artificial intelligence</subject><subj-group><subject>Machine learning</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Diagnostic medicine</subject><subj-group><subject>Diagnostic radiology</subject><subj-group><subject>Tomography</subject><subj-group><subject>Computed axial tomography</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Imaging techniques</subject><subj-group><subject>Diagnostic radiology</subject><subj-group><subject>Tomography</subject><subj-group><subject>Computed axial tomography</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Anatomy</subject><subj-group><subject>Exocrine glands</subject><subj-group><subject>Pancreas</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Anatomy</subject><subj-group><subject>Exocrine glands</subject><subj-group><subject>Pancreas</subject></subj-group></subj-group></subj-group></subj-group>
</article-categories>
<title-group>
<article-title>A Systematic Review on Leveraging Artificial Intelligence for Pancreatic Cancer Diagnosis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<contrib-id contrib-id-type="orcid">https://orcid.org/0009-0001-1760-2476</contrib-id>
<name>
<surname>Suneja</surname>
<given-names>Sonia</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft">Writing &#x2013; original draft</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-review-editing">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Talwar</surname>
<given-names>Rajneesh</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft">Writing &#x2013; original draft</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-review-editing">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sharma</surname>
<given-names>Manvinder</given-names>
</name>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft">Writing &#x2013; original draft</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-review-editing">Writing &#x2013; review &amp; editing</role>
</contrib>
<aff id="aff1"><label>1</label><institution>Department of Computer Science Engineering, Chitkara University</institution>, <city>Punjab</city>, <country>India</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor001"><bold>Correspondence to:</bold> Sonia Suneja, <email>dca.soniasuneja@gmail.com</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>xx</day>
<month>xx</month>
<year>xxxx</year>
</pub-date>
<pub-date pub-type="collection">
<month>03</month>
<year>2026</year>
</pub-date>
<volume>20</volume>
<issue>1</issue>
<elocation-id>100268</elocation-id>
<history>
<date date-type="received">
<day>xx</day>
<month>xx</month>
<year>xxxx</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>01</day>
<month>03</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-year>2026</copyright-year>
<copyright-holder>Sonia Suneja, Rajneesh Talwar, Manvinder Sharma</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">
<license-p>This is an open access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">Creative Commons Attribution License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="info:doi/10.70389/PJS.100268"/>
<abstract>
<p>Pancreatic ductal adenocarcinoma (PDAC) remains among the most lethal solid malignancies globally, largely due to delayed diagnosis and limited sensitivity of conventional imaging for early-stage lesions. The substantial mortality burden underscores the urgent need for improved diagnostic strategies capable of identifying subtle radiological patterns in contrast-enhanced computed tomography (CT) scans. In a structured way, all AI-based techniques developed to analyze pancreatic tumors using CT Images from 2018 to 2025 have been evaluated in this review. By conducting systematic searches of various databases, including PubMed, Web of Science, Scopus and IEEE Xplore, the eligible studies were determined. The methods for identifying studies were conducted under the principles of PRISMA 2020. Moreover, the imaging modality used was CT only. The conducted electronic searches of PubMed, Web of Science, Scopus, and IEEE Xplore identified 236 records. Screening was conducted on 195 records, after removal of 41 duplicates. Seventy-eight records were excluded following title and abstract screening. It was sought to retrieve full-text reports for 117 studies; however, 36 could not be retrieved. Out of 81 reports assessed for eligibility, 48 were excluded (20 non-CT imaging modalities; 9 non-pancreatic cancer/PDAC specific; 19 insufficient methodological detail). In total, 33 studies were included. The 33 included studies were systematically categorized into four distinct themes based on their primary focus: (i) AI-driven segmentation for pancreas tumor localization, (ii) deep learning-based tumor classification, (iii) CT-based radiomics and feature-driven analysis, and (iv) early detection models. This review consolidates current advancements in AI-driven frameworks that integrate CT imaging data, thereby enhancing diagnostic accuracy and enabling earlier identification of PDAC. 
By consolidating these recent advances in CT-based AI methods, this review aims to bridge the gap between technological innovation and clinical application in pancreatic disease diagnosis.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Pancreatic tumor</kwd>
<kwd>Pancreatic cancer diagnosis</kwd>
<kwd>Pancreatic ductal adenocarcinoma</kwd>
<kwd>Deep learning</kwd>
<kwd>Artificial intelligence</kwd>
</kwd-group>
<counts>
<fig-count count="2"/>
<table-count count="3"/>
<page-count count="10"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>Version accepted</meta-name>
<meta-value>6</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec>
<title><ext-link ext-link-type="uri" xlink:href="https://premierscience.com/wp-content/uploads/2026/18/pjs-25-1533.pdf">Source-File: pjs-25-1533.pdf</ext-link></title>
</sec>
<sec id="sec001" sec-type="intro">
<title>Introduction</title>
<p>Pancreatic cancer is one of the deadliest cancers, and its prognosis is usually poor, largely because most patients are diagnosed at an advanced stage.<sup><xref ref-type="bibr" rid="ref1">1</xref></sup> Although relatively less common than other malignancies, pancreatic cancer accounts for a disproportionately high number of cancer-related deaths. In developed nations, pancreatic cancer ranks seventh in cancer-driven deaths, according to the World Health Organization. According to predictions, pancreatic cancer will become the second leading cause of death from cancer by 2030.<sup><xref ref-type="bibr" rid="ref1">1</xref>&#x2013;<xref ref-type="bibr" rid="ref3">3</xref></sup> Gender differences are evident, as men exhibit a slightly higher incidence rate of 5.7 per 100,000 (34,530 cases) and a mortality rate of 4.5 per 100,000 (27,270 deaths), compared to women, who have an incidence rate of 4.9 per 100,000 (31,910 cases) and a mortality rate of 4.0 per 100,000.<sup><xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref></sup> This underscores the urgent need for improved diagnostic and treatment methodologies.<sup><xref ref-type="bibr" rid="ref4">4</xref></sup> Traditional imaging modalities, including CT scans, have been integral in diagnosing pancreatic cancer; however, they are often limited by their inability to detect small or early-stage tumors accurately.<sup><xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref></sup> Over the last few years, image analysis by means of artificial intelligence (AI), particularly deep learning (DL), has shown very favorable performance as an aid to routine radiological assessment. Systems powered by AI may be able to reliably and repeatedly perform important feature identification, fine texture assessment, volumetric quantification, and risk classification that is not immediately visible to the human eye. 
The new developments will assist in many important activities, such as early detection of tumors and localization of lesions. This review assesses the performance of these techniques, outlining their respective strengths and limitations. Distinct from earlier reviews, this paper systematically classifies AI models based on their functional approaches, such as segmentation, classification, or hybrid techniques, while also emphasizing the datasets employed, evaluation metrics applied, and their practical relevance to clinical settings. Furthermore, the study proposes future research avenues that will help to bridge the divergence between technological innovation and clinical applications.</p>
<p>The primary objective of this study is to assess the current state of AI models for pancreatic cancer diagnosis and to compare different AI frameworks and techniques. It also identifies key challenges and opens up new avenues for future research. It highlights a few essential gaps that need to be filled for the appropriate design and implementation of innovative AI-based solutions in the clinical setting.</p>
<p>The organization of the remaining paper is as follows. Specifically, the section &#x201C;Pancreas Tumors in the Human Body&#x201D; outlines pancreatic tumors in the human body. The &#x201C;Methods&#x201D; section presents the selected studies and summarizes their characteristics. The &#x201C;Results&#x201D; section reports the results of the systematic analysis. Finally, a comparative analysis of the AI models used is provided in the section &#x201C;Comparative Analysis of AI Techniques&#x201D;. The section &#x201C;Challenges&#x201D; discusses the challenges and limitations associated with current diagnostic approaches. Sections &#x201C;Future Directions and Scope&#x201D; and &#x201C;Conclusions&#x201D; outline the future research directions and conclude the study.</p>
</sec>
<sec id="sec002">
<title>Pancreas Tumors in the Human Body</title>
<p>Tumors are masses of abnormal pancreatic cells that proliferate uncontrollably and disrupt normal tissue function, leading to malignant transformation and pancreatic cancer. The most common type is ductal adenocarcinoma. In this type, the cells lining the ducts of the pancreas are involved.<sup><xref ref-type="bibr" rid="ref7">7</xref></sup> Patients with pancreatic cancer may experience symptoms such as abdominal pain, decrease in appetite, reduced energy levels, weight loss, and jaundice. Unfortunately, the disease is often detected after it has metastasized.<sup><xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref7">7</xref></sup></p>
<sec id="sec002-1">
<title>Exocrine Pancreatic Cancer</title>
<p>The pancreas is an elongated retroperitoneal gland located in the upper abdomen, extending from the duodenal loop to the splenic hilum, and anatomically divided into the head, neck, body, and tail. Its exocrine component constitutes the majority of pancreatic tissue and is responsible for the production and secretion of digestive enzymes into the pancreatic ducts, facilitating nutrient breakdown and absorption in the duodenum. Exocrine pancreatic cancer arises from the epithelial cells involved in enzyme production and ductal transport. The exocrine portion comprises acinar cells and ductal structures, both of which contribute to digestive enzyme synthesis and secretion. The most common histological subtype is pancreatic ductal adenocarcinoma (PDAC), which originates from the epithelial lining of the pancreatic ducts. PDAC is highly aggressive and frequently diagnosed at advanced stages due to the nonspecific or subtle nature of early clinical symptoms. These tumors demonstrate a strong propensity for early metastasis, particularly to the liver, lungs, and peritoneal cavity.<sup><xref ref-type="bibr" rid="ref8">8</xref></sup> </p>
</sec>
<sec id="sec002-2">
<title>Endocrine Pancreatic Cancer</title>
<p>The endocrine pancreas is the part of the pancreas made up of hormone-producing cells. These cells are arranged in the &#x201C;islets of Langerhans&#x201D; &#x2014; isolated groups, or islets, of cells within the pancreas. Endocrine cells secrete hormones, including insulin and glucagon, directly into the bloodstream; these hormones regulate glucose metabolism and overall metabolism.<sup><xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref9">9</xref></sup></p>
<p>Endocrine pancreatic tumors are derived from the islet cells of the pancreas. Pancreatic neuroendocrine tumors are another name for them. The illness of the pancreas&#x2019;s endocrine function is rare compared to the exocrine tumors. Certain tumors produce hormones in excess and are associated with a clinical syndrome. Other tumors may be non-functional, which may be asymptomatic over a long time. Prognosis and treatment options vary significantly; certain tumors may be indolent and possibly asymptomatic.<sup><xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref10">10</xref></sup></p>
</sec>
</sec>
<sec id="sec003">
<title>Methods</title>
<sec id="sec003-1">
<title>Study Selection Process</title>
<p>In accordance with PRISMA 2020 guidelines, a systematic literature search was executed in four electronic databases: IEEE Xplore, PubMed, Scopus, and Web of Science to find relevant studies. Initially, there were 236 records identified for inclusion, of which 41 duplicate or clearly irrelevant records were removed before screening, and 195 records were screened based on title and abstract. A total of 78 records were excluded as they were either not related to AI-based pancreatic cancer diagnosis or did not involve CT imaging. The remaining 117 reports were sought for full-text retrieval, of which full text for 36 articles was not available as access was restricted. An eligibility assessment was done for 81 full-text articles, of which 48 investigations were excluded according to the predefined exclusion criteria. Ultimately, 33 studies were found to have met all eligibility criteria and were thus included in qualitative synthesis.</p>
<fig id="F1" position="float">
<object-id pub-id-type="doi">10.70389/journal.pjs.100268.g001</object-id>
<label>Fig 1 |</label>
<caption><title>Number of included papers per year</title></caption>
<graphic xlink:href="pjs-25-1533-Figure-1.jpg" mimetype="image" mime-subtype="jpeg"/>
</fig>
<p>As illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>, a significant upward trend is observed, indicating an increasing number of publications over time. These studies were shortlisted based on their title, abstract, and full text. The study followed the PRISMA 2020 flow diagram in <xref ref-type="fig" rid="F2">Figure 2</xref>, which outlines the identification, screening, eligibility assessment, and inclusion process.<sup><xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref></sup> The review protocol was not prospectively registered. Full-text articles of potentially eligible studies were independently assessed by two reviewers against the predefined inclusion criteria.</p>
<fig id="F2" position="float">
<object-id pub-id-type="doi">10.70389/journal.pjs.100268.g002</object-id>
<label>Fig 2 |</label>
<caption><title>Flow chart of the study selection process according to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA).<sup><xref ref-type="bibr" rid="ref12">12</xref></sup></title></caption>
<graphic xlink:href="pjs-25-1533-Figure-2.jpg" mimetype="image" mime-subtype="jpeg"/>
</fig>
</sec>
<sec id="sec003-2">
<title>Search String</title>
<p>A systematic review of the literature identified studies of AI-based pancreatic cancer diagnosis with CT imaging. A core search strategy was created and modified for each database to conform to specific syntax and indexing rules. All searches were conducted manually on 15 July 2025. No automation tools were used in any of the searches. All databases were consistently filtered for language (English) and publication year (2018&#x2013;2025). <xref ref-type="table" rid="T1">Table 1</xref> summarizes the search strategy and the filters applied for each database.</p>
<table-wrap id="T1">
<label>Table 1 |</label>
<caption><title>Database-specific Search String and Filters</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="left"><bold>Database</bold></th>
<th valign="top" align="left"><bold>Search Date</bold></th>
<th valign="top" align="left"><bold>Search String</bold></th>
<th valign="top" align="left"><bold>Filters Applied</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><p>PubMed</p></td>
<td valign="top" align="left"><p>15 July 2025</p></td>
<td valign="top" align="left"><p>((&#x201C;Pancreatic Cancer&#x201D; OR &#x201C;Pancreatic Ductal Adenocarcinoma&#x201D; OR &#x201C;PDAC&#x201D;)</p>
<p>AND</p>
<p>(&#x201C;deep learning&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;artificial intelligence&#x201D;)</p>
<p>AND</p>
<p>(&#x201C;CT scan&#x201D; OR &#x201C;computed tomography&#x201D;))</p>
<p> </p></td>
<td valign="top" align="left"><p>Language = English; Publication Years = 2018&#x2013;2025</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>Scopus</p></td>
<td valign="top" align="left"><p>15 July 2025</p></td>
<td valign="top" align="left"><p>(TITLE-ABS-KEY (&#x201C;Pancreatic Cancer&#x201D; OR &#x201C;Pancreatic Ductal Adenocarcinoma&#x201D; OR &#x201C;PDAC&#x201D;)</p>
<p>AND</p>
<p>TITLE-ABS-KEY (&#x201C;deep learning&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;artificial intelligence&#x201D;)</p>
<p>AND</p>
<p>TITLE-ABS-KEY(&#x201C;CT scan&#x201D; OR &#x201C;computed tomography&#x201D;))</p></td>
<td valign="top" align="left"><p>Language = English; publication years = 2018&#x2013;2025</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>Web of Science</p></td>
<td valign="top" align="left"><p>15 July 2025</p></td>
<td valign="top" align="left"><p>TS= (&#x201C;Pancreatic Cancer&#x201D; OR &#x201C;Pancreatic Ductal Adenocarcinoma&#x201D; OR &#x201C;PDAC&#x201D;)</p>
<p>AND</p>
<p>TS= (&#x201C;CT scan&#x201D; OR &#x201C;computed tomography&#x201D;)</p>
<p>AND</p>
<p>TS= (&#x201C;deep learning&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;artificial intelligence&#x201D;)</p></td>
<td valign="top" align="left"><p>Language = English; publication years = 2018&#x2013;2025; source type = Article</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>IEEE Xplore</p></td>
<td valign="top" align="left"><p>15 July 2025</p></td>
<td valign="top" align="left"><p>(&#x201C;Pancreatic Cancer&#x201D; OR &#x201C;Pancreatic Ductal Adenocarcinoma&#x201D; OR &#x201C;PDAC&#x201D;)</p>
<p>AND</p>
<p>(&#x201C;CT scan&#x201D; OR &#x201C;computed tomography&#x201D;)</p>
<p>AND</p>
<p>(&#x201C;deep learning&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;artificial intelligence&#x201D;)</p></td>
<td valign="top" align="left"><p>Language = English; publication years = 2018&#x2013;2025; Source type = Journal and Conference</p>
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec003-3">
<title>Inclusion Criteria</title>
<p>Studies were included in the systematic review if they met all of the following criteria:</p>
<p>(a) The study was published in peer-reviewed journals or presented at reputable international conferences.</p>
<p>(b) The article was written in the English Language.</p>
<p>(c) The study was published between January 2018 and July 2025, reflecting recent advances in AI.</p>
<p>(d) The title or abstract explicitly contained keywords related to pancreatic cancer, PDAC and AI or DL.</p>
<p>(e) The study focused on AI-based diagnosis, detection, segmentation, or classification of pancreatic cancer.</p>
<p>(f) The imaging modality used was CT only.</p>
<p>(g) The study reported original experimental results, including model architecture, dataset characteristics, and performance metrics.</p>
</sec>
<sec id="sec003-4">
<title>Exclusion Criteria</title>
<p>Studies were excluded if any of the following conditions applied:</p>
<p>(a) The article was published in a language other than English.</p>
<p>(b) The study was published before 2018.</p>
<p>(c) The full text of the article was unavailable.</p>
<p>(d) The study did not involve pancreatic cancer or PDAC.</p>
<p>(e) The study relied exclusively on non-CT imaging modalities, which were excluded (e.g., Magnetic Resonance Imaging (MRI), Positron Emission Tomography (PET) or Endoscopic Ultrasound (EUS)).</p>
<p>(f) The study focused on biopsy-based, genomic, histopathological, or non-imaging approaches.</p>
<p>(g) The study did not employ machine learning (ML) or deep learning (DL) techniques.</p>
<p>(h) Review articles, editorials, letters, abstracts without full papers, and non-experimental studies were excluded.</p>
</sec>
<sec id="sec003-5">
<title>Risk of Bias Assessment Using QUADAS-2</title>
<p>An assessment of diagnostic accuracy studies was conducted using the QUADAS-2 tool in terms of methodological quality and risk of bias. The authors performed descriptive comparisons across included studies for the risk of bias of the four QUADAS-2 domains: patient selection, index test, reference standard, and flow and timing. Some risk of bias was identified across domains, and concerns regarding applicability were noted for the first three domains. The level of included studies was assessed using QUADAS-2. Results of this assessment are presented with descriptive summaries. The outcome of this evaluation assisted in the interpretation of reported findings.<sup><xref ref-type="bibr" rid="ref13">13</xref></sup></p>
</sec>
<sec id="sec003-6">
<title>PROBAST Assessment for Prediction Models</title>
<p>Prediction and classification studies were evaluated using the Prediction Model Risk of Bias Assessment Tool (PROBAST). The assessment covered four domains: participants, predictors, outcome, and analysis. Given the heterogeneity of included studies, PROBAST was applied descriptively at the review level to identify common sources of bias rather than to generate pooled scores. The findings were used to contextualize the reliability and generalizability of reported model performance.<sup><xref ref-type="bibr" rid="ref14">14</xref></sup></p>
</sec>
<sec id="sec003-7">
<title>Radiomics Quality Score Assessment</title>
<p>The radiomics quality score (RQS) was applied only to radiomics-based studies to assess their methodological rigor and reproducibility. Assessment was done on important factors like quality of imaging protocol, stability of radiomics features, validation strategy, demonstration of clinical utility, and transparency of the protocol. The overall methodological quality was qualitatively interpreted based on RQS criteria.<sup><xref ref-type="bibr" rid="ref15">15</xref></sup></p>
</sec>
<sec id="sec003-8">
<title>Data Extraction and Synthesis</title>
<p>Data were extracted independently from each included study using a predefined structured format. The variables extracted included year of publication, dataset features (sample size, source, type), imaging details, model type (ML, DL, hybrid), segmentation/classification, performance metrics (accuracy, sensitivity, specificity, area under curve (AUC), dice similarity coefficient (DSC), f1-score), and validation strategy.<sup><xref ref-type="bibr" rid="ref16">16</xref></sup> Due to this methodological heterogeneity in studies (e.g., datasets, model architectures, evaluation metrics), a meta-analysis of results was neither possible nor desirable. The review synthesized studies in tabular form, developed for this review.</p>
</sec>
</sec>
<sec id="sec004">
<title>Results</title>
<p>This paper surveys a total of 33 research studies from 2018 to 2025, focusing on PDAC diagnosis. In this section, we describe various AI-Driven Segmentation models for tumor localization, DL models for tumor classification, radiomics-based approaches using CT imaging, and early detection models. The performance of each category was evaluated using standard metrics such as DSC, Sensitivity, Specificity and AUC for clinical utility and feasibility of deployment.</p>
<sec id="sec004-1">
<title>AI-Driven Segmentation for Pancreas Tumor Localization</title>
<p>This review examined how linear self-attention was integrated with the nnU-Net. Using the MSD dataset, a DSC of 88.3% was achieved. The performance of the ADAU-Net was assessed as well. A DSC of 83.76% was achieved on the NIH dataset.<sup><xref ref-type="bibr" rid="ref17">17</xref></sup> Deep Q-networks<sup><xref ref-type="bibr" rid="ref18">18</xref></sup> combined with U-Net architectures achieved a DSC of 86.93% on the NIH pancreas segmentation dataset.<sup><xref ref-type="bibr" rid="ref10">10</xref></sup> An average DSC of 0.70 was achieved for pancreatic subregion segmentation.</p>
</sec>
<sec id="sec004-2">
<title>Tumor Classification Using Deep Learning Models</title>
<p>Hybrid architectures such as the Mutual Information Minimization and Cross-Modal Fusion Network (MIM-CMFNet) framework, which combined mutual information minimization and cross-modal fusion, achieved a dice coefficient of 73.14%.<sup><xref ref-type="bibr" rid="ref19">19</xref></sup> A CAD tool integrating five Convolutional Neural Network (CNN) classifiers<sup><xref ref-type="bibr" rid="ref20">20</xref></sup> reported 89.9% sensitivity and 95.9% specificity for detecting small pancreatic malignancies (&#x003C;2 cm), demonstrating the strength of ensemble learning for identifying minute and otherwise easily missed tumors. Despite high classification metrics, the models often struggle with imbalanced datasets and generalizability across different imaging centers, pointing to the need for larger and more diverse training cohorts.</p>
</sec>
<sec id="sec004-3">
<title>Radiomics in Pancreatic Cancer Diagnosis</title>
<p>Radiomics-based studies extracted quantitative imaging features (QIFs) from pre-diagnostic CT scans, using neighborhood component analysis (NCA) and principal component analysis (PCA), achieving 94%&#x2013;95% accuracy for tumor detection.<sup><xref ref-type="bibr" rid="ref21">21</xref></sup> Radiomics-based analysis of texture, shape and volumetric features has been effectively utilized in tumor heterogeneity assessment, as seen in hybrid architectures like DSD-ASPP-Net<sup><xref ref-type="bibr" rid="ref22">22</xref></sup>, achieving a DSC of 91.64% on the local hospital dataset, suggesting that texture and spatial information can significantly enhance both segmentation and classification performance. However, reproducibility remains a concern due to differences in imaging protocols, scanner types and feature extraction pipelines, emphasizing the need for standardization prior to clinical translation.</p>
</sec>
<sec id="sec004-4">
<title>Early Detection Models</title>
<p>The large-scale pancreatic detection model addressed the challenge of identifying small and isodense tumors using non-contrast CT scans, achieving an AUC of 0.986 to 0.996.<sup><xref ref-type="bibr" rid="ref23">23</xref></sup> Such high diagnostic accuracy at early stages of disease progression suggests that AI models can significantly enhance the likelihood of successful therapeutic interventions. Nevertheless, the reliance on subtle imaging features demands highly sensitive models, and the generalizability of these early detection models needs further validation across multicenter datasets and diverse demographic representation.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup></p>
</sec>
<sec id="sec004-5">
<title>Reporting Quality Assessment (CLAIM 2024)</title>
<p>Assessment using the CLAIM 2024 checklist revealed sub-optimal adherence to several essential reporting standards among the 33 included studies. External validation was explicitly reported in only 5 of 33 studies (15.2%), while calibration analyses were described in 2 studies (6.1%). Measures of statistical uncertainty, such as confidence intervals or variance estimates for performance metrics, were provided in 8 studies (24.2%). Public availability of source code, trained model weights, or reproducible pipelines was reported in 3 studies (9.1%). Robustness analyses, including evaluation under dataset shift conditions such as multicenter or cross-institutional validation, were performed in only 4 studies (12.1%). Collectively, these findings highlight significant limitations in transparency, reproducibility, and clinical generalizability across current CT-based AI research for pancreatic cancer detection.</p>
</sec>
<sec id="sec004-6">
<title>Stratified Performance by Validation Type</title>
<p>Only a small subset of studies was externally validated (<italic>n </italic>= 5). Performance trends on external validation datasets were heterogeneous, rather than displaying a clear pattern of inferior performance. According to Javed et al.<sup><xref ref-type="bibr" rid="ref17">17</xref></sup>, a minor decrease in classification accuracy was noted for internal (93%) and external (89.3%) datasets.<sup><xref ref-type="bibr" rid="ref25">25</xref></sup> The public cohort from Chen et al.<sup><xref ref-type="bibr" rid="ref21">21</xref></sup> was relatively stable compared to a large institutional cohort.<sup><xref ref-type="bibr" rid="ref20">20</xref></sup> Cao et al.<sup><xref ref-type="bibr" rid="ref23">23</xref></sup> reported highly consistent AUC values (0.986&#x2013;0.996) on large multi-institutional validation datasets. According to Ramaekers et al.<sup><xref ref-type="bibr" rid="ref26">26</xref></sup>, a high AUC on an external dataset (0.99) was accompanied by reduced specificity. Overall, studies involving external evaluation showed classification accuracy 89.3%&#x2013;93%, AUC 0.81&#x2013;0.99, and segmentation DSC 0.64&#x2013;0.86.<sup><xref ref-type="bibr" rid="ref27">27</xref></sup></p>
</sec>
</sec>
<sec id="sec005">
<title>Comparative Analysis of AI Techniques</title>
<p>The area of pancreatic cancer diagnosis using AI and advanced CT imaging has evolved rapidly in recent years. A multitude of research organizations have contributed significantly to this field. <xref ref-type="table" rid="T2">Tables 2</xref> and <xref ref-type="table" rid="T3">3</xref> summarize studies published between 2018 and 2025, focusing on detection, segmentation, and classification of pancreatic cancer using DL and ML techniques.</p>
<table-wrap id="T2">
<label>Table 2 |</label>
<caption><title>Overview of CT-based machine learning and deep learning studies for pancreatic tumor segmentation</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="left"><p><bold>Study/</bold><bold>Year</bold></p></th>
<th valign="top" align="left"><p><bold>Dataset Source</bold></p></th>
<th valign="top" align="left"><p><bold>Patient (<italic>n</italic>)</bold></p></th>
<th valign="top" align="left"><p><bold>CT scans (<italic>n</italic>)</bold></p></th>
<th valign="top" align="left"><p><bold>Ground Truth</bold></p></th>
<th valign="top" align="left"><p><bold>Validation Type</bold></p></th>
<th valign="top" align="left"><p><bold>Model and Architecture</bold></p></th>
<th valign="top" align="left"><p><bold>Primary Metric (DSC)</bold></p></th>
<th valign="top" align="left"><p><bold>95% CI</bold></p></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><p>(Qiu et al., 2024)<sup><xref ref-type="bibr" rid="ref28">28</xref></sup></p></td>
<td valign="top" align="left"><p>MSD + NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>2537 (MSD), 82 (NIH)</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Cross-dataset validation</p></td>
<td valign="top" align="left"><p>DL cascading model</p></td>
<td valign="top" align="left"><p>59.24% (MSD), 87.63% (NIH)</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Amiri et al., 2024)<sup><xref ref-type="bibr" rid="ref29">29</xref></sup></p></td>
<td valign="top" align="left"><p>Pancreas sub-region + Duct dataset</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82 +37</p></td>
<td valign="top" align="left"><p>Single radiologist manual annotation</p></td>
<td valign="top" align="left"><p>Internal dataset validation</p></td>
<td valign="top" align="left"><p>Reinforcement learning-based anatomical maps for pancreas sub-region and duct segmentation</p></td>
<td valign="top" align="left"><p>70%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Mukherjee et al., 2023)<sup><xref ref-type="bibr" rid="ref27">27</xref></sup></p></td>
<td valign="top" align="left"><p>TCIA + MSD</p></td>
<td valign="top" align="left"><p>41 (TCIA), 152 (MSD)</p></td>
<td valign="top" align="left"><p>41 + 152 Volumes</p></td>
<td valign="top" align="left"><p>Public dataset expert annotation</p></td>
<td valign="top" align="left"><p>Public cross-dataset validation</p></td>
<td valign="top" align="left"><p>Bounding-box-based 3D convolutional neural network (CNN)</p></td>
<td valign="top" align="left"><p> 84.0 &#x00B1; 8% (TCIA),</p>
<p>82 &#x00B1; 6 % (MSD)</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Mahmoudi et al., 2022)<sup><xref ref-type="bibr" rid="ref30">30</xref></sup></p></td>
<td valign="top" align="left"><p>MSD + University Hospital</p></td>
<td valign="top" align="left"><p>138 (MSD) + 19 (Hospital)</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Histopathology + Radiologist annotation</p></td>
<td valign="top" align="left"><p>External institutional validation</p></td>
<td valign="top" align="left"><p>Hybrid model that combines Attention U-Net and Texture Attention U-Net (TAU-Net)</p></td>
<td valign="top" align="left"><p>72.7% (Pancreas), 60.6% (PDAC)</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(M. Li et al., 2022)<sup><xref ref-type="bibr" rid="ref31">31</xref></sup> (AX-Unet)</p></td>
<td valign="top" align="left"><p>NIH + MSD</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82 (NIH), NR MSD</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Cross-dataset validation</p></td>
<td valign="top" align="left"><p>AX-Unet</p></td>
<td valign="top" align="left"><p>87.7 &#x00B1; 3.8% (NIH), 85.9 &#x00B1; 5.1% (MSD)</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(M. Li et al., 2022)<sup><xref ref-type="bibr" rid="ref31">31</xref></sup> (ADAU-Net)</p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Attention-guided Duplex Adversarial U-Net (ADAU-Net)</p></td>
<td valign="top" align="left"><p>83.76%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(M. Li et al., 2021)<sup><xref ref-type="bibr" rid="ref32">32</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Multi-level pyramidal pooling residual U-Net integrated with an adversarial mechanism</p></td>
<td valign="top" align="left"><p>81.36%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Wang et al., 2021)<sup><xref ref-type="bibr" rid="ref33">33</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>View Adaptive 3D U-Net (VA-3DUNet)</p></td>
<td valign="top" align="left"><p>86.19%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Tian et al., 2021)<sup><xref ref-type="bibr" rid="ref34">34</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Markov Chain Monte Carlo (MCMC) guided convolutional neural network (CNN) approach</p></td>
<td valign="top" align="left"><p>78.13%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Hu et al., 2021)<sup><xref ref-type="bibr" rid="ref22">22</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>DenseASPP model that learns the pancreas location and probability map</p></td>
<td valign="top" align="left"><p>67.19% - 91.64%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(W. Li et al., 2021)<sup><xref ref-type="bibr" rid="ref24">24</xref></sup></p></td>
<td valign="top" align="left"><p>NIH + MSD</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82 + 281</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Cross-dataset validation</p></td>
<td valign="top" align="left"><p>MAD-Unet</p></td>
<td valign="top" align="left"><p>88.52%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Xue et al., 2021)<sup><xref ref-type="bibr" rid="ref35">35</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Cascaded multitask 3-D fully convolutional network (FCN)</p></td>
<td valign="top" align="left"><p>86.4%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Z. Chen et al., 2020)<sup><xref ref-type="bibr" rid="ref36">36</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Multi-scale feature fusion (MsFF) model</p></td>
<td valign="top" align="left"><p>87.26%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Boers et al., 2020)<sup><xref ref-type="bibr" rid="ref37">37</xref></sup></p></td>
<td valign="top" align="left"><p>Internal dataset</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>100</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>iUnet - interactive version of the U-net architecture</p></td>
<td valign="top" align="left"><p>78%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Liu et al., 2020)<sup><xref ref-type="bibr" rid="ref38">38</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Ensemble model that combines five different CNNs based on the U-Net architecture</p></td>
<td valign="top" align="left"><p> 84.10 &#x00B1; 4.91%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Man et al., 2019)<sup><xref ref-type="bibr" rid="ref18">18</xref></sup></p></td>
<td valign="top" align="left"><p>NIH</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>82</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Deep Q Network (DQN) for localization and a deformable U-Net for segmentation</p></td>
<td valign="top" align="left"><p>86.93 &#x00B1; 4.92%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T2">Table 2</xref> highlights that encoder&#x2013;decoder frameworks based on U-Net architectures are used in the majority of segmentation studies. U-Net-like architectures preserve spatial hierarchies through skip connections, which are particularly beneficial for anatomical structures with irregular boundaries. Boers et al.<sup><xref ref-type="bibr" rid="ref37">37</xref></sup> developed an interactive 3D U-Net capable of reducing slice-wise inconsistencies and the need for user re-annotation. Using volumetric convolutions and the Adam optimizer on a private cohort of CT scans, a DSC of 78% was achieved. The subsequent studies mainly focused on enhancing the representation of features through salience awareness and multi-scale refinement. Hu et al.<sup><xref ref-type="bibr" rid="ref22">22</xref></sup> proposed a framework based on DenseASPP to iteratively refine dissimilarity between the region of interest and the background. The DSC value on the NIH dataset ranges from 67.19% to 91.64%. Z. Chen et al.<sup><xref ref-type="bibr" rid="ref36">36</xref></sup> and W. Li et al.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup> employed multi-scale feature fusion strategies. More recently, studies were conducted with emphasis on cross-dataset generalization and architectural sophistication. In this regard, Mahmoudi et al.<sup><xref ref-type="bibr" rid="ref30">30</xref></sup> combined the outputs of Attention U-Net and Texture Attention U-Net (TAU-Net) for the pancreas and PDAC mass regions, respectively. They initially identified the performance gap related to organ and tumor segmentation. The reported mean DSC was approximately 0.64 for PDAC mass segmentation, highlighting the persistent performance gap between organ and tumor segmentation. The study by Amiri et al.<sup><xref ref-type="bibr" rid="ref29">29</xref></sup> extended the previously mentioned segmentation to pancreatic subregions and pancreatic ducts. 
Reinforcement learning-based anatomical navigation proposed by Amiri et al.<sup><xref ref-type="bibr" rid="ref29">29</xref></sup>, further extended the segmentation to pancreatic subregions and ducts, achieving a mean DSC of 0.70. Large-scale evaluations across Medical Segmentation Decathlon (MSD), The Cancer Imaging Archive (TCIA), and National Institute of Health (NIH) datasets by Mukherjee et al.<sup><xref ref-type="bibr" rid="ref27">27</xref></sup> and Yang et al.<sup><xref ref-type="bibr" rid="ref39">39</xref></sup> demonstrated that segmentation performance remains highly dataset-dependent, underscoring challenges related to domain shift and annotation variability.</p>
<table-wrap id="T3">
<label>Table 3 |</label>
<caption><title>Overview of CT-based machine learning and deep learning studies for pancreatic cancer classification</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="left"><p><bold>Study/Year</bold></p></th>
<th valign="top" align="left"><p><bold>Patients (<italic>n</italic>)</bold></p></th>
<th valign="top" align="left"><p><bold>CT scans (<italic>n</italic>)</bold></p></th>
<th valign="top" align="left"><p><bold>Dataset and N</bold></p></th>
<th valign="top" align="left"><p><bold>Ground Truth</bold></p></th>
<th valign="top" align="left"><p><bold>Validation Type</bold></p></th>
<th valign="top" align="left"><p><bold>Model /Architecture</bold></p></th>
<th valign="top" align="left"><p><bold>Primary Metrics</bold></p></th>
<th valign="top" align="left"><p><bold>95% CI</bold></p></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><p>(Nadeem et al., 2025)<sup><xref ref-type="bibr" rid="ref6">6</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>&#x201C;Pancreatic CT Images&#x201D; dataset on Kaggle</p></td>
<td valign="top" align="left"><p>Dataset-provided labels</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>AlexNet</p></td>
<td valign="top" align="left"><p>Acc 98.72%,</p>
<p>AUC 0.9979</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Alaca and Akme&#x015F;e, 2025)<sup><xref ref-type="bibr" rid="ref5">5</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>&#x201C;Pancreatic CT Images&#x201D; dataset on Kaggle</p></td>
<td valign="top" align="left"><p>Dataset-provided labels</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>DenseNet 121 + KNN/ RF/ SVM /Inception V3 hybrids</p></td>
<td valign="top" align="left"><p>Balanced ACC 77.5%&#x2013;92.5% </p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Alaca, 2025)<sup><xref ref-type="bibr" rid="ref7">7</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>&#x201C;Pancreatic CT Images&#x201D; dataset on Kaggle</p></td>
<td valign="top" align="left"><p>Dataset-provided labels</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>DARTS-MobileViT</p></td>
<td valign="top" align="left"><p>Acc 97.33%, F1-score 96.25%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Thanya and Jeslin, 2025)<sup><xref ref-type="bibr" rid="ref40">40</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Institutional CT</p></td>
<td valign="top" align="left"><p>Reference Standard NR</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>DeepOptimalNet</p></td>
<td valign="top" align="left"><p>Acc 98.78% &#x2013; 99.87%. F1-Score &#x2013; 97%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(J. Li et al., 2025)<sup><xref ref-type="bibr" rid="ref8">8</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Huaihe Hospital, China</p></td>
<td valign="top" align="left"><p>Reference standard NR</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>RPMSNet</p></td>
<td valign="top" align="left"><p>Acc 73.67%, Sensitivity 71.54%, precision 76.78%, F1-score 72.61%, AUC 81.03%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Mahendran et al., 2025)<sup><xref ref-type="bibr" rid="ref41">41</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Kaggle dataset Pancreatic CT Images</p></td>
<td valign="top" align="left"><p>Dataset-provided labels (NR)</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>Transformer-based PancreasNet</p></td>
<td valign="top" align="left"><p>Acc 92.4%, Specificity 90.7%, Recall 93.1%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Babaei et al., 2024)<sup><xref ref-type="bibr" rid="ref42">42</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>281</p></td>
<td valign="top" align="left"><p>MSD</p></td>
<td valign="top" align="left"><p>Public dataset annotation</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>Denoising Diffusion models (DDPMs) anomaly detection method</p></td>
<td valign="top" align="left"><p>Acc 81.6%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Mitrea et al., 2024)<sup><xref ref-type="bibr" rid="ref43">43</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Institutional CT</p></td>
<td valign="top" align="left"><p>Reference standard NR</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>Hybrid neural recognition pipeline</p></td>
<td valign="top" align="left"><p>Acc 98%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Ramaekers et al., 2024)<sup><xref ref-type="bibr" rid="ref26">26</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Internal dataset (The Netherlands), MSD dataset (USA)</p></td>
<td valign="top" align="left"><p>Radiologist + Pathology confirmation</p></td>
<td valign="top" align="left"><p>Public cross-dataset validation</p></td>
<td valign="top" align="left"><p>3D U-Net (DL Model)</p></td>
<td valign="top" align="left"><p>AUC 0.99, Sensitivity 1.00, Specificity 0.86</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(W. Chen et al., 2023)<sup><xref ref-type="bibr" rid="ref21">21</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>694</p></td>
<td valign="top" align="left"><p>Internal Dataset</p></td>
<td valign="top" align="left"><p>Reference standard NR</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>Quantitative imaging features (QIFs) extracted using NCA and PCA</p></td>
<td valign="top" align="left"><p>Acc 94%&#x2013;95%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(P. T. Chen et al., 2023)<sup><xref ref-type="bibr" rid="ref20">20</xref></sup></p></td>
<td valign="top" align="left"><p>546 (NTUH)</p></td>
<td valign="top" align="left"><p>281 (MSD), 82 (TCIA), 30 (Synapse)</p></td>
<td valign="top" align="left"><p>National Taiwan University Hospital + MSD (USA) + TCIA (USA) + Synapse (China)</p></td>
<td valign="top" align="left"><p>Pathology-confirmed PDAC</p></td>
<td valign="top" align="left"><p>Multi-center validation</p></td>
<td valign="top" align="left"><p>5CNN CAD</p></td>
<td valign="top" align="left"><p>Sensitivity 89.9%, Specificity 95.9%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Cao et al., 2023b)<sup><xref ref-type="bibr" rid="ref44">44</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>3208</p></td>
<td valign="top" align="left"><p>Shanghai Institution of Pancreatic Diseases (SIPD) Dataset</p></td>
<td valign="top" align="left"><p>Pathology-confirmed PDAC</p></td>
<td valign="top" align="left"><p>Multi-institutional validation</p></td>
<td valign="top" align="left"><p>PANDA (DL)</p></td>
<td valign="top" align="left"><p>AUC 0.986&#x2013;0.996</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Shi et al., 2023)<sup><xref ref-type="bibr" rid="ref45">45</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>71 (UNMC), 103 (MSD), 80 (TCIA)</p></td>
<td valign="top" align="left"><p>University of Nebraska Medical Centre (USA), MSD (USA), TCIA (USA)</p></td>
<td valign="top" align="left"><p>Reference standard NR</p></td>
<td valign="top" align="left"><p>Cross-dataset validation</p></td>
<td valign="top" align="left"><p>3DGAUnet + GAN classifier</p></td>
<td valign="top" align="left"><p>Not stated</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Javed et al., 2022)<sup><xref ref-type="bibr" rid="ref17">17</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>58 + 42</p></td>
<td valign="top" align="left"><p>Internal + External Dataset (USA)</p></td>
<td valign="top" align="left"><p>Histopathology-confirmed PDAC</p></td>
<td valign="top" align="left"><p>External independent validation</p></td>
<td valign="top" align="left"><p>Subregional risk prediction model</p></td>
<td valign="top" align="left"><p>Acc &#x2013; 93% (Int), 89.3% (Ext)</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Zhang et al., 2020)<sup><xref ref-type="bibr" rid="ref46">46</xref></sup></p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>2890</p></td>
<td valign="top" align="left"><p>Affiliated Hospital of Qingdao University (China)</p></td>
<td valign="top" align="left"><p>Reference standard NR</p></td>
<td valign="top" align="left"><p>Internal validation</p></td>
<td valign="top" align="left"><p>ResNet-101, Augmented Feature Pyramid Networks, Self-adaptive Feature Fusion and Dependencies Computation Module</p></td>
<td valign="top" align="left"><p>Acc &#x2013; 90.18%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Choi et al., 2020)<sup><xref ref-type="bibr" rid="ref47">47</xref></sup></p></td>
<td valign="top" align="left"><p>183</p></td>
<td valign="top" align="left"><p>NR</p></td>
<td valign="top" align="left"><p>Seoul St. Mary&#x2019;s Hospital (South Korea)</p></td>
<td valign="top" align="left"><p>Pathology + CA 19-9 levels</p></td>
<td valign="top" align="left"><p>Internal Validation</p></td>
<td valign="top" align="left"><p>Clinical-imaging predictive model</p></td>
<td valign="top" align="left"><p>AUC &#x2013; 0.71</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
<tr>
<td valign="top" align="left"><p>(Chu et al., 2019)<sup><xref ref-type="bibr" rid="ref48">48</xref></sup></p></td>
<td valign="top" align="left"><p>190</p></td>
<td valign="top" align="left"><p>380</p></td>
<td valign="top" align="left"><p>Johns Hopkins University (USA)</p></td>
<td valign="top" align="left"><p>Histopathology confirmed PDAC</p></td>
<td valign="top" align="left"><p>Internal train&#x2013;validation split (255 train / 125 validation)</p></td>
<td valign="top" align="left"><p>Radiomics + Random forest</p></td>
<td valign="top" align="left"><p>Acc &#x2013;99.2%, Sensitivity &#x2013;100%, Specificity &#x2013;98.5%, AUC &#x2013;99.9%</p></td>
<td valign="top" align="left"><p>CI NR</p></td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T3">Table 3</xref> shows a considerably broader range of model architectures. They include classical ML/DL, transformers and hybrid pipelines. Several CNN-based classifiers were trained on publicly available CT-image datasets hosted on Kaggle. Nadeem et al.<sup><xref ref-type="bibr" rid="ref6">6</xref></sup> proposed a multi-stage pipeline for multi-class pancreatic lesion classification, which involved anisotropic diffusion filtering-based preprocessing, U-Net-based watershed segmentation, and AlexNet classification, and reported very high accuracy and AUC. There has been an increase in hybrid strategies involving transfer learning combined with traditional classifiers. For example, Alaca and Akme&#x015F;e<sup><xref ref-type="bibr" rid="ref5">5</xref></sup> utilized feature extractors DenseNet121 and InceptionV3, followed by nearest neighbors, support vector machines, and random forests. The authors found balanced accuracies of up to 92.5%. In the same way, Alaca<sup><xref ref-type="bibr" rid="ref7">7</xref></sup> used DARTS-optimized MobileViT models. Institutional and clinically curated datasets remain relatively limited in the current literature. However, such studies offer a more realistic estimation of the performance. Javed, Qureshi, Deng, et al.<sup><xref ref-type="bibr" rid="ref17">17</xref></sup> and Mitrea et al.<sup><xref ref-type="bibr" rid="ref43">43</xref></sup> performed both internal and external validation of their data-source cohort studies. Although the reported accuracy values were slightly lower in externally validated cohorts, these findings provide more clinically meaningful evidence of generalizability. Prior works in these domains, such as Choi et al.<sup><xref ref-type="bibr" rid="ref47">47</xref></sup> and Chu et al.<sup><xref ref-type="bibr" rid="ref48">48</xref></sup>, were feature-engineering driven and included texture descriptors, CA19-9 biomarkers, and volumetric and radiomics features in their studies. 
Therefore, conventional ML approaches may still yield competitive results in highly clinically constrained settings with limited data availability. The classification literature shows extensive variability across datasets and validation designs.</p>
<p>Overall, segmentation studies yield relatively stable DSC values on standardized datasets, such as NIH and MSD. Segmenting a tumor is harder than segmenting a whole organ. Classification studies indicate high accuracy values leveraging publicly available datasets, with high variability in institutional cohorts of patients. This demonstrates that dataset heterogeneity and limited external validation remain major obstacles to clinical translation.</p>
</sec>
<sec id="sec006">
<title>Challenges</title>
<p>Despite notable advancements, there remains a significant challenge in detecting very small (&#x003C;1 cm) or isodense tumors, which is crucial for improving patient outcomes. Although reinforcement learning-based anatomical maps utilize attention mechanisms and probability maps to segment pancreas regions, a notable gap remains in research specifically targeting fine-grained segmentation of the pancreatic duct, which is essential for diagnosing conditions such as PDAC.<sup><xref ref-type="bibr" rid="ref11">11</xref></sup> The <italic>3DGAUnet approach </italic>enhances volumetric feature representation and provides more detailed tumor segmentation, addressing the gap in accurate and effective segmentation of PDAC and its subregions. The large-scale pancreatic cancer detection model addresses the challenge of detecting very small or isodense tumors, a significant gap in early stage detection accuracy. The model showed good potential applicability to other non-contrast CT types of early pancreatic cancer.<sup><xref ref-type="bibr" rid="ref48">48</xref></sup></p>
<sec id="sec006-1">
<title>Clinical Translations and Limitations</title>
<p>So far, most of the AI models reviewed show promising performance, but most are not clinically ready. Single-center datasets, particularly retrospective datasets, have limited generalizability due to restricted demographic and scanner diversity. External validation was infrequently performed, increasing vulnerability to domain shift when the models were used on data from different scanners, institutions, or patient groups. A lack of reporting for calibration analyses undermines confidence in probabilistic clinical decision making. Moreover, the regulatory and reporting frameworks, such as TRIPOD-AI, STARD-AI, and DECIDE-AI, were inconsistently followed, and reporting was variable. Future studies should prioritize prospective multicenter validation, calibration analysis, and decision-curve assessment to enhance clinical reliability.</p>
</sec>
<sec id="sec006-2">
<title>Generalizability and Deployment Considerations</title>
<p>In externally validated studies, performance was more heterogeneous, suggesting that generalizability cannot be taken for granted. However, generalizability depends on the diversity of datasets, model training scale, architecture, and the design of validation experiments. Increasing the number of institutions contributing to a dataset improved model performance and consistency. Essentially, models developed with single-institution datasets showed a mild decrease in performance on independent datasets. This finding indicates that multi-center prospective validation sets are necessary for controlling dataset shift and improving clinical reliability.</p>
</sec>
</sec>
<sec id="sec007">
<title>Future Directions and Scope</title>
<p>The previously reviewed literature has indicated the challenges involved in pancreatic cancer detection and segmentation. Numerous studies report similar challenges, including limited data availability, heterogeneous imaging data, generalizability, clinical translation issues, etc. To prevent current approaches from becoming obsolete, new methodologies, multimodal investigations, and more clinically relevant diagnostic models must advance the field. A promising direction involves advanced DL architectures, particularly 3D convolutional networks integrated with attention mechanisms. Recent studies show that attention-based frameworks positively impact the simultaneous segmentation of the pancreas, its subregions, and tumors. To illustrate this concept, PanSegNet was proposed by Zhang et al.<sup><xref ref-type="bibr" rid="ref50">50</xref></sup>, integrating linear self-attention modules into the encoder&#x2013;decoder of nnU-Net, which gave DSCs over 88% for all pancreatic regions on multi-center CT images of 140 cases from the MSD challenge.<sup><xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref></sup> Future research should explore hierarchical and multi-level attention, transformer-based encoders, and lightweight attention modules for clinical deployment. There is also an emerging research trend around building hybrid and efficiency-oriented models. Li et al.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup> proposed attention-augmented adversarial U-Nets, and Amiri et al.<sup><xref ref-type="bibr" rid="ref29">29</xref></sup> presented a reinforcement learning-based pancreas anatomical mapping approach that achieves promising accuracy. Moreover, they found that they can maintain a high accuracy with fewer parameters and less computation, which means the optimized model can run in a real environment with limitations. 
Going forward, hybrid model development and application on larger datasets, with patient data from multiple institutions, should be explored. Combining the different data types should be an important direction for the future that would allow a holistic disease characterization for early disease prediction and intervention. Notably, a large-scale ensemble detection model with CT and clinical features by Chen et al.<sup><xref ref-type="bibr" rid="ref20">20</xref></sup> achieved an AUC of 0.95 with good sensitivities.<sup><xref ref-type="bibr" rid="ref8">8</xref></sup> Besides, a radiomics-based early prediction framework proposed by Chen W. et al.<sup><xref ref-type="bibr" rid="ref21">21</xref></sup> predicted pancreatic cancer up to 36 months before clinical diagnosis. Further research concentrating on effective multimodal fusion methods and related longitudinal modeling can pave the way for personalized risk stratification and early intervention in the future. Beyond this, architectural designs that support model generalization and interpretability are needed. Above all, developing such strategies depends on the availability of data from large cohorts and variation among institutions. Hence, a critical milestone in a translation pathway is leveraging the federated learning (FL) paradigms. FL provides a setting to carry out multi-institutional model training without private data sharing. Importantly, future research must ensure rigorous clinical validation, explainability, and seamless workflow integration. Though there is a reported high accuracy for some experimental settings, most of these studies are based on retrospective evaluation that lacks prospective validation and an assessment of interpretability. 
Aligning future research directions explicitly with identified limitations&#x2014;such as poor external validation, limited subregion focus, and absence of decision-support integration&#x2014;may facilitate the translation of AI models into meaningful improvements in pancreatic cancer screening, diagnosis, and patient outcomes.</p>
</sec>
<sec id="sec008">
<title>Conclusion</title>
<p>The recent emergence of AI-assisted detection and characterization targeting pancreatic cancer has become commonplace in modern times. The growing interest in clinical translation represents more than a methodological advancement. According to an analysis of the research carried out, a significant improvement has been achieved in ML and DL architectures. CNN, attention-based methodologies, and hybrid frameworks have demonstrated significant progress. Pancreas segmentation, tumor detection, and early risk prediction represent major advancements in the field. In addition, these approaches have demonstrated performance characteristics in controlled study settings. However, the long-standing issues of early pancreatic cancer detection persist. Early tumor detection remains technically challenging due to class imbalance, subtle imaging features, and anatomical heterogeneity. Moreover, anatomical variability and tumor subtype heterogeneity further complicate model development. These challenges are commonly observed in pancreatic cancer and other oncological conditions. Significant challenges remain for large-scale clinical translation. Prospective challenges in multi-institutional studies include model interpretability and clinical evaluation. Additionally, other challenges include integration into clinical workflows, domain adaptation, data harmonization, and limited model generalization. We also face a challenge concerning across-protocol generalization and changes in scanners. Addressing these challenges will require the use of complex DL architectures, attention and transformer-based mechanisms, compensation for lack of data by means of federated learning and generative modelling, and setting common evaluation protocols across heterogeneous populations and imaging platforms. Most importantly, the use of AI should not be restricted to detection and segmentation. It must also evaluate treatment response, characterize the tumor and predict disease progression. 
In summary, translating AI technologies successfully could radically change how pancreatic cancer is managed, leading to better outcomes and curbing mortality in patients. Such advancements may contribute to earlier detection, personalized treatment planning, and improved patient survival outcomes.</p>
</sec>
<sec id="sec009">
<title>List of Abbreviations</title>
<def-list>
<def-item><term>AI</term> <def><p>Artificial Intelligence</p></def></def-item>
<def-item><term>AUC</term> <def><p>Area Under Curve</p></def></def-item>
<def-item><term>CNN</term> <def><p>Convolutional Neural Network</p></def></def-item>
<def-item><term>CT</term> <def><p>Computed Tomography</p></def></def-item>
<def-item><term>DL</term> <def><p>Deep Learning</p></def></def-item>
<def-item><term>DSC</term> <def><p>Dice Similarity Coefficient</p></def></def-item>
<def-item><term>EUS</term> <def><p>Endoscopic Ultrasound</p></def></def-item>
<def-item><term>FL</term> <def><p>Federated Learning</p></def></def-item>
<def-item><term>MIM-CMFNet</term> <def><p>Mutual Information Minimization and Cross-Modal Fusion Network</p></def></def-item>
<def-item><term>ML</term> <def><p>Machine Learning</p></def></def-item>
<def-item><term>MRI</term> <def><p>Magnetic Resonance Imaging</p></def></def-item>
<def-item><term>MSD</term> <def><p>Medical Segmentation Decathlon</p></def></def-item>
<def-item><term>NCA</term> <def><p>Neighborhood Component Analysis</p></def></def-item>
<def-item><term>NIH</term> <def><p>National Institutes of Health</p></def></def-item>
<def-item><term>PCA</term> <def><p>Principal Component Analysis</p></def></def-item>
<def-item><term>PDAC</term> <def><p>Pancreatic Ductal Adenocarcinoma</p></def></def-item>
<def-item><term>PET</term> <def><p>Positron Emission Tomography </p></def></def-item>
<def-item><term>PROBAST</term> <def><p>Prediction Model Risk of Bias Assessment Tool</p></def></def-item>
<def-item><term>QIFs</term> <def><p>Quantitative Imaging Features</p></def></def-item>
<def-item><term>RQS</term> <def><p>Radiomics Quality Score</p></def></def-item>
<def-item><term>TCIA</term> <def><p>The Cancer Imaging Archive</p></def></def-item>
</def-list>
</sec>
</body>
<back>
<fn-group>
<fn id="n1" fn-type="other">
<p>Additional material is published online only. To view please visit the journal online.</p>
<p><bold>Cite this as:</bold> Sonia Suneja. A Systematic Review on Leveraging Artificial Intelligence for Pancreatic Cancer Diagnosis: REVIEW. Premier Journal of Science 2026;20:100268</p>
<p><bold>DOI:</bold> <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.70389/PJS.100268">https://doi.org/10.70389/PJS.100268</ext-link></p>
</fn>
<fn id="n2" fn-type="other">
<p><bold>Ethical approval</bold></p>
<p>N/a</p>
</fn>
<fn id="n3" fn-type="other">
<p><bold>Consent</bold></p>
<p>N/a</p>
</fn>
<fn id="n4" fn-type="other">
<p><bold>Funding</bold></p>
<p>No industry funding</p>
</fn>
<fn id="n5" fn-type="conflict">
<p><bold>Conflicts of interest</bold></p>
<p>N/a</p>
</fn>
<fn id="n6" fn-type="other">
<p><bold>Author contribution</bold></p>
<p>Sonia Suneja &#x2013; Writing Review &amp; Editing</p>
</fn>
<fn id="n7" fn-type="other">
<p><bold>Guarantor</bold></p>
<p>Dr. Manvinder Sharma</p>
</fn>
<fn id="n8" fn-type="other">
<p><bold>Provenance and peer-review</bold></p>
<p>This article was unsolicited</p>
</fn>
<fn id="n9" fn-type="other">
<p><bold>Data availability statement</bold></p>
<p>N/a</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1</label><mixed-citation publication-type="journal"><string-name><surname>Siegel</surname> <given-names>RL</given-names></string-name>, <string-name><surname>Giaquinto</surname> <given-names>AN</given-names></string-name>, <string-name><surname>Jemal</surname> <given-names>A</given-names></string-name>. <article-title>Cancer statistics 2024</article-title>. <source>CA Cancer J Clin</source>. <year>2024</year>;<volume>74</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3322/caac.21820">https://doi.org/10.3322/caac.21820</ext-link></mixed-citation></ref>
<ref id="ref2"><label>2</label><mixed-citation publication-type="journal"><string-name><surname>Bray</surname> <given-names>F</given-names></string-name>, <string-name><surname>Laversanne</surname> <given-names>M</given-names></string-name>, <string-name><surname>Sung</surname> <given-names>H</given-names></string-name>, <string-name><surname>Ferlay</surname> <given-names>J</given-names></string-name>, <string-name><surname>Siegel</surname> <given-names>RL</given-names></string-name>, <string-name><surname>Soerjomataram</surname> <given-names>I</given-names></string-name>, <etal>et al</etal>. <article-title>Global cancer statistics 2022: GLOBOCAN estimates of incidence and mortality worldwide for 36 cancers in 185 countries</article-title>. <source>CA Cancer J Clin</source>. <year>2024</year>;<volume>74</volume>(<issue>3</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3322/caac.21834">https://doi.org/10.3322/caac.21834</ext-link></mixed-citation></ref>
<ref id="ref3"><label>3</label><mixed-citation publication-type="journal"><string-name><surname>Desai</surname> <given-names>K</given-names></string-name>, <string-name><surname>Baralo</surname> <given-names>B</given-names></string-name>, <string-name><surname>Kulkarni</surname> <given-names>A</given-names></string-name>, <string-name><surname>Keshava</surname> <given-names>VE</given-names></string-name>, <string-name><surname>Iqbal</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ali</surname> <given-names>H</given-names></string-name>, <string-name><surname>Prabhakaran</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Thirumaran</surname> <given-names>R</given-names></string-name>. <article-title>Cancer statistics: The United States vs. worldwide</article-title>; <year>2025</year>.</mixed-citation></ref>
<ref id="ref4"><label>4</label><mixed-citation publication-type="journal"><string-name><surname>Gerasimenko</surname> <given-names>JV</given-names></string-name>, <string-name><surname>Gerasimenko</surname> <given-names>OV</given-names></string-name>. <article-title>The role of Ca<sup>2+</sup> signalling in the pathology of exocrine pancreas</article-title>. <source>Cell Calcium</source>. <year>2023</year>;<volume>112</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ceca.2023.102740">https://doi.org/10.1016/j.ceca.2023.102740</ext-link></mixed-citation></ref>
<ref id="ref5"><label>5</label><mixed-citation publication-type="journal"><string-name><surname>Alaca</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Akme&#x015F;e</surname> <given-names>&#x00D6;F</given-names></string-name>. <article-title>Pancreatic tumor detection from CT images converted to graphs using Whale Optimization and Classification Algorithms with Transfer Learning</article-title>. <source>Int J Imaging Syst Technol</source>. <year>2025</year>;<volume>35</volume>(<issue>2</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/ima.70040">https://doi.org/10.1002/ima.70040</ext-link></mixed-citation></ref>
<ref id="ref6"><label>6</label><mixed-citation publication-type="journal"><string-name><surname>Nadeem</surname> <given-names>A</given-names></string-name>, <string-name><surname>Ashraf</surname> <given-names>R</given-names></string-name>, <string-name><surname>Mahmood</surname> <given-names>T</given-names></string-name>, <string-name><surname>Parveen</surname> <given-names>S</given-names></string-name>. <article-title>Automated CAD system for early detection and classification of pancreatic cancer using deep learning model</article-title>. <source>PLoS ONE</source>. <year>2025</year>;<volume>20</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1371/journal.pone.0307900">https://doi.org/10.1371/journal.pone.0307900</ext-link></mixed-citation></ref>
<ref id="ref7"><label>7</label><mixed-citation publication-type="journal"><string-name><surname>Alaca</surname> <given-names>Y</given-names></string-name>. <article-title>Machine learning via DARTS-optimized MobileViT models for pancreatic cancer diagnosis with graph-based deep learning</article-title>. <source>BMC Med Inform Decis Mak</source>. <year>2025</year>;<volume>25</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12911-025-02923-x">https://doi.org/10.1186/s12911-025-02923-x</ext-link></mixed-citation></ref>
<ref id="ref8"><label>8</label><mixed-citation publication-type="journal"><string-name><surname>Li</surname> <given-names>J</given-names></string-name>, <string-name><surname>Li</surname> <given-names>X</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>B</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>X</given-names></string-name>, <etal>et al</etal>. <article-title>Mesothelin expression prediction in pancreatic cancer based on multimodal stochastic configuration networks</article-title>. <source>Med Biol Eng Comput</source>. <year>2025</year>;<volume>63</volume>(<issue>4</issue>):<fpage>1117</fpage>&#x2013;<lpage>29</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11517-024-03253-2">https://doi.org/10.1007/s11517-024-03253-2</ext-link></mixed-citation></ref>
<ref id="ref9"><label>9</label><mixed-citation publication-type="journal"><string-name><surname>Daher</surname></string-name> <etal>et al</etal>., <year>2024</year></mixed-citation></ref>
<ref id="ref10"><label>10</label><mixed-citation publication-type="journal"><string-name><surname>Jannin</surname> <given-names>A</given-names></string-name>, <string-name><surname>Dessein</surname> <given-names>AF</given-names></string-name>, <string-name><surname>Do Cao</surname> <given-names>C</given-names></string-name>, <string-name><surname>Vantyghem</surname> <given-names>MC</given-names></string-name>, <string-name><surname>Chevalier</surname> <given-names>B</given-names></string-name>, <string-name><surname>Van Seuningen</surname> <given-names>I</given-names></string-name>, <etal>et al</etal>. <article-title>Metabolism of pancreatic neuroendocrine tumors: what can omics tell us?</article-title> <source>Front Endocrinol</source>. <year>2023</year>;vol.<volume>14</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fendo.2023.1248575">https://doi.org/10.3389/fendo.2023.1248575</ext-link></mixed-citation></ref>
<ref id="ref11"><label>11</label><mixed-citation publication-type="journal"><string-name><surname>Jin</surname> <given-names>D</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>NU</given-names></string-name>, <string-name><surname>Gu</surname> <given-names>W</given-names></string-name>, <string-name><surname>Lei</surname> <given-names>H</given-names></string-name>, <string-name><surname>Goel</surname> <given-names>A</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>T</given-names></string-name>. <article-title>Informatics strategies for early detection and risk mitigation in pancreatic cancer patients</article-title>. <source>Neoplasia (US)</source>. <year>2025</year>;vol.<volume>60</volume>. <publisher-name>Elsevier Inc</publisher-name>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.neo.2025.101129">https://doi.org/10.1016/j.neo.2025.101129</ext-link></mixed-citation></ref>
<ref id="ref12"><label>12</label><mixed-citation publication-type="journal"><string-name><surname>Page</surname> <given-names>MJ</given-names></string-name>, <string-name><surname>McKenzie</surname> <given-names>JE</given-names></string-name>, <string-name><surname>Bossuyt</surname> <given-names>PM</given-names></string-name>, <string-name><surname>Boutron</surname> <given-names>I</given-names></string-name>, <string-name><surname>Hoffmann</surname> <given-names>TC</given-names></string-name>, <string-name><surname>Mulrow</surname> <given-names>CD</given-names></string-name>, <etal>et al</etal>. <article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title>. <source>BMJ</source>. <year>2021</year>;vol.<volume>372</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj.n71">https://doi.org/10.1136/bmj.n71</ext-link></mixed-citation></ref>
<ref id="ref13"><label>13</label><mixed-citation publication-type="journal"><string-name><surname>Wade</surname> <given-names>R</given-names></string-name>, <string-name><surname>Corbett</surname> <given-names>M</given-names></string-name>, <string-name><surname>Eastwood</surname> <given-names>A</given-names></string-name>. <article-title>Quality assessment of comparative diagnostic accuracy studies: our experience using a modified version of the QUADAS-2 tool</article-title>. <source>Res Synth Methods</source>. <year>2013</year>;<volume>4</volume>(<issue>3</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/jrsm.1080">https://doi.org/10.1002/jrsm.1080</ext-link></mixed-citation></ref>
<ref id="ref14"><label>14</label><mixed-citation publication-type="journal"><string-name><surname>Kaul</surname> <given-names>T</given-names></string-name>, <string-name><surname>Damen</surname> <given-names>JA</given-names></string-name>, <string-name><surname>Wynants</surname> <given-names>L</given-names></string-name>, <string-name><surname>Van Calster</surname> <given-names>B</given-names></string-name>, <string-name><surname>van Smeden</surname> <given-names>M</given-names></string-name>, <string-name><surname>Hooft</surname> <given-names>L</given-names></string-name>, <etal>et al</etal>. <article-title>Assessing the quality of prediction models in health care using the Prediction model Risk Of Bias ASsessment Tool (PROBAST): an evaluation of its use and practical application</article-title>. <source>J Clin Epidemiol</source>. <year>2025</year>;<volume>181</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jclinepi.2025.111732">https://doi.org/10.1016/j.jclinepi.2025.111732</ext-link></mixed-citation></ref>
<ref id="ref15"><label>15</label><mixed-citation publication-type="journal"><string-name><surname>Spadarella</surname> <given-names>G</given-names></string-name>, <string-name><surname>Stanzione</surname> <given-names>A</given-names></string-name>, <string-name><surname>Akinci D&#x2019;Antonoli</surname> <given-names>T</given-names></string-name>, <string-name><surname>Andreychenko</surname> <given-names>A</given-names></string-name>, <string-name><surname>Fanni</surname> <given-names>SC</given-names></string-name>, <string-name><surname>Ugga</surname> <given-names>L</given-names></string-name>, <etal>et al</etal>. <article-title>Systematic review of the radiomics quality score applications: an EuSoMII Radiomics Auditing Group Initiative</article-title>. <source>Eur Radiol</source>. <year>2023</year>;<volume>33</volume>(<issue>3</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00330-022-09187-3">https://doi.org/10.1007/s00330-022-09187-3</ext-link></mixed-citation></ref>
<ref id="ref16"><label>16</label><mixed-citation publication-type="journal"><string-name><surname>Erickson</surname> <given-names>BJ</given-names></string-name>, <string-name><surname>Kitamura</surname> <given-names>F</given-names></string-name>. <article-title>Magician&#x2019;s corner: 9. performance metrics for machine learning models</article-title>. <source>Radiol Artif Intell</source>. <year>2021</year>;<volume>3</volume>(<issue>3</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1148/ryai.2021200126">https://doi.org/10.1148/ryai.2021200126</ext-link></mixed-citation></ref>
<ref id="ref17"><label>17</label><mixed-citation publication-type="journal"><string-name><surname>Javed</surname> <given-names>S</given-names></string-name>, <string-name><surname>Qureshi</surname> <given-names>TA</given-names></string-name>, <string-name><surname>Deng</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Wachsman</surname> <given-names>A</given-names></string-name>, <string-name><surname>Raphael</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Gaddam</surname> <given-names>S</given-names></string-name>, <etal>et al</etal>. <article-title>Segmentation of pancreatic subregions in computed tomography images</article-title>. <source>J Imaging</source>. <year>2022</year>;<volume>8</volume>(<issue>7</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/jimaging8070195">https://doi.org/10.3390/jimaging8070195</ext-link></mixed-citation></ref>
<ref id="ref18"><label>18</label><mixed-citation publication-type="journal"><string-name><surname>Man</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Feng</surname> <given-names>J</given-names></string-name>, <string-name><surname>Li</surname> <given-names>X</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>F</given-names></string-name>. <article-title>Deep Q learning driven CT pancreas segmentation with geometry-aware U-Net</article-title>. <source>IEEE Trans Med Imaging</source>. <year>2019</year>;<volume>38</volume>(<issue>8</issue>):<fpage>1971</fpage>&#x2013;<lpage>80</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/TMI.2019.2911588">https://doi.org/10.1109/TMI.2019.2911588</ext-link></mixed-citation></ref>
<ref id="ref19"><label>19</label><mixed-citation publication-type="journal"><string-name><surname>Karpi&#x0144;ska</surname> <given-names>M</given-names></string-name>, <string-name><surname>Czauderna</surname> <given-names>M</given-names></string-name>. <article-title>Pancreas&#x2014;its functions, disorders, and physiological impact on the mammals&#x2019; organism</article-title>. <source>Front Physiol</source>. <year>2022</year>;<volume>13</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fphys.2022.807632">https://doi.org/10.3389/fphys.2022.807632</ext-link></mixed-citation></ref>
<ref id="ref20"><label>20</label><mixed-citation publication-type="journal"><string-name><surname>Chen</surname> <given-names>PT</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>T</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>P</given-names></string-name>, <string-name><surname>Chang</surname> <given-names>D</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>KL</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>MS</given-names></string-name>, <etal>et al</etal>. <article-title>Pancreatic cancer detection on CT scans with deep learning: a nationwide population-based study</article-title>. <source>Radiology</source>. <year>2023</year>;<volume>306</volume>(<issue>1</issue>):<fpage>172</fpage>&#x2013;<lpage>82</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1148/radiol.220152">https://doi.org/10.1148/radiol.220152</ext-link></mixed-citation></ref>
<ref id="ref21"><label>21</label><mixed-citation publication-type="journal"><string-name><surname>Chen</surname> <given-names>W</given-names></string-name>, <string-name><surname>Zhou</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Asadpour</surname> <given-names>V</given-names></string-name>, <string-name><surname>Parker</surname> <given-names>RA</given-names></string-name>, <string-name><surname>Puttock</surname> <given-names>EJ</given-names></string-name>, <string-name><surname>Lustigova</surname> <given-names>E</given-names></string-name>, <etal>et al</etal>. <article-title>Quantitative radiomic features from computed tomography can predict pancreatic cancer up to 36 months before diagnosis</article-title>. <source>Clin Transl Gastroenterol</source>. <year>2023</year>;<volume>14</volume>(<issue>1</issue>):<elocation-id>e00548</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.14309/ctg.0000000000000548">https://doi.org/10.14309/ctg.0000000000000548</ext-link></mixed-citation></ref>
<ref id="ref22"><label>22</label><mixed-citation publication-type="journal"><string-name><surname>Hu</surname> <given-names>P</given-names></string-name>, <string-name><surname>Li</surname> <given-names>X</given-names></string-name>, <string-name><surname>Tian</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Tang</surname> <given-names>T</given-names></string-name>, <string-name><surname>Zhou</surname> <given-names>T</given-names></string-name>, <string-name><surname>Bai</surname> <given-names>X</given-names></string-name>, <etal>et al</etal>. <article-title>Automatic pancreas segmentation in CT images with distance-based saliency-aware DenseASPP network</article-title>. <source>IEEE J Biomed Health Inform</source>. <year>2021</year>;<volume>25</volume>(<issue>5</issue>):<fpage>1601</fpage>&#x2013;<lpage>11</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/JBHI.2020.3023462">https://doi.org/10.1109/JBHI.2020.3023462</ext-link></mixed-citation></ref>
<ref id="ref23"><label>23</label><mixed-citation publication-type="journal"><string-name><surname>Cao</surname> <given-names>K</given-names></string-name>, <string-name><surname>Xia</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Yao</surname> <given-names>J</given-names></string-name>, <string-name><surname>Han</surname> <given-names>X</given-names></string-name>, <string-name><surname>Lambert</surname> <given-names>L</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>T</given-names></string-name>, <etal>et al</etal>. <article-title>Large-scale pancreatic cancer detection via non-contrast CT and deep learning</article-title>. <source>Nat Med</source>. <year>2023a</year>;<volume>29</volume>(<issue>12</issue>):<fpage>3033</fpage>&#x2013;<lpage>43</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41591-023-02640-w">https://doi.org/10.1038/s41591-023-02640-w</ext-link></mixed-citation></ref>
<ref id="ref24"><label>24</label><mixed-citation publication-type="journal"><string-name><surname>Li</surname> <given-names>W</given-names></string-name>, <string-name><surname>Qin</surname> <given-names>S</given-names></string-name>, <string-name><surname>Li</surname> <given-names>F</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>L</given-names></string-name>. <article-title>MAD-UNet: a deep U-shaped network combined with an attention mechanism for pancreas segmentation in CT images</article-title>. <source>Med Phys</source>. <year>2021</year>;<volume>48</volume>(<issue>1</issue>):<fpage>329</fpage>&#x2013;<lpage>41</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/mp.14617">https://doi.org/10.1002/mp.14617</ext-link></mixed-citation></ref>
<ref id="ref25"><label>25</label><mixed-citation publication-type="journal"><string-name><surname>Javed</surname> <given-names>S</given-names></string-name>, <string-name><surname>Qureshi</surname> <given-names>TA</given-names></string-name>, <string-name><surname>Gaddam</surname> <given-names>S</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>L</given-names></string-name>, <string-name><surname>Azab</surname> <given-names>L</given-names></string-name>, <string-name><surname>Wachsman</surname> <given-names>AM</given-names></string-name>, <etal>et al</etal>. <article-title>Risk prediction of pancreatic cancer using AI analysis of pancreatic subregions in computed tomography images</article-title>. <source>Front Oncol</source>. <year>2022</year>;<volume>12</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fonc.2022.1007990">https://doi.org/10.3389/fonc.2022.1007990</ext-link></mixed-citation></ref>
<ref id="ref26"><label>26</label><mixed-citation publication-type="journal"><string-name><surname>Ramaekers</surname> <given-names>M</given-names></string-name>, <string-name><surname>Viviers</surname> <given-names>CGA</given-names></string-name>, <string-name><surname>Hellstr&#x00F6;m</surname> <given-names>TAE</given-names></string-name>, <string-name><surname>Ewals</surname> <given-names>LJS</given-names></string-name>, <string-name><surname>Tasios</surname> <given-names>N</given-names></string-name>, <string-name><surname>Jacobs</surname> <given-names>I</given-names></string-name>, <etal>et al</etal>. <article-title>Improved pancreatic cancer detection and localization on CT scans: a computer-aided detection model utilizing secondary features</article-title>. <source>Cancers</source>. <year>2024</year>;<volume>16</volume>(<issue>13</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers16132403">https://doi.org/10.3390/cancers16132403</ext-link></mixed-citation></ref>
<ref id="ref27"><label>27</label><mixed-citation publication-type="journal"><string-name><surname>Mukherjee</surname> <given-names>S</given-names></string-name>, <string-name><surname>Korfiatis</surname> <given-names>P</given-names></string-name>, <string-name><surname>Khasawneh</surname> <given-names>H</given-names></string-name>, <string-name><surname>Rajamohan</surname> <given-names>N</given-names></string-name>, <string-name><surname>Patra</surname> <given-names>A</given-names></string-name>, <string-name><surname>Suman</surname> <given-names>G</given-names></string-name>, <etal>et al</etal>. <article-title>Bounding box-based 3D AI model for user-guided volumetric segmentation of pancreatic ductal adenocarcinoma on standard-of-care CTs</article-title>. <source>Pancreatology</source>. <year>2023</year>;<volume>23</volume>(<issue>5</issue>):<fpage>522</fpage>&#x2013;<lpage>9</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.pan.2023.05.008">https://doi.org/10.1016/j.pan.2023.05.008</ext-link></mixed-citation></ref>
<ref id="ref28"><label>28</label><mixed-citation publication-type="journal"><string-name><surname>Qiu</surname> <given-names>D</given-names></string-name>, <string-name><surname>Ju</surname> <given-names>J</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>S</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>T</given-names></string-name>, <string-name><surname>Tu</surname> <given-names>H</given-names></string-name>, <string-name><surname>Tan</surname> <given-names>X</given-names></string-name>, <etal>et al</etal>. <article-title>A deep learning-based cascade algorithm for pancreatic tumor segmentation</article-title>. <source>Front Oncol</source>. <year>2024</year>;<volume>14</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fonc.2024.1328146">https://doi.org/10.3389/fonc.2024.1328146</ext-link></mixed-citation></ref>
<ref id="ref29"><label>29</label><mixed-citation publication-type="journal"><string-name><surname>Amiri</surname> <given-names>S</given-names></string-name>, <string-name><surname>Vrtovec</surname> <given-names>T</given-names></string-name>, <string-name><surname>Mustafaev</surname> <given-names>T</given-names></string-name>, <string-name><surname>Deufel</surname> <given-names>CL</given-names></string-name>, <string-name><surname>Thomsen</surname> <given-names>HS</given-names></string-name>, <string-name><surname>Sillesen</surname> <given-names>MH</given-names></string-name>, <etal>et al</etal>. <article-title>Reinforcement learning-based anatomical maps for pancreas subregion and duct segmentation</article-title>. <source>Med Phys</source>. <year>2024</year>;<volume>51</volume>(<issue>10</issue>):<fpage>7378</fpage>&#x2013;<lpage>92</lpage>.</mixed-citation></ref>
<ref id="ref30"><label>30</label><mixed-citation publication-type="journal"><string-name><surname>Mahmoudi</surname> <given-names>T</given-names></string-name>, <string-name><surname>Kouzahkanan</surname> <given-names>ZM</given-names></string-name>, <string-name><surname>Radmard</surname> <given-names>AR</given-names></string-name>, <string-name><surname>Kafieh</surname> <given-names>R</given-names></string-name>, <string-name><surname>Salehnia</surname> <given-names>A</given-names></string-name>, <string-name><surname>Davarpanah</surname> <given-names>AH</given-names></string-name>, <etal>et al</etal>. <article-title>Segmentation of pancreatic ductal adenocarcinoma (PDAC) and surrounding vessels in CT images using deep convolutional neural networks and texture descriptors</article-title>. <source>Sci Rep</source>. <year>2022</year>;<volume>12</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41598-022-07111-9">https://doi.org/10.1038/s41598-022-07111-9</ext-link></mixed-citation></ref>
<ref id="ref31"><label>31</label><mixed-citation publication-type="journal"><string-name><surname>Li</surname> <given-names>M</given-names></string-name>, <string-name><surname>Lian</surname> <given-names>F</given-names></string-name>, <string-name><surname>Li</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Guo</surname> <given-names>S</given-names></string-name>. <article-title>Attention-guided duplex adversarial U-net for pancreatic segmentation from computed tomography images</article-title>. <source>J Appl Clin Med Phys</source>. <year>2022</year>;<volume>23</volume>(<issue>4</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/acm2.13537">https://doi.org/10.1002/acm2.13537</ext-link></mixed-citation></ref>
<ref id="ref32"><label>32</label><mixed-citation publication-type="journal"><string-name><surname>Li</surname> <given-names>M</given-names></string-name>, <string-name><surname>Lian</surname> <given-names>F</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>C</given-names></string-name>, <string-name><surname>Guo</surname> <given-names>S</given-names></string-name>. <article-title>Accurate pancreas segmentation using multi-level pyramidal pooling residual U-Net with adversarial mechanism</article-title>. <source>BMC Med Imaging</source>. <year>2021</year>;<volume>21</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12880-021-00694-1">https://doi.org/10.1186/s12880-021-00694-1</ext-link></mixed-citation></ref>
<ref id="ref33"><label>33</label><mixed-citation publication-type="journal"><string-name><surname>Wang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>J</given-names></string-name>, <string-name><surname>Cui</surname> <given-names>H</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Xia</surname> <given-names>Y</given-names></string-name>. <article-title>View adaptive learning for pancreas segmentation</article-title>. <source>Biomed Signal Process Control</source>. <year>2021</year>;<volume>66</volume>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.bspc.2020.102347">https://doi.org/10.1016/j.bspc.2020.102347</ext-link></mixed-citation></ref>
<ref id="ref34"><label>34</label><mixed-citation publication-type="journal"><string-name><surname>Tian</surname> <given-names>M</given-names></string-name>, <string-name><surname>He</surname> <given-names>J</given-names></string-name>, <string-name><surname>Yu</surname> <given-names>X</given-names></string-name>, <string-name><surname>Cai</surname> <given-names>C</given-names></string-name>, <string-name><surname>Gao</surname> <given-names>Y</given-names></string-name>. <article-title>MCMC-guided CNN training and segmentation for pancreas extraction</article-title>. <source>IEEE Access</source>. <year>2021</year>;<volume>9</volume>:<fpage>90539</fpage>&#x2013;<lpage>54</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/ACCESS.2021.3070391">https://doi.org/10.1109/ACCESS.2021.3070391</ext-link></mixed-citation></ref>
<ref id="ref35"><label>35</label><mixed-citation publication-type="journal"><string-name><surname>Xue</surname> <given-names>J</given-names></string-name>, <string-name><surname>He</surname> <given-names>K</given-names></string-name>, <string-name><surname>Nie</surname> <given-names>D</given-names></string-name>, <string-name><surname>Adeli</surname> <given-names>E</given-names></string-name>, <string-name><surname>Shi</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>SW</given-names></string-name>, <etal>et al</etal>. <article-title>Cascaded multitask 3-D fully convolutional networks for pancreas segmentation</article-title>. <source>IEEE Trans Cybern</source>. <year>2021</year>;<volume>51</volume>(<issue>4</issue>):<fpage>2153</fpage>&#x2013;<lpage>65</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/TCYB.2019.2955178">https://doi.org/10.1109/TCYB.2019.2955178</ext-link></mixed-citation></ref>
<ref id="ref36"><label>36</label><mixed-citation publication-type="journal"><string-name><surname>Chen</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Yan</surname> <given-names>K</given-names></string-name>, <string-name><surname>Zheng</surname> <given-names>J</given-names></string-name>. <article-title>Deep multi-scale feature fusion for pancreas segmentation from CT images</article-title>. <source>Int J Comput Assist Radiol Surg</source>. <year>2020</year>;<volume>15</volume>(<issue>3</issue>):<fpage>415</fpage>&#x2013;<lpage>23</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11548-020-02117-y">https://doi.org/10.1007/s11548-020-02117-y</ext-link></mixed-citation></ref>
<ref id="ref37"><label>37</label><mixed-citation publication-type="journal"><string-name><surname>Boers</surname> <given-names>TGW</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Gibson</surname> <given-names>E</given-names></string-name>, <string-name><surname>Barratt</surname> <given-names>DC</given-names></string-name>, <string-name><surname>Bonmati</surname> <given-names>E</given-names></string-name>, <string-name><surname>Krdzalic</surname> <given-names>J</given-names></string-name>, <etal>et al</etal>. <article-title>Interactive 3D U-net for the segmentation of the pancreas in computed tomography scans</article-title>. <source>Phys Med Biol</source>. <year>2020</year>;<volume>65</volume>(<issue>6</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1088/1361-6560/ab6f99">https://doi.org/10.1088/1361-6560/ab6f99</ext-link></mixed-citation></ref>
<ref id="ref38"><label>38</label><mixed-citation publication-type="journal"><string-name><surname>Liu</surname> <given-names>S</given-names></string-name>, <string-name><surname>Yuan</surname> <given-names>X</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Liang</surname> <given-names>S</given-names></string-name>, <string-name><surname>Feng</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ai</surname> <given-names>Y</given-names></string-name>, <etal>et al</etal>. <article-title>Automatic pancreas segmentation via coarse location and ensemble learning</article-title>. <source>IEEE Access</source>. <year>2020</year>;<volume>8</volume>:<fpage>2906</fpage>&#x2013;<lpage>14</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/ACCESS.2019.2961125">https://doi.org/10.1109/ACCESS.2019.2961125</ext-link></mixed-citation></ref>
<ref id="ref39"><label>39</label><mixed-citation publication-type="journal"><string-name><surname>Yang</surname> <given-names>J</given-names></string-name>, <string-name><surname>Qiu</surname> <given-names>P</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Marcus</surname> <given-names>DS</given-names></string-name>, <string-name><surname>Sotiras</surname> <given-names>A</given-names></string-name>. <article-title>D-net: dynamic large kernel with dynamic feature fusion for volumetric medical image segmentation</article-title>. <source>Biomed Signal Process Control</source>. <year>2026</year>;<volume>113</volume>. <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2403.10674">http://arxiv.org/abs/2403.10674</ext-link></mixed-citation></ref>
<ref id="ref40"><label>40</label><mixed-citation publication-type="journal"><string-name><surname>Thanya</surname> <given-names>T</given-names></string-name>, <string-name><surname>Jeslin</surname> <given-names>T</given-names></string-name>. <article-title>DeepOptimalNet: optimized deep learning model for early diagnosis of pancreatic tumor classification in CT imaging</article-title>. <source>Abdom Radiol</source>. <year>2025</year>;<volume>50</volume>(<issue>9</issue>):<fpage>4181</fpage>&#x2013;<lpage>211</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00261-025-04860-9">https://doi.org/10.1007/s00261-025-04860-9</ext-link></mixed-citation></ref>
<ref id="ref41"><label>41</label><mixed-citation publication-type="confproc"><string-name><surname>Mahendran</surname> <given-names>RK</given-names></string-name>, <string-name><surname>Aniruddhan</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>P</given-names></string-name>. <article-title>PancreasNet: a transformer-based progressive residual network for comprehensive pancreatic cancer detection using CT images</article-title>. <source>10th International Conference on Wireless Communications Signal Processing and Networking WiSPNET</source>. <year>2025</year>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/WiSPNET64060.2025.11004859">https://doi.org/10.1109/WiSPNET64060.2025.11004859</ext-link></mixed-citation></ref>
<ref id="ref42"><label>42</label><mixed-citation publication-type="preprint"><string-name><surname>Babaei</surname> <given-names>R</given-names></string-name>, <string-name><surname>Cheng</surname> <given-names>S</given-names></string-name>, <string-name><surname>Thai</surname> <given-names>T</given-names></string-name>, <string-name><surname>Zhao</surname> <given-names>S</given-names></string-name>. <article-title>Pancreatic tumor segmentation as anomaly detection in ct images using denoising diffusion models</article-title>. <year>2024</year>. <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2406.02653">http://arxiv.org/abs/2406.02653</ext-link></mixed-citation></ref>
<ref id="ref43"><label>43</label><mixed-citation publication-type="confproc"><string-name><surname>Mitrea</surname> <given-names>D</given-names></string-name>, <string-name><surname>Brehar</surname> <given-names>R</given-names></string-name>, <string-name><surname>Itu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Nedevschi</surname> <given-names>S</given-names></string-name>, <string-name><surname>Socaciu</surname> <given-names>M</given-names></string-name>, <string-name><surname>Badea</surname> <given-names>R</given-names></string-name>. <article-title>Pancreatic tumor recognition from CT images through advanced deep learning techniques</article-title>. <source>Proceedings of the 24th IEEE International Conference on Automation Quality and Testing Robotics AQTR</source>. <year>2024</year>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/AQTR61889.2024.10554139">https://doi.org/10.1109/AQTR61889.2024.10554139</ext-link></mixed-citation></ref>
<ref id="ref44"><label>44</label><mixed-citation publication-type="journal"><string-name><surname>Cao</surname></string-name> <etal>et al</etal>. (<year>2023b</year>)</mixed-citation></ref>
<ref id="ref45"><label>45</label><mixed-citation publication-type="journal"><string-name><surname>Shi</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Tang</surname> <given-names>H</given-names></string-name>, <string-name><surname>Baine</surname> <given-names>MJ</given-names></string-name>, <string-name><surname>Hollingsworth</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Du</surname> <given-names>H</given-names></string-name>, <string-name><surname>Zheng</surname> <given-names>D</given-names></string-name>, <etal>et al</etal>. <article-title>3DGAUnet: 3D generative adversarial networks with a 3D U-net based generator to achieve the accurate and effective synthesis of clinical tumor image data for pancreatic cancer</article-title>. <source>Cancers</source>. <year>2023</year>;<volume>15</volume>(<issue>23</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers15235496">https://doi.org/10.3390/cancers15235496</ext-link></mixed-citation></ref>
<ref id="ref46"><label>46</label><mixed-citation publication-type="confproc"><string-name><surname>Zhang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Li</surname> <given-names>S</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Lu</surname> <given-names>Y</given-names></string-name>. <article-title>A novel and efficient tumor detection framework for pancreatic cancer via CT images</article-title>. <source>Proceedings of the Annual International Conference of the IEEE Engineering in Medicine and Biology Society, EMBS</source>. <year>2020</year>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/EMBC44109.2020.9176172">https://doi.org/10.1109/EMBC44109.2020.9176172</ext-link></mixed-citation></ref>
<ref id="ref47"><label>47</label><mixed-citation publication-type="journal"><string-name><surname>Choi</surname> <given-names>MH</given-names></string-name>, <string-name><surname>Yoon</surname> <given-names>SB</given-names></string-name>, <string-name><surname>Song</surname> <given-names>M</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>IS</given-names></string-name>, <string-name><surname>Hong</surname> <given-names>TH</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>MA</given-names></string-name>, <etal>et al</etal>. <article-title>Benefits of the multiplanar and volumetric analyses of pancreatic cancer using computed tomography</article-title>. <source>PLoS ONE</source>. <year>2020</year>;<volume>15</volume>(<issue>10</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1371/journal.pone.0240318">https://doi.org/10.1371/journal.pone.0240318</ext-link></mixed-citation></ref>
<ref id="ref48"><label>48</label><mixed-citation publication-type="journal"><string-name><surname>Chu</surname> <given-names>LC</given-names></string-name>, <string-name><surname>Park</surname> <given-names>S</given-names></string-name>, <string-name><surname>Kawamoto</surname> <given-names>S</given-names></string-name>, <string-name><surname>Fouladi</surname> <given-names>DF</given-names></string-name>, <string-name><surname>Shayesteh</surname> <given-names>S</given-names></string-name>, <string-name><surname>Zinreich</surname> <given-names>ES</given-names></string-name>, <etal>et al</etal>. <article-title>Utility of CT radiomics features in differentiation of pancreatic ductal adenocarcinoma from normal pancreatic tissue</article-title>. <source>Am J Roentgenol</source>. <year>2019</year>;<volume>213</volume>(<issue>2</issue>):<fpage>349</fpage>&#x2013;<lpage>57</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2214/AJR.18.20901">https://doi.org/10.2214/AJR.18.20901</ext-link></mixed-citation></ref>
<ref id="ref49"><label>49</label><mixed-citation publication-type="journal"><string-name><surname>Antonelli</surname> <given-names>M</given-names></string-name>, <string-name><surname>Reinke</surname> <given-names>A</given-names></string-name>, <string-name><surname>Bakas</surname> <given-names>S</given-names></string-name>, <string-name><surname>Farahani</surname> <given-names>K</given-names></string-name>, <string-name><surname>Kopp-Schneider</surname> <given-names>A</given-names></string-name>, <string-name><surname>Landman</surname> <given-names>BA</given-names></string-name>, <etal>et al</etal>. <article-title>The medical segmentation decathlon</article-title>. <source>Nat Commun</source>. <year>2022</year>;<volume>13</volume>(<issue>1</issue>). <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41467-022-30695-9">https://doi.org/10.1038/s41467-022-30695-9</ext-link></mixed-citation></ref>
<ref id="ref50"><label>50</label><mixed-citation publication-type="journal"><string-name><surname>Zhang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Keles</surname> <given-names>E</given-names></string-name>, <string-name><surname>Durak</surname> <given-names>G</given-names></string-name>, <string-name><surname>Taktak</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Susladkar</surname> <given-names>O</given-names></string-name>, <string-name><surname>Gorade</surname> <given-names>V</given-names></string-name>, <etal>et al</etal>. <article-title>Large-scale multi-center CT and MRI segmentation of pancreas with deep learning</article-title>. <source>Med Image Anal</source>. <year>2025</year>;<volume>99</volume>:<fpage>103382</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.media.2024.103382">https://doi.org/10.1016/j.media.2024.103382</ext-link></mixed-citation></ref>
</ref-list>
</back>
</article>