<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "http://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd">
<article article-type="review-article" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">PJS</journal-id>
<journal-id journal-id-type="publisher-id">Premier Journal of Science</journal-id>
<journal-id journal-id-type="pmc">PJS</journal-id>
<journal-title-group>
<journal-title>PJ Science</journal-title>
</journal-title-group>
<issn pub-type="epub">3049-9011</issn>
<publisher>
<publisher-name>Premier Science</publisher-name>
<publisher-loc>London, UK</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.70389/PJS.100269</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>REVIEW</subject>
</subj-group>
<subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Cognitive science</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject><subj-group><subject>Hallucinations</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject><subj-group><subject>Hallucinations</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Social sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject><subj-group><subject>Hallucinations</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Sensory perception</subject><subj-group><subject>Hallucinations</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Social sciences</subject><subj-group><subject>Linguistics</subject><subj-group><subject>Grammar</subject><subj-group><subject>Phonology</subject><subj-group><subject>Syllables</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Engineering and technology</subject><subj-group><subject>Signal processing</subject><subj-group><subject>Speech signal processing</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Cognitive science</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Social sciences</subject><subj-group><subject>Psychology</subject><subj-group><subject>Cognitive psychology</subject><subj-group><subject>Perception</subject><subj-group><subject>Sensory perception</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Sensory perception</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Mental health and psychiatry</subject><subj-group><subject>Schizophrenia</subject></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Bioassays and physiological analysis</subject><subj-group><subject>Electrophysiological techniques</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Physiology</subject><subj-group><subject>Electrophysiology</subject><subj-group><subject>Neurophysiology</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Neurophysiology</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Brain mapping</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Clinical medicine</subject><subj-group><subject>Clinical neurophysiology</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Imaging techniques</subject><subj-group><subject>Neuroimaging</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Neuroimaging</subject><subj-group><subject>Electroencephalography</subject><subj-group><subject>Event-related potentials</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Cell biology</subject><subj-group><subject>Cellular types</subject><subj-group><subject>Animal cells</subject><subj-group><subject>Neurons</subject><subj-group><subject>Interneurons</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Cellular neuroscience</subject><subj-group><subject>Neurons</subject><subj-group><subject>Interneurons</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Bioassays and physiological analysis</subject><subj-group><subject>Electrophysiological techniques</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Physiology</subject><subj-group><subject>Electrophysiology</subject><subj-group><subject>Neurophysiology</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Neurophysiology</subject><subj-group><subject>Brain electrophysiology</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Brain mapping</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Medicine and health sciences</subject><subj-group><subject>Clinical medicine</subject><subj-group><subject>Clinical neurophysiology</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Research and analysis methods</subject><subj-group><subject>Imaging techniques</subject><subj-group><subject>Neuroimaging</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group><subj-group subj-group-type="Discipline-v3">
<subject>Biology and life sciences</subject><subj-group><subject>Neuroscience</subject><subj-group><subject>Neuroimaging</subject><subj-group><subject>Electroencephalography</subject></subj-group></subj-group></subj-group></subj-group>
</article-categories>
<title-group>
<article-title>Artificial Intelligence Across the Surgical Oncology Continuum: Decision Support, Operative Intelligence, and a Translation-First Roadmap</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-5835-5179</contrib-id>
<name>
<surname>Abikenari</surname>
<given-names>Matthew</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff15"><sup>*</sup></xref>
<role content-type="http://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
<role content-type="http://credit.niso.org/contributor-roles/writing-original-draft">Writing &#x2013; original draft</role>
<role content-type="http://credit.niso.org/contributor-roles/review-editing">Review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Enayati</surname>
<given-names>Iman</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mohseni</surname>
<given-names>Kimia</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sanker</surname>
<given-names>Vivek</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ablyazova</surname>
<given-names>Faina</given-names>
</name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Vargas</surname>
<given-names>Luis</given-names>
</name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Himic</surname>
<given-names>Vratko</given-names>
</name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Yijiang</given-names>
</name>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Baibussinov</surname>
<given-names>Alisher</given-names>
</name>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Polkampally</surname>
<given-names>Sri</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Liu</surname>
<given-names>Shirley</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Poe</surname>
<given-names>James</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Claire</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Davani</surname>
<given-names>Danial</given-names>
</name>
<xref ref-type="aff" rid="aff10"><sup>10</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jain</surname>
<given-names>Aryan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Freichel</surname>
<given-names>Rene</given-names>
</name>
<xref ref-type="aff" rid="aff11"><sup>11</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Abikenari</surname>
<given-names>Derek</given-names>
</name>
<xref ref-type="aff" rid="aff12"><sup>12</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kerwan</surname>
<given-names>Ahmed</given-names>
</name>
<xref ref-type="aff" rid="aff13"><sup>13</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Agha</surname>
<given-names>Riaz</given-names>
</name>
<xref ref-type="aff" rid="aff14"><sup>14</sup></xref>
</contrib>
<aff id="aff1"><sup>1</sup><institution-wrap><institution-id institution-id-type="ror">https://ror.org/03mtd9a03</institution-id><institution>Department of Neurosurgery, Stanford University School of Medicine</institution></institution-wrap>, <city>Stanford</city>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff2"><sup>2</sup><institution>Nuffield Department of Clinical Neurosciences, University of Oxford</institution>, <city>Oxford</city>, <country>UK</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Orthopaedic Surgery, University of California</institution>, <city>Los Angeles</city>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Medicine, University of California</institution>, <city>Los Angeles</city>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Neurosurgery, Northwell, Zucker School of Medicine at Hofstra/Northwell</institution>, <city>New Hyde Park</city>, <state>NY</state>, <country>USA</country></aff>
<aff id="aff6"><sup>6</sup><institution>Department of Neurosurgery, University of Buffalo Jacobs School of Medicine and Biomedical Sciences</institution>, <city>Buffalo</city>, <state>NY</state>, <country>USA</country></aff>
<aff id="aff7"><sup>7</sup><institution>Department of Neurological Surgery, University of Miami Miller School of Medicine</institution>, <state>FL</state>, <country>USA</country></aff>
<aff id="aff8"><sup>8</sup><institution>Department of Radiation Oncology, Stanford University School of Medicine</institution>, <city>Stanford</city>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff9"><sup>9</sup><institution>Department of Veteran Affairs, University of California</institution>, Los Angeles, <state>CA</state>, <country>USA</country></aff>
<aff id="aff10"><sup>10</sup><institution>Viterbi School of Engineering, University of Southern California</institution>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff11"><sup>11</sup><institution>Department of Psychology, University of Amsterdam, Amsterdam</institution>, <city>The Netherlands</city></aff>
<aff id="aff12"><sup>12</sup><institution>Department of Philosophy, California State Polytechnic University</institution>, <city>Pomona</city>, <state>CA</state>, <country>USA</country></aff>
<aff id="aff13"><sup>13</sup><institution>Department of Public Health, Harvard TH Chan School of Public Health</institution>, <city>Harvard University</city>, <state>MA</state>, <country>USA</country></aff>
<aff id="aff14"><sup>14</sup><institution>Premier Science</institution>, <city>London</city>, <country>UK</country></aff>
<aff id="aff15"><sup>*</sup><institution>Indicates Co-first authorship</institution></aff>
</contrib-group>
<author-notes>
<corresp id="cor001"><bold>Correspondence to:</bold> Matthew Abikenari, MS <email>mattabi@stanford.edu</email></corresp>
<fn fn-type="other"><p>Peer Review</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>09</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date pub-type="collection">
<month>03</month>
<year>2026</year>
</pub-date>
<volume>20</volume>
<issue>1</issue>
<elocation-id>100269</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>01</day>
<month>03</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>03</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-year>2026</copyright-year>
<copyright-holder>Matthew Abikenari, Iman Enayati, Kimia Mohseni, Vivek Sanker, Faina Ablyazova, Luis Vargas, Vratko Himic, Yijiang Chen, Alisher Baibussinov, Sri Polkampally, Shirley Liu, James Poe, Claire Kim, Danial Davani, Aryan Jain, Rene Freichel, Derek Abikenari, Ahmed Kerwan and Riaz Agha</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">
<license-p>This is an open access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/" xlink:type="simple">Creative Commons Attribution License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="info:doi.org/10.70389/PJS.100269"/>
<abstract>
<p>Artificial intelligence (AI) and machine learning (ML) methods are increasingly being developed for surgical oncology, a domain in which perioperative decision-making must account for uncertainty and substantial variability in patient anatomy, tumor extent, imaging protocols, devices, and operative workflows. Although proof-of-concept performance has been reported across multiple tasks, there remain limited data regarding the extent to which these systems generalize across institutions and whether they confer measurable clinical utility under routine conditions. In parallel with broader advances in medical AI and surgical robotics, there has been a growing effort to operationalize AI/ML across the perioperative continuum. The aim of this narrative review is to define the role of AI/ML in surgical oncology and to delineate the evidentiary and methodological requirements for translation into reliable clinical systems. For clarity, we structure the field into three domains that align with the perioperative workflow: (1) clinical translation and decision support, (2) preoperative planning, and (3) intraoperative navigation with robotic assistance/control. Clinical translation centers on decision support, including perioperative risk stratification, outcome prediction, and workflow-integrated recommendations. Preoperative planning is dominated by imaging-based methods for lesion detection and classification, anatomic segmentation, and image registration. Intraoperative navigation and robotic assistance/control emphasize perception and guidance, such as localization, tissue and instrument tracking, geometric reconstruction, and augmented reality visualization, alongside emerging autonomy-enabling approaches, such as learning from demonstration and reinforcement learning, implemented within supervised human&#x2013;robot interaction paradigms. 
Across these domains, we evaluate how data provenance, endpoint selection, external and prospective validation, robustness to dataset shift, uncertainty calibration, interpretability, and safety assurance constrain performance and determine translational readiness.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Artificial intelligence</kwd>
<kwd>Machine learning</kwd>
<kwd>Surgical oncology</kwd>
<kwd>Perioperative decision-making</kwd>
<kwd>Preoperative planning</kwd>
<kwd>Intraoperative navigation</kwd>
<kwd>Robotic surgery</kwd>
<kwd>Clinical translation</kwd>
</kwd-group>
<counts>
<fig-count count="1"/>
<table-count count="4"/>
<page-count count="21"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>Version accepted</meta-name>
<meta-value>3</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec>
<title><ext-link ext-link-type="uri" xlink:href="https://premierscience.com/wp-content/uploads/2026/18/pjs-26-1588.pdf">Source-File: pjs-26-1588.pdf</ext-link></title>
</sec>
<sec id="sec001" sec-type="intro">
<title>Introduction</title>
<p>Surgical oncology integrates technically demanding resection and reconstruction with perioperative decision-making constrained by biologic uncertainty and substantial heterogeneity in patient anatomy, tumor extent, prior treatment response, and operative environment. Even when intent is curative, the operative plan is frequently refined intraoperatively based on unanticipated patterns of invasion, occult metastatic disease, tissue planes distorted by neoadjuvant therapy, and evolving physiologic tolerance for blood loss or prolonged anesthesia. In this setting, the margin for error is narrow: oncologic adequacy must be balanced against preservation of function and minimization of morbidity, and these trade-offs must be negotiated using information that is incomplete, distributed across modalities, and variably reliable.</p>
<p>Artificial intelligence (AI) refers broadly to computational methods that perform tasks typically requiring human intelligence; machine learning (ML) denotes a subset of AI in which models learn statistical structure from data to generate predictions or recommendations without explicit rule-based programming. Contemporary AI/ML in perioperative medicine is dominated by data-driven approaches, particularly deep learning, applied to high-dimensional signals such as cross-sectional imaging, digital pathology, endoscopic video, and structured and unstructured clinical data. In surgical oncology, preoperative applications have been shaped by imaging-centered tasks, including lesion detection and characterization, risk stratification, treatment response assessment, and selection of candidates for neoadjuvant or adjuvant strategies. Radiomics and related quantitative imaging methods exemplify this trajectory by converting medical images into features that can be integrated with clinical and molecular variables for prognostic and predictive modeling.<sup><xref ref-type="bibr" rid="ref1">1</xref></sup></p>
<p>Preoperative planning has also increasingly incorporated anatomic reconstruction and simulation, motivated by the need to anticipate anatomic variants, determine feasible resection planes, and select minimally invasive trajectories that preserve critical structures. Three-dimensional (3D) reconstruction workflows, often combined with AI-assisted segmentation and registration, have been particularly prominent in thoracic surgery, where complex bronchovascular branching and limited operative exposure create dependence on preoperative imaging for safe dissection.<sup><xref ref-type="bibr" rid="ref2">2</xref></sup> Across surgical oncology more broadly, these imaging-based approaches have progressed from offline analysis toward workflow-integrated tools that aim to provide interpretable anatomic abstractions and patient-specific risk estimates at the point of care.</p>
<p>The intraoperative transition from open surgery to endoscopic and robot-assisted platforms has further expanded the volume of machine-readable data while simultaneously reducing direct tactile feedback. Within this environment, computer vision-enabled perception (eg, tissue and instrument localization), workflow modeling (eg, phase recognition), and context-aware guidance have been proposed as mechanisms to reduce operator dependence and improve the consistency of oncologic technique. A systematic review of AI integration in robotic cancer surgery underscores both the breadth of proof-of-concept tasks, ranging from instrument tracking and skill assessment to intraoperative decision support, and the methodological heterogeneity that still characterizes the literature, including variable dataset provenance, inconsistent endpoint definitions, and limited external validation.<sup><xref ref-type="bibr" rid="ref3">3</xref></sup> Parallel advances in multimodal AI seek to fuse complementary signals (eg, imaging, text, and video) to support perioperative decision-making, documentation, and surveillance; these approaches are conceptually attractive in oncologic surgery because key decisions often hinge on concordance across modalities rather than any single data stream.<sup><xref ref-type="bibr" rid="ref4">4</xref></sup></p>
<p>Despite technical progress, translation of AI/ML systems into reliable clinical support requires evidence that extends beyond retrospective performance within a single institution. Models intended to influence surgical oncology decisions must demonstrate calibration and robustness to dataset shift (systematic differences between training and deployment distributions), integrate uncertainty in ways that are actionable for clinicians, and be evaluated against clinically meaningful endpoints under realistic workflow constraints. Importantly, many published systems are derived from retrospective, single-institution datasets and are evaluated using internal validation strategies, limiting inference about transportability and clinical impact.<sup><xref ref-type="bibr" rid="ref3">3</xref></sup> Clinical implementation has been reported but remains uncommon relative to the volume of proof-of-concept studies. A large-scale implementation study in colorectal cancer surgery deployed an AI-based prediction model as part of perioperative decision support and evaluated outcomes using routine clinical data capture, illustrating both the potential clinical impact of workflow-integrated prediction and the interpretive constraints of non-randomized implementation designs.<sup><xref ref-type="bibr" rid="ref5">5</xref></sup> Complementary evidence from international external validation of an ML-based risk prediction model for 90-day mortality after gastrectomy for cancer highlights the methodological necessity of testing transportability across institutions and case-mix distributions before widespread adoption.<sup><xref ref-type="bibr" rid="ref6">6</xref></sup></p>
<p>Clinical deployment also raises domain-specific ethical and epistemic considerations. In surgical oncology, AI-informed recommendations may affect treatment selection, extent of resection, and tolerance for operative risk, thereby shaping trade-offs that are sensitive to patient outcomes and values. These realities sharpen questions of equipoise, liability, transparency, and informed consent, particularly when models are embedded in proprietary devices or closed robotic ecosystems and when failure modes are difficult to characterize a priori.<sup><xref ref-type="bibr" rid="ref7">7</xref></sup> Consequently, translation must couple algorithmic performance with safety assurance practices that explicitly define intended use, identify hazards, and evaluate human&#x2013;system interaction under conditions that approximate routine clinical care.</p>
<p>Intraoperative navigation and robotic assistance/control constitute a distinct translational domain in which performance is constrained by dynamic anatomy, occlusion, deformation, and real-time computational requirements. Representative work includes real-time vascular anatomical image navigation for laparoscopic surgery, in which deep learning is used to recognize key vascular structures in endoscopic video as a substrate for context-aware guidance.<sup><xref ref-type="bibr" rid="ref8">8</xref></sup> Optical stereotactic navigation has also been explored in rectosigmoid cancer surgery using deep learning, supported by 3D modeling to improve localization and support more standardized dissection strategies.<sup><xref ref-type="bibr" rid="ref9">9</xref></sup> In robot-assisted settings, safety-oriented perception has been approached through AI-based hazard detection methods that attempt to identify high-risk configurations of instruments and tissue interaction, reflecting a shift toward proactive risk mitigation rather than post hoc error attribution.<sup><xref ref-type="bibr" rid="ref10">10</xref></sup> For oncologic tasks in which decision thresholds are tightly coupled to margins, uncertainty-aware modeling is increasingly recognized as critical; uncertainty estimation has been evaluated, for example, in intraoperative margin detection using mass spectrometry to better characterize confidence in tissue classification.<sup><xref ref-type="bibr" rid="ref11">11</xref></sup> In parallel, prospective evaluations of non-contact, AI-assisted intraoperative 3D navigation technologies in lung cancer surgery illustrate continued interest in reducing friction in intraoperative data acquisition while maintaining surgeon autonomy.<sup><xref ref-type="bibr" rid="ref12">12</xref></sup></p>
<p>Collectively, the contemporary literature suggests that AI/ML methods can enable consistent extraction of clinically relevant signals from heterogeneous perioperative data, but also that translational readiness is principally determined by methodological rigor and deployment context rather than by headline accuracy. Surgical oncology provides a stringent test bed for clinical AI: models must generalize across variable anatomy and imaging protocols, support decisions that are safety-critical and time-constrained, and remain reliable when confronted with unexpected intraoperative conditions. In this narrative review, we define the role of AI/ML in surgical oncology and delineate the evidentiary and methodological requirements for translation into reliable clinical systems. For clarity, we structure the field into three domains aligned with the perioperative workflow: (1) clinical translation and decision support, (2) preoperative planning, and (3) intraoperative navigation with robotic assistance/control. Across these domains, we evaluate how data provenance, endpoint selection, external and prospective validation, robustness to dataset shift, uncertainty calibration, interpretability, and safety assurance constrain performance and determine translational readiness.</p>
<p>Lastly, to maintain the surgical oncology focus of this review, the remainder of this review schematizes the capabilities of ML systems against perioperative cancer-related decisions and actions such as oncologic surgery candidacy and risk tailoring, imaging-based characterization/planning for tumor and critical structure relationships, and intraoperative perception/guidance for margin assessment and complication avoidance. Where clinical AI frameworks are cited in general terms (eg, reporting, risk of bias, device control), they have been included insofar as they provide translation-relevant constraints for perioperative oncologic systems.</p>
</sec>
<sec id="sec002">
<title>Methodological Transparency</title>
<p>This review was designed with the goal of translational readiness in mind rather than maximal topical relevance. We conducted a structured literature review according to the following methods. We searched PubMed/MEDLINE, Embase, IEEE Xplore, and the Cochrane Library for English-language publications from January 2010 to the most recent date before submission. Search concepts were designed as follows: (&#x201C;surgical oncology&#x201D; or procedure- or organ-specific cancer surgery) AND (&#x201C;artificial intelligence&#x201D;, &#x201C;machine learning&#x201D;, &#x201C;deep learning&#x201D;, &#x201C;computer vision&#x201D;, &#x201C;decision support&#x201D;, &#x201C;robotic surgery&#x201D;, &#x201C;augmented reality&#x201D;, &#x201C;navigation&#x201D;, &#x201C;risk prediction&#x201D;). We also searched reference lists of relevant systematic reviews and landmark studies to ensure that any pertinent literature would not be missed. Studies were eligible for inclusion if they developed, validated, or clinically used AI/ML systems that were intended to affect or were used for perioperative decision-making, preoperative planning, or intraoperative navigation in cancer surgery. We did not include non-oncologic applications unless they provided a generalizable translational framework that is pertinent to cancer surgery. Two reviewers screened the abstracts for relevance. Full texts of studies with relevant abstracts were then screened for inclusion based on the study&#x2019;s clear intention to use the AI/ML system for the aforementioned applications. Data were abstracted for dataset source and representativeness, labeling strategy and reference standards, internal or external or prospective validation, calibration and uncertainty estimation, decision-analytic evaluation, workflow integration, and safety features. 
To minimize bias in the review process and to help readers judge applicability to the development of AI/ML systems for cancer surgery, the review assesses applicability and risk of bias according to the PROBAST/PROBAST+AI domains (participants, predictors, outcomes, analysis), with attention to the reporting criteria of TRIPOD+AI, CONSORT-AI, SPIRIT-AI, and DECIDE-AI as applicable.</p>
<p>Lastly, to make the evidence assembly process explicit and reproducible, we provide a summary of identification, screening, eligibility, and inclusion criteria, including details of sources searched, de-duplication strategy, inclusion and exclusion criteria, and final included items by domain. For ease of reading and conciseness of the main text, we have placed expanded framework tables, such as comprehensive standards tables and expanded failure mode tables, throughout the manuscript.</p>
</sec>
<sec id="sec003">
<title>Clinical Translation and Decision Support</title>
<p>To clarify evidence maturity and avoid over-reliance on narrative emphasis, we provide domain-specific evidence maps summarizing representative studies in surgical oncology (<xref ref-type="table" rid="T1">Tables 1</xref>&#x2013;<xref ref-type="table" rid="T4">4</xref>). Each table includes information on dataset provenance (single-site vs multi-site, registry vs electronic health record [EHR], imaging), validation approach (internal, external, internal&#x2013;external, prospective), calibration and handling of uncertainty, decision-analytic evaluation, and, most importantly, what specific clinical action is intended to be triggered by model output (eg, pathway enrollment, increased surveillance, changes in surgery approach, increased margins, sampling).</p>
<table-wrap id="T1">
<label>Table 1 |</label>
<caption><title>Surgical oncology AI operational taxonomy with translation maturity rubric</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="center"><bold>Operational Tier</bold></th>
<th valign="top" align="center"><bold>Module or Task</bold></th>
<th valign="top" align="center"><bold>Primary Clinical Product</bold></th>
<th valign="top" align="center"><bold>Typical Model Family</bold></th>
<th valign="top" align="center"><bold>Translation Maturity Rubric for This Module</bold></th>
<th valign="top" align="center"><bold>Minimum Translation Bar</bold></th>
<th valign="top" align="center"><bold>Dominant Harm If It Fails</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Decision Support</bold></td>
<td valign="top" align="left">Risk prediction, complication prediction, therapy guidance</td>
<td valign="top" align="left">Decision-relevant outputs for tumor board and pre-op planning</td>
<td valign="top" align="left">Supervised ML, deep nets, survival models</td>
<td valign="top" align="left">Retrospective discrimination &#x2192; internal validation &#x2192; external validation &#x2192; prospective workflow study &#x2192; impact study with patient or process endpoints &#x2192; monitoring</td>
<td valign="top" align="left">External validation and prospective assessment should become routine; interface and workflow integration are scientific outputs</td>
<td valign="top" align="left">Silent distribution-shift failure, false reassurance, inappropriate plan changes</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Pre-op Planning</bold></td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">Actionable probabilities for diagnosis or risk phenotype</td>
<td valign="top" align="left">CNN classifiers, multimodal models</td>
<td valign="top" align="left">Benchmark performance &#x2192; multi-site generalizability &#x2192; calibration and uncertainty &#x2192; clinical impact on planning decisions</td>
<td valign="top" align="left">Outputs must align with decisions rather than outcomes; handle missing and imbalanced data</td>
<td valign="top" align="left">Overconfident wrong probabilities bias operative aggressiveness</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Pre-op Planning</bold></td>
<td valign="top" align="left">Detection</td>
<td valign="top" align="left">Lesion and landmark localization that reduces search burden</td>
<td valign="top" align="left">Detection networks, regression heads</td>
<td valign="top" align="left">Phantom or single-site tests &#x2192; robustness across scanners and protocols &#x2192; error propagation testing into segmentation and registration</td>
<td valign="top" align="left">Must be treated as upstream dependency that can cascade into later modules</td>
<td valign="top" align="left">Cascading localization error creates systematic downstream overlay error</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Pre-op Planning</bold></td>
<td valign="top" align="left">Segmentation</td>
<td valign="top" align="left">Operative geometry: tumor margins, organs at risk, vessels, corridors</td>
<td valign="top" align="left">U-Net variants, encoder&#x2013;decoder nets</td>
<td valign="top" align="left">Accuracy on curated sets &#x2192; robustness to label heterogeneity &#x2192; clinically relevant geometry metrics</td>
<td valign="top" align="left">Annotation scalability is the binding constraint; weak and self-supervised are necessities</td>
<td valign="top" align="left">Wrong geometry yields unsafe distances, corridors, or resection maps</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Pre-op Planning</bold></td>
<td valign="top" align="left">Registration</td>
<td valign="top" align="left">Alignment across modalities, timepoints, and pre-op model to intra-op view</td>
<td valign="top" align="left">Deformable registration, learned deformation fields</td>
<td valign="top" align="left">Retrospective registration error &#x2192; stress tests under deformation &#x2192; downstream overlay safety tests</td>
<td valign="top" align="left">Confident subtle error is worse than no system; overlays require trust</td>
<td valign="top" align="left">Confident wrong overlay guides the surgeon toward danger</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Intra-op Navigation</bold></td>
<td valign="top" align="left">Shape instantiation</td>
<td valign="top" align="left">Recover usable 3D geometry from partial or 2D views</td>
<td valign="top" align="left">3D reconstruction, surface models</td>
<td valign="top" align="left">Lab or phantom &#x2192; robustness under deformation and occlusion &#x2192; failure-behavior characterization</td>
<td valign="top" align="left">Should be evaluated by modes and speed of failure, not frame accuracy</td>
<td valign="top" align="left">Wrong 3D model leads to wrong collision avoidance or distance-to-critical-structure</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Intra-op Navigation</bold></td>
<td valign="top" align="left">Endoscopic navigation</td>
<td valign="top" align="left">Depth, pose, mapping in non-rigid spaces</td>
<td valign="top" align="left">Depth estimation, odometry, SLAM, self-supervised</td>
<td valign="top" align="left">Phantom or limited datasets &#x2192; domain shift robustness &#x2192; OOD detection and safe degradation</td>
<td valign="top" align="left">Systems must be tested for failure behavior across environments</td>
<td valign="top" align="left">Catastrophic navigation drift with false confidence</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Intra-op Navigation</bold></td>
<td valign="top" align="left">Tissue tracking</td>
<td valign="top" align="left">Continuous lock on regions of interest under motion and deformation</td>
<td valign="top" align="left">Tracking-by-detection, online updating</td>
<td valign="top" align="left">Controlled settings &#x2192; robustness in real OR conditions &#x2192; governance for non-stationary updating</td>
<td valign="top" align="left">Tight constraints during low confidence; governance challenges for online learning</td>
<td valign="top" align="left">Tracker drift causes unsafe AR and automation bias</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Intra-op Navigation</bold></td>
<td valign="top" align="left">Augmented reality overlays</td>
<td valign="top" align="left">Intuitive overlay of pre-op models onto operative field</td>
<td valign="top" align="left">AR overlay pipelines</td>
<td valign="top" align="left">Lab alignment &#x2192; non-rigid organ overlay validation &#x2192; human factors evaluation</td>
<td valign="top" align="left">Periodically incorrect overlay is dangerous; uncertainty must be represented</td>
<td valign="top" align="left">Surgeon overtrusts an intermittently wrong overlay</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Robotics and Autonomy</bold></td>
<td valign="top" align="left">Instrument segmentation and tracking</td>
<td valign="top" align="left">Tool localization as prerequisite for safe automation</td>
<td valign="top" align="left">U-Net tools, tracking smoothing</td>
<td valign="top" align="left">Benchmark &#x2192; robustness under blood, smoke, occlusion &#x2192; integrated safety tests</td>
<td valign="top" align="left">Perception is prerequisite for bounded autonomy</td>
<td valign="top" align="left">Mislocalized tool leads to unsafe action</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Robotics and Autonomy</bold></td>
<td valign="top" align="left">Learning from demonstration</td>
<td valign="top" align="left">Surgical primitives and reusable motion policies</td>
<td valign="top" align="left">Imitation learning, primitive decomposition</td>
<td valign="top" align="left">Benchmark datasets &#x2192; OOD detection &#x2192; safe handoff</td>
<td valign="top" align="left">Must detect edge conditions and transfer control swiftly to human</td>
<td valign="top" align="left">Unrecognized edge case leads to unsafe continuation</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Robotics and Autonomy</bold></td>
<td valign="top" align="left">Reinforcement learning</td>
<td valign="top" align="left">Narrow task-level autonomy via policies</td>
<td valign="top" align="left">RL with sim-to-real</td>
<td valign="top" align="left">Simulation &#x2192; transfer learning &#x2192; robust safety constraints</td>
<td valign="top" align="left">Sim-to-real transfer is a major practical concern; autonomy is bounded</td>
<td valign="top" align="left">Policy fails at rare events and creates instantaneous harm</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Robotics and Autonomy</bold></td>
<td valign="top" align="left">Human&#x2013;robot interface</td>
<td valign="top" align="left">Command-intention detection and safe interaction</td>
<td valign="top" align="left">Multimodal intent models</td>
<td valign="top" align="left">Lab tests &#x2192; OR noise and occlusion validation &#x2192; extremely low tolerance error audits</td>
<td valign="top" align="left">Low tolerance for errors; single wrong command can be dangerous</td>
<td valign="top" align="left">Erroneous command triggers dangerous actuation</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T2">
<label>Table 2 |</label>
<caption><title>Integrity-chain audit table: how upstream failures propagate to downstream harms</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="center"><bold>Upstream Module</bold></th>
<th valign="top" align="center"><bold>Downstream Module</bold></th>
<th valign="top" align="center"><bold>Claimed Dependency</bold></th>
<th valign="top" align="center"><bold>How Failure Propagates</bold></th>
<th valign="top" align="center"><bold>Concrete Harm Example</bold></th>
<th valign="top" align="center"><bold>Advocated Translation Mitigation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Detection</bold></td>
<td valign="top" align="left">Segmentation</td>
<td valign="top" align="left">Landmark and ROI constraints</td>
<td valign="top" align="left">Missed or shifted detection biases segmentation seed and geometry</td>
<td valign="top" align="left">Incorrect tumor margin near critical structure</td>
<td valign="top" align="left">Evaluate cascade explicitly, not modules in isolation</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Segmentation</bold></td>
<td valign="top" align="left">Registration</td>
<td valign="top" align="left">Geometry informs alignment and overlays</td>
<td valign="top" align="left">Wrong anatomy geometry biases deformation alignment</td>
<td valign="top" align="left">AR overlay is systematically shifted despite looking plausible</td>
<td valign="top" align="left">Treat overlay trustworthiness as safety-critical</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Registration</bold></td>
<td valign="top" align="left">AR overlays</td>
<td valign="top" align="left">Overlay depends on correct alignment</td>
<td valign="top" align="left">Small registration error becomes high-confidence visual guidance</td>
<td valign="top" align="left">Confident wrong overlay increases surgeon confidence wrongly</td>
<td valign="top" align="left">Report uncertainty; fail safely when confidence is low</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Tracking</bold></td>
<td valign="top" align="left">AR overlays</td>
<td valign="top" align="left">Overlay stability depends on tracking</td>
<td valign="top" align="left">Tracker drift makes overlay intermittently correct and intermittently wrong</td>
<td valign="top" align="left">Surgeon trusts overlay during a wrong interval</td>
<td valign="top" align="left">Human factors evaluation and uncertainty-first outputs</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Shape Instantiation</bold></td>
<td valign="top" align="left">Robotics constraints</td>
<td valign="top" align="left">Collision avoidance requires usable 3D geometry</td>
<td valign="top" align="left">Incorrect surface model breaks distance-to-structure constraints</td>
<td valign="top" align="left">Instrument trajectory violates safe boundary</td>
<td valign="top" align="left">Require multimodal sensing and defined low-confidence behavior</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Decision Support</bold></td>
<td valign="top" align="left">Pre-op planning</td>
<td valign="top" align="left">Planning decisions depend on risk and advisability</td>
<td valign="top" align="left">Miscalibrated risk pushes corridor choice or aggressiveness</td>
<td valign="top" align="left">Overly aggressive plan in fragile patient</td>
<td valign="top" align="left">External and prospective validation; workflow-native interfaces</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Pre-op Planning Pipeline</bold></td>
<td valign="top" align="left">Intra-op navigation</td>
<td valign="top" align="left">Pre-op model is used as intra-op reference</td>
<td valign="top" align="left">Normalized pre-op outputs become wrong under deformation</td>
<td valign="top" align="left">Navigation uses outdated geometry</td>
<td valign="top" align="left">Closed-loop consistency across operating suite</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Intra-op Navigation</bold></td>
<td valign="top" align="left">Bounded autonomy</td>
<td valign="top" align="left">Perception is prerequisite for action</td>
<td valign="top" align="left">Perception errors produce unsafe actuation</td>
<td valign="top" align="left">Autonomous subtask continues under OOD conditions</td>
<td valign="top" align="left">OOD detection and swift transfer of control</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Any Module</bold></td>
<td valign="top" align="left">Downstream clinical workflow</td>
<td valign="top" align="left">Optional systems get dropped</td>
<td valign="top" align="left">Poor integration reduces adoption and increases misuse</td>
<td valign="top" align="left">Tool ignored or used incorrectly under time pressure</td>
<td valign="top" align="left">Interface and workflow are primary scientific outputs</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T3">
<label>Table 3 |</label>
<caption><title>Translation-grade evaluation crosswalk to reporting and staged-evidence frameworks</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="center"><bold>Evaluation Element</bold></th>
<th valign="top" align="center"><bold>What to Report in Surgical Oncology AI Papers</bold></th>
<th valign="top" align="center"><bold>Applies Most to</bold></th>
<th valign="top" align="center"><bold>Guideline and Staged-Evidence Crosswalk</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Cohort Definition and Target Decision</bold></td>
<td valign="top" align="left">Define the decision the model supports, not just the outcome</td>
<td valign="top" align="left">Decision support, classification</td>
<td valign="top" align="left">TRIPOD+AI for prediction model reporting; DECIDE-AI for decision-support evaluation framing</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Missing Data and Imbalance Handling</bold></td>
<td valign="top" align="left">Explicit handling strategies and sensitivity analyses</td>
<td valign="top" align="left">Decision support</td>
<td valign="top" align="left">TRIPOD+AI and DECIDE-AI emphasize transparent data handling and evaluation design</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Internal vs External Validation Separation</bold></td>
<td valign="top" align="left">Distinguish internal validation from true external testing</td>
<td valign="top" align="left">Decision support, all modules</td>
<td valign="top" align="left">TRIPOD+AI; IDEAL encourages staged evidence as systems mature</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Prospective Evaluation in Real Workflow</bold></td>
<td valign="top" align="left">Prospective studies with workflow endpoints, time to intervention, concordance</td>
<td valign="top" align="left">Decision support, intra-op systems</td>
<td valign="top" align="left">DECIDE-AI specifically targets early-stage prospective evaluation; CONSORT-AI applies when randomized trials exist; IDEAL later stages</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Interface and Human Factors</bold></td>
<td valign="top" align="left">Describe interface, tumor board integration, AR human factors testing</td>
<td valign="top" align="left">Decision support, AR</td>
<td valign="top" align="left">DECIDE-AI highlights integration into practice; CONSORT-AI requires reporting AI-specific trial elements when trialed</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Uncertainty as First-Class Output</bold></td>
<td valign="top" align="left">Calibration, confidence, abstention rules, and defined fail behavior</td>
<td valign="top" align="left">All modules, especially AR and robotics</td>
<td valign="top" align="left">DECIDE-AI and TRIPOD+AI support transparent uncertainty and safety behavior reporting</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Failure-Mode Characterization</bold></td>
<td valign="top" align="left">Evaluate modes and speed of failure, not only average accuracy</td>
<td valign="top" align="left">Intra-op navigation, tracking</td>
<td valign="top" align="left">IDEAL style staged evaluation aligns with progressively harsher real-world testing</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Cascade and System-Level Evaluation</bold></td>
<td valign="top" align="left">Test integrity chain and upstream&#x2013;downstream error propagation</td>
<td valign="top" align="left">Pre-op to intra-op to AR</td>
<td valign="top" align="left">IDEAL supports system maturation; DECIDE-AI supports system evaluation rather than isolated metrics</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Dataset Strategy for Domain Shift</bold></td>
<td valign="top" align="left">Build datasets with domain shift, label quality, temporal changes</td>
<td valign="top" align="left">All modules</td>
<td valign="top" align="left">TRIPOD+AI encourages transparent dataset and modeling description; staged evidence expects multi-site robustness</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Post-Deployment Monitoring</bold></td>
<td valign="top" align="left">Define monitoring signals, drift detection, auditability</td>
<td valign="top" align="left">Deployed systems</td>
<td valign="top" align="left">IDEAL long-term evaluation concepts; AI trial reporting literature emphasizes this as necessary for trustworthy adoption</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T4">
<label>Table 4 |</label>
<caption><title>Risk-of-harm and failure-mode matrix mapped to mitigations and what is actually tested</title></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="rows">
<thead>
<tr>
<th valign="top" align="center"><bold>Failure Mode</bold></th>
<th valign="top" align="center"><bold>Where It Manifests</bold></th>
<th valign="top" align="center"><bold>Why It Happens</bold></th>
<th valign="top" align="center"><bold>Why It Is Dangerous in Surgery</bold></th>
<th valign="top" align="center"><bold>Proposed Mitigation Strategy</bold></th>
<th valign="top" align="center"><bold>What the Literature Commonly Tests vs Rarely Tests</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Distribution Shift</bold></td>
<td valign="top" align="left">Decision support, classification</td>
<td valign="top" align="left">Multi-site variation in patients, imaging, practice</td>
<td valign="top" align="left">Quiet failure with plausible outputs</td>
<td valign="top" align="left">Routine external validation and prospective evaluation</td>
<td valign="top" align="left">Common: retrospective discrimination; Rare: prospective impact and multi-site robustness</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Confident Wrong Overlays</bold></td>
<td valign="top" align="left">Registration and AR</td>
<td valign="top" align="left">Subtle misalignment, non-rigid deformation, tracking drift</td>
<td valign="top" align="left">Elevates surgeon confidence toward wrong action</td>
<td valign="top" align="left">Uncertainty-first overlays; defined low-confidence behavior; multimodal sensing</td>
<td valign="top" align="left">Common: offline overlay accuracy; Rare: human factors harm testing under realistic conditions</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Cascading Upstream Error</bold></td>
<td valign="top" align="left">Detection to segmentation to registration</td>
<td valign="top" align="left">Pipeline dependency</td>
<td valign="top" align="left">Downstream harm scales from small upstream bias</td>
<td valign="top" align="left">Explicit cascade testing and integrity-chain evaluation</td>
<td valign="top" align="left">Common: module benchmarks; Rare: end-to-end safety tests</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Tracker Drift</bold></td>
<td valign="top" align="left">Intra-op tracking and AR</td>
<td valign="top" align="left">Occlusion, lighting, deformation, online updating</td>
<td valign="top" align="left">Intermittent wrong guidance at critical moments</td>
<td valign="top" align="left">Constraints during low confidence; governance for online updates</td>
<td valign="top" align="left">Common: controlled tracking benchmarks; Rare: governance and non-stationary behavior audits</td>
</tr>
<tr>
<td valign="top" align="left"><bold>OOD Edge Cases</bold></td>
<td valign="top" align="left">Robotics autonomy</td>
<td valign="top" align="left">Rare bleeds, unusual anatomy, unexpected complications</td>
<td valign="top" align="left">Policy performs worst at the edge where patients actually are</td>
<td valign="top" align="left">OOD detection and swift control transfer to human</td>
<td valign="top" align="left">Common: simulation success; Rare: validated OOD detection and safe handoff metrics</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Sim-to-Real Brittleness</bold></td>
<td valign="top" align="left">Reinforcement learning</td>
<td valign="top" align="left">Training in simulation with domain gap</td>
<td valign="top" align="left">Unsafe actuation when real-world dynamics differ</td>
<td valign="top" align="left">Conservative bounded autonomy; transfer learning validation</td>
<td valign="top" align="left">Common: simulation results; Rare: robust sim-to-real safety demonstration</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Command-Intention Misfire</bold></td>
<td valign="top" align="left">Human&#x2013;robot interface</td>
<td valign="top" align="left">OR noise, occlusion, cognitive load</td>
<td valign="top" align="left">Single wrong command can be dangerous</td>
<td valign="top" align="left">Extremely low tolerance error audits; multimodal redundancy</td>
<td valign="top" align="left">Common: lab HRI demos; Rare: OR-grade validation under noise and stress</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Dataset Label Heterogeneity</bold></td>
<td valign="top" align="left">Segmentation, detection</td>
<td valign="top" align="left">Annotation cost, heterogeneity, weak labels</td>
<td valign="top" align="left">Geometry errors appear precise and persuasive</td>
<td valign="top" align="left">Weak or self-supervised scaling plus robust evaluation</td>
<td valign="top" align="left">Common: single-site curated labels; Rare: heterogeneity stress tests</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Security and System Breakdown</bold></td>
<td valign="top" align="left">Deployed systems</td>
<td valign="top" align="left">Cybersecurity gaps, device failures</td>
<td valign="top" align="left">Instantaneous harm in high-stakes environment</td>
<td valign="top" align="left">Secure, auditable systems and governance</td>
<td valign="top" align="left">Common: privacy discussion; Rare: engineering-grade failure accountability</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="sec003-1">
<title>Scope and Clinical Tasks</title>
<p>In surgical oncology, <italic>clinical translation</italic> denotes the pathway by which AI and ML methods move from retrospective proof-of-concept toward deployment as workflow-integrated tools that measurably improve patient care under routine conditions.<sup><xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref13">13</xref></sup> This translational interface is dominated by <italic>clinical decision support</italic> (CDS), defined here as computer-based systems that provide patient-specific assessments or recommendations intended to aid clinical decision-making.<sup><xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref15">15</xref></sup> Decisions such as operative candidacy, extent and timing of resection, perioperative optimization, postoperative triage, and surveillance planning directly shape both short-term morbidity and long-term oncologic benefit.<sup><xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref16">16</xref>, <xref ref-type="bibr" rid="ref17">17</xref></sup> At this stage, model outputs must be not only accurate but also actionable, interpretable, and deliverable at the moment a decision is made, properties repeatedly associated with CDS effectiveness in broader health systems research and explicitly emphasized in surgical risk calculator design.<sup><xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref18">18</xref></sup></p>
<p>Contemporary perioperative decision-making is anchored by traditional risk stratification tools (eg, comorbidity indices and general surgical risk calculators), yet these instruments can be imperfectly calibrated to specific oncologic procedures, patient populations, and evolving perioperative pathways.<sup><xref ref-type="bibr" rid="ref18">18</xref>&#x2013;<xref ref-type="bibr" rid="ref21">21</xref></sup> ML-based CDS has therefore concentrated on three clinically actionable tasks: (1) preoperative prediction of postoperative morbidity, mortality, and resource use (eg, prolonged length of stay); (2) perioperative process-of-care decisions (eg, escalation of monitoring intensity or thromboprophylaxis); and (3) oncologic outcome prediction (eg, early recurrence, metastasis, survival) intended to tailor adjuvant therapy counseling and surveillance strategies.<sup><xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref22">22</xref>&#x2013;<xref ref-type="bibr" rid="ref24">24</xref></sup></p>
</sec>
<sec id="sec003-2">
<title>Data Substrates and Modeling Paradigms</title>
<p>Decision-support systems in surgical oncology are typically trained on multimodal clinical data rather than imaging alone.<sup><xref ref-type="bibr" rid="ref22">22</xref>, <xref ref-type="bibr" rid="ref25">25</xref>, <xref ref-type="bibr" rid="ref26">26</xref></sup> Common substrates include structured EHR variables (demographics, comorbidities, laboratory values, medications), perioperative process data (procedure type, operative duration, transfusion, enhanced recovery pathway adherence), and longitudinal physiologic measurements.<sup><xref ref-type="bibr" rid="ref22">22</xref>, <xref ref-type="bibr" rid="ref27">27</xref>, <xref ref-type="bibr" rid="ref28">28</xref></sup> Unstructured text has particular value because key prognostic and procedural descriptors are often documented in operative notes, pathology reports, and radiology narratives.<sup><xref ref-type="bibr" rid="ref25">25</xref>, <xref ref-type="bibr" rid="ref26">26</xref></sup> In ovarian cancer surgery, for example, natural language processing (NLP) applied to preoperative computed tomography (CT) reports improved prediction of morbidity and mortality beyond structured variables, illustrating how extraction of clinically salient detail from free text can enrich perioperative risk models without requiring new data collection.<sup><xref ref-type="bibr" rid="ref26">26</xref></sup></p>
<p>Patient-generated health data are an emerging input modality for post-discharge decision support.<sup><xref ref-type="bibr" rid="ref29">29</xref></sup> Remote telemonitoring platforms capturing symptoms and physiologic parameters have been evaluated for the prediction of postoperative complications after cancer surgery, addressing a recognized vulnerability in surgical pathways: many complications manifest after discharge, when traditional in-hospital monitoring has ended, and clinical contact is intermittent.<sup><xref ref-type="bibr" rid="ref29">29</xref></sup> These studies suggest that integration of patient-reported and sensor-derived data may extend the temporal window of actionable risk detection, but they also raise implementation requirements (device adherence, data governance, and clinical response capacity) that must be incorporated into translational evaluation rather than treated as downstream operational details.</p>
<p>Lastly, the notion of regulatory readiness for perioperative AI now extends to include a lifecycle plan for controlled updates and performance management, not merely pre-market performance and accuracy. In the case of the United States, the Food and Drug Administration (FDA) has issued guidance for AI-enabled medical devices that supports iterative modification via the Predetermined Change Control Plan (PCCP), which addresses what changes can occur and how they can be assessed to provide reasonable assurance of safety and effectiveness. At the same time, FDA cybersecurity guidance also addresses security-by-design within the quality system and the integration of cybersecurity documents within the device submission package for those devices with cybersecurity risk. In the EU, the Medical Device Regulation (MDR 2017/745) has also placed greater responsibility on manufacturers for post-market surveillance (Articles 83&#x2013;86) and for software IT security within general safety and performance requirements. In practice, we would advocate the alignment of OR-related quality and cybersecurity processes (secure health software lifecycle processes IEC 81001-5-1) and the establishment of triggers for escalation and rollback based upon statistically significant performance degradation on previously determined clinical endpoints (including calibration), sustained increases in out-of-distribution (OOD) rates and tracking and registration failures beyond control limits, safety events plausibly linked to the AI model performance, or significant cybersecurity events; mitigation should include rollback to the last known good model performance, silent mode revalidation, and auditable change history consistent with the PCCP.<sup><xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref29">29</xref></sup></p>
<p>Methodologically, most decision-support applications remain supervised prediction models trained on retrospective cohorts, benchmarking logistic regression or regularized regression against tree-based ensembles and, less commonly, neural networks.<sup><xref ref-type="bibr" rid="ref22">22</xref>, <xref ref-type="bibr" rid="ref30">30</xref>&#x2013;<xref ref-type="bibr" rid="ref32">32</xref></sup> Several comparative evaluations suggest that increased model complexity does not guarantee improved performance.<sup><xref ref-type="bibr" rid="ref22">22</xref>, <xref ref-type="bibr" rid="ref30">30</xref></sup> For example, in a population-based analysis of mortality prediction using administrative diagnosis codes, boosted trees did not outperform logistic regression, emphasizing that the added computational and interpretability costs of complex models should be justified by demonstrable gains in calibration, discrimination, or clinical utility relevant to the intended decision.<sup><xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref22">22</xref>, <xref ref-type="bibr" rid="ref33">33</xref></sup></p>
</sec>
<sec id="sec003-3">
<title>Perioperative Risk Stratification and Workflow-Integrated Recommendations</title>
<p>A central translational premise of perioperative CDS is to identify individuals at elevated risk for adverse outcomes and to trigger targeted pathways that reduce preventable morbidity without disproportionate resource consumption.<sup><xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref14">14</xref></sup> In colorectal cancer surgery, Rosen et al. reported a registry-based ML model predicting 1-year mortality, deployed as a clinical decision support tool coupled to personalized perioperative care pathways; the implementation was associated with reductions in postoperative complications, improved &#x201C;textbook&#x201D; outcomes, shorter length of stay, and lower costs, illustrating an end-to-end translational pattern from prediction to workflow-embedded intervention and measurable system-level impact.<sup><xref ref-type="bibr" rid="ref5">5</xref></sup> This kind of coupling between risk prediction and standardized, protocolized responses is also consistent with broader CDS literature showing that systems are more likely to change practice when they deliver recommendations (or pathways) within routine workflow rather than standalone risk estimates.<sup><xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref15">15</xref></sup></p>
<p>Across oncologic procedures, ML-based models have been developed to predict composite postoperative morbidity as well as specific complications that motivate discrete prophylactic or monitoring interventions.<sup><xref ref-type="bibr" rid="ref21">21</xref>, <xref ref-type="bibr" rid="ref28">28</xref>, <xref ref-type="bibr" rid="ref31">31</xref>, <xref ref-type="bibr" rid="ref33">33</xref>, <xref ref-type="bibr" rid="ref34">34</xref></sup> Examples include models for postoperative complications after liver surgery, early postoperative complications after radical gastrectomy, and complication risk stratification after lung cancer surgery.<sup><xref ref-type="bibr" rid="ref21">21</xref>, <xref ref-type="bibr" rid="ref31">31</xref>, <xref ref-type="bibr" rid="ref33">33</xref></sup> In a thoracic oncology cohort, an elastic net model predicting postoperative complications after lung cancer surgery achieved good discrimination and improved risk prediction compared with the Charlson Comorbidity Index, demonstrating that procedure-specific perioperative models can outperform general comorbidity summaries when appropriately developed and temporally validated.<sup><xref ref-type="bibr" rid="ref19">19</xref>, <xref ref-type="bibr" rid="ref21">21</xref></sup> Similarly, in head and neck cancer surgery, ML models have been used to predict prolonged length of stay and were compared against the ACS-NSQIP risk calculator and conventional statistical models, highlighting a recurring translational theme: models must be evaluated not only in isolation but also against the clinical baselines that actually shape care decisions.<sup><xref ref-type="bibr" rid="ref18">18</xref>, <xref ref-type="bibr" rid="ref20">20</xref></sup></p>
<p>Risk prediction for procedure-specific events illustrates both the opportunity and the translational constraints of CDS. Anastomotic complications after esophagectomy (leak, stricture) are high-impact outcomes with potential for algorithmically guided monitoring and early intervention.<sup><xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref35">35</xref>, <xref ref-type="bibr" rid="ref36">36</xref></sup> Recent studies have developed models combining clinical variables with imaging-derived features to predict anastomotic leakage, reporting discrimination and calibration metrics, as well as threshold-based evaluation intended to support clinical decision-making.<sup><xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref35">35</xref></sup> In parallel, automated ML approaches have been applied to predict post-esophagectomy strictures, suggesting that algorithmic pipelines can be designed for scalability when feature extraction and model selection are automated, although automation does not obviate the need for external validation, drift monitoring, and transparent reporting.<sup><xref ref-type="bibr" rid="ref36">36</xref>, <xref ref-type="bibr" rid="ref37">37</xref></sup></p>
<p>Dynamic decision support aligned to perioperative trajectories is increasingly emphasized because physiologic and laboratory changes often precede clinically recognized deterioration.<sup><xref ref-type="bibr" rid="ref27">27</xref>, <xref ref-type="bibr" rid="ref28">28</xref></sup> In a multicenter gastrectomy cohort, time-sequential ML models using serial laboratory and vital-sign measurements improved early prediction of postoperative complications compared with baseline models, supporting the concept that CDS should be designed as a longitudinal process rather than a single preoperative score.<sup><xref ref-type="bibr" rid="ref28">28</xref></sup> Complementary evidence comes from dynamic modeling of postoperative venous thromboembolism risk after colorectal cancer surgery, where repeated updates to risk estimates reflected evolving clinical states and supported individualized prophylaxis and monitoring decisions in a multicenter context.<sup><xref ref-type="bibr" rid="ref27">27</xref></sup></p>
<p>Prospective validation is particularly important for perioperative CDS because apparent performance in retrospective data can be inflated by outcome misclassification, missing data mechanisms, and site-specific practice patterns.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref37">37</xref>, <xref ref-type="bibr" rid="ref38">38</xref></sup> Notably, prospective evaluation has begun to appear in some surgical oncology prediction studies.<sup><xref ref-type="bibr" rid="ref28">28</xref>, <xref ref-type="bibr" rid="ref34">34</xref></sup> In lung cancer surgery, Chen et al. developed and prospectively validated an explainable ML model for postoperative pulmonary complications, reporting discrimination, calibration, decision curve analysis, and feature attribution via SHapley Additive exPlanations (SHAP), an unusually comprehensive suite of translational evaluation components for a single procedure-focused model.<sup><xref ref-type="bibr" rid="ref34">34</xref></sup> Together, these studies illustrate a maturation of perioperative CDS from static, internally validated scores toward longitudinal, interpretable, and prospectively assessed systems aligned with intervention thresholds and workflow integration.<sup><xref ref-type="bibr" rid="ref27">27</xref>, <xref ref-type="bibr" rid="ref34">34</xref>, <xref ref-type="bibr" rid="ref37">37</xref></sup></p>
</sec>
<sec id="sec003-4">
<title>Oncologic Outcomes and Treatment-Tailoring Decision Support</title>
<p>Decision support in surgical oncology must also address endpoints that determine cancer control and long-term survivorship, particularly early recurrence and metastasis after ostensibly curative resection.<sup><xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref39">39</xref></sup> In esophageal cancer, Rahman et al. developed ML models to predict early recurrence after surgery using multinational data and applied internal&#x2013;external validation across centers, providing an instructive template for transportability assessment when multicenter external validation is not yet feasible.<sup><xref ref-type="bibr" rid="ref39">39</xref></sup> In pancreatic cancer surgery, an interpretable ML model was developed to predict early liver metastasis after resection and was externally validated with calibration and decision-curve analyses; notably, the model was also implemented as an accessible application, reflecting an explicit translation intent. These examples underscore that oncologic CDS often targets events (recurrence, metastasis) that are clinically meaningful precisely because they can change downstream actions such as adjuvant therapy selection, surveillance intensity, and enrollment into clinical trials.<sup><xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref39">39</xref></sup></p>
<p>In glioblastoma, for instance&#x2014;a quintessential conundrum in oncology&#x2014;the same reasoning applies: despite &#x2018;maximal safe&#x2019; resection, early failure on imaging and failure patterns at the edge of resection are frequent and directly inform intensification of treatment, trial participation, and follow-up intensity.<sup><xref ref-type="bibr" rid="ref40">40</xref>, <xref ref-type="bibr" rid="ref41">41</xref></sup> A CDS approach using preoperative magnetic resonance imaging (MRI) radiomics, intraoperative factors such as extent of resection, and molecular factors such as MGMT promoter methylation or IDH mutation could risk-stratify for early failure and inform postoperative treatment and follow-up.<sup><xref ref-type="bibr" rid="ref42">42</xref></sup> Notably, in glioblastoma, this CDS system would need to directly address issues related to uncertainties in the differentiation of true failure from treatment-associated radiation changes and pseudoprogression.</p>
<p>Certain CDS applications bridge the boundary between decision support and intraoperative perception by producing staging or resectability assessments that directly influence operative strategy.<sup><xref ref-type="bibr" rid="ref24">24</xref>, <xref ref-type="bibr" rid="ref32">32</xref></sup> The artificial intelligence laparoscopic exploration system (AiLES) is an AI system designed to recognize intra-abdominal metastasis during laparoscopic gastric cancer surgery using real-time video, with evaluation against surgeon assessments; this form of intraoperative decision support can alter the surgical plan and downstream therapy selection when occult metastatic disease is detected.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup> In parallel, perioperative personalized decision support reviews have emphasized the need to integrate educational and feedback components with prediction outputs so that CDS improves not only prediction but also clinical action and decision quality.<sup><xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref32">32</xref></sup></p>
</sec>
<sec id="sec003-5">
<title>Evidence Maturity, External Validation, and Clinical Utility</title>
<p>A consistent finding across systematic reviews is that much of the surgical oncology AI literature remains concentrated in retrospective development studies, with heterogeneous endpoints and limited prospective evaluation demonstrating improved patient outcomes or workflow efficiency.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref37">37</xref></sup> Bekta&#x015F; et al., in a systematic review of ML applications in upper gastrointestinal cancer surgery, noted substantial heterogeneity in outcomes and methodological approaches, limiting comparative synthesis and emphasizing the need for standardized reporting and validation.<sup><xref ref-type="bibr" rid="ref37">37</xref></sup> Similarly, in rectal cancer surgery, a systematic review of AI models predicting surgical difficulty highlighted variability in definitions, imaging features, and validation strategies, illustrating that even seemingly bounded decision support targets (eg, &#x201C;difficulty&#x201D;) can suffer from endpoint ambiguity that impairs translation.<sup><xref ref-type="bibr" rid="ref37">37</xref></sup></p>
<p>Generalizability across institutions and time is a core translational constraint in perioperative CDS because case mix, perioperative protocols, coding practices, and follow-up capture differ across sites and evolve over time.<sup><xref ref-type="bibr" rid="ref13">13</xref></sup> Several surgical oncology studies have begun to address this through explicit external validation designs.<sup><xref ref-type="bibr" rid="ref6">6</xref>, <xref ref-type="bibr" rid="ref27">27</xref>, <xref ref-type="bibr" rid="ref29">29</xref>, <xref ref-type="bibr" rid="ref39">39</xref></sup> Dal Cero et al. performed an international external validation of a machine learning model for 90-day mortality after gastrectomy, providing direct evidence that transportability must be tested under clinical and geographic shifts rather than assumed from internal performance alone.<sup><xref ref-type="bibr" rid="ref6">6</xref></sup> Complementary internal&#x2013;external validation strategies, as used in multinational recurrence prediction after esophageal cancer surgery, similarly represent pragmatic approaches for early transportability assessment when full external validation cohorts are not yet available.<sup><xref ref-type="bibr" rid="ref37">37</xref>, <xref ref-type="bibr" rid="ref39">39</xref></sup></p>
<p>From a clinical translation perspective, discrimination is necessary but insufficient.<sup><xref ref-type="bibr" rid="ref43">43</xref>, <xref ref-type="bibr" rid="ref44">44</xref></sup> Poorly calibrated models can misestimate absolute risk and therefore misdirect threshold-based decisions (eg, intensive monitoring, admission to higher-acuity units, or initiation of prophylaxis).<sup><xref ref-type="bibr" rid="ref18">18</xref>, <xref ref-type="bibr" rid="ref43">43</xref></sup> Consequently, calibration assessment and decision-analytic evaluation should be considered minimum requirements for CDS systems intended to change clinical actions.<sup><xref ref-type="bibr" rid="ref43">43</xref>, <xref ref-type="bibr" rid="ref44">44</xref></sup> This emphasis is reflected both in methodological guidance and in several recent surgical oncology studies reporting calibration curves, Brier scores, and decision curve analysis (DCA) alongside discrimination.<sup><xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref34">34</xref>, <xref ref-type="bibr" rid="ref35">35</xref>, <xref ref-type="bibr" rid="ref40">40</xref></sup> The reporting of DCA is particularly relevant to decision support because it quantifies net benefit across thresholds, connecting model outputs to the clinical consequences of false positives and false negatives rather than relying solely on rank-based discrimination.<sup><xref ref-type="bibr" rid="ref34">34</xref>, <xref ref-type="bibr" rid="ref44">44</xref></sup></p>
</sec>
<sec id="sec003-6">
<title>Human Factors, Equity, and Reporting Standards</title>
<p>Even when predictive performance is favorable, CDS adoption depends on usability, clinician trust, and integration at the time and location of decision-making.<sup><xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref45">45</xref></sup> A systematic review of clinical decision support systems identified features associated with improved clinical practice, including automatic provision within workflow and delivery of recommendations rather than assessments alone, design principles that are directly relevant to AI/ML tools intended for perioperative oncology care.<sup><xref ref-type="bibr" rid="ref14">14</xref></sup> Qualitative research on decision support interventions targeting shared decision-making similarly underscores clinician concerns about appropriateness of recommendations, time costs, and alignment with clinical judgment, anticipating barriers to AI/ML CDS if outputs are poorly contextualized or not coupled to feasible actions.<sup><xref ref-type="bibr" rid="ref45">45</xref></sup> For patient-centered surgical decisions, evidence from randomized evaluation of conversation aids in breast cancer surgery further illustrates that the quality of decisions depends on aligning clinical options with patient values and understanding; prediction outputs are therefore only one component of the translational goal of improving decision quality.<sup><xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref46">46</xref></sup></p>
<p>Equity considerations are central to translational readiness because prediction models trained on historical healthcare data may encode structural inequities in access, treatment, and documentation.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref47">47</xref></sup> In a widely cited example outside oncology, Obermeyer et al. demonstrated substantial racial bias in a commercial risk prediction algorithm arising from the use of healthcare costs as a proxy for health needs; analogous proxy-label and documentation biases are plausible in surgical oncology when outcomes, comorbidities, and follow-up are differentially captured across groups.<sup><xref ref-type="bibr" rid="ref25">25</xref>, <xref ref-type="bibr" rid="ref37">37</xref>, <xref ref-type="bibr" rid="ref47">47</xref></sup> Systematic review evidence in cancer pathways also indicates inconsistent reporting of subgroup performance and limited attention to equity in prospective evaluations, reinforcing the need to treat fairness assessment as a core translational requirement rather than an optional post hoc analysis.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref38">38</xref>, <xref ref-type="bibr" rid="ref42">42</xref></sup></p>
<p>Finally, reporting and evaluation standards are prerequisites for reliable translation.<sup><xref ref-type="bibr" rid="ref42">42</xref>, <xref ref-type="bibr" rid="ref48">48</xref>&#x2013;<xref ref-type="bibr" rid="ref50">50</xref></sup> TRIPOD and the updated TRIPOD+AI statement provide minimum reporting items for prediction model studies using regression or ML methods, including a transparent description of data sources, outcome definitions, handling of missingness, and full reporting of performance metrics.<sup><xref ref-type="bibr" rid="ref48">48</xref>, <xref ref-type="bibr" rid="ref49">49</xref></sup> PROBAST and the updated PROBAST+AI tool support structured assessment of risk of bias and applicability, offering a shared framework for judging whether a model is likely to generalize and whether reported performance is credible.<sup><xref ref-type="bibr" rid="ref42">42</xref>, <xref ref-type="bibr" rid="ref50">50</xref></sup> For prospective and interventional evaluation of AI-enabled decision support, CONSORT-AI and SPIRIT-AI extend trial reporting and protocol guidance, while DECIDE-AI provides a reporting guideline for early-stage clinical evaluation of decision support systems driven by AI, an appropriate framework for perioperative CDS that may be introduced first in limited workflows before broader rollout.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref51">51</xref></sup></p>
<p>Human factors readiness of perioperative AI systems will demand interface design elements that support safety in situations of uncertainty, drift, and incorrectness. For example, at the UI level, uncertainty-first design principles must be implemented through quantification of probabilities and explicit threshold bands with prespecified actions, insufficient evidence states when probabilities are too low, drift detection and notification, and hard constraints against providing confident guidance through overlays and trajectories when upstream uncertainty bands are breached. At the workflow level, each AI system will need a compact failure mode playbook that maps common types of failures (eg, detection miss, segmentation bias, registration offset, tracking drift/OOD, cybersecurity issues) to standard responses such as confirmation steps, escalation protocols, fail-safe transitions to manual or alternative modes of imaging, documentation requirements, and review triggers after failure events. In parallel, equity assessment will need to be addressed as a translational deliverable through a simple audit template that reports stratified performance and calibration across clinically meaningful subgroups and settings (eg, by sex, age, race/ethnicity if available, comorbidity burden, language and insurance status if applicable), subgroup-specific net benefit analysis through DCA, and prespecified mitigation approaches such as targeted data enrichment, reweighting, and domain adaptation; subgroup-dependent thresholds with clinically justified trade-offs; and monitoring with rollback criteria if disparate errors are seen (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig id="F1" position="float">
<object-id pub-id-type="doi">10.70389/journal.pjs.100269.g001</object-id>
<label>Fig 1 |</label>
<caption><title>An uncertainty-first operative intelligence stack for AI-enabled surgical care.</title></caption>
<caption><p>This diagram represents the end-to-end, dependency-conscious approach to the systematic integration of AI across the surgical continuum of care, from decision support to planning and navigation with bounded autonomy. (1) Decision support: Intra-operatively and retrospectively validated models are used to make inferences regarding risk assessment, complication, and therapy, which are quantitatively paired with model-fit or solution-space uncertainties to facilitate decision-making for clinicians. (2) Preoperatively, patient-specific models of anatomy and geometry are created via the use of pipelines of classification, detection, segmentation, and registration, which incorporate operative conditions to provide the patient with a surgical model. (3) Intraoperatively, navigation and robotics provide the operative intelligence stack that incorporates techniques for endoscopic navigation and SLAM, tissue tracking, and augmented reality overlays, which utilize supervised, bounded autonomy. Note the failure modes depicted in the figure: detection failure, segmentation bias, registration offset, and tracking drift with out-of-distribution (OOD) inputs, which propagate to confident but erroneous guidance. As the dependency rail makes manifest, the reliability of the former models of intelligence conditionally bounds the latter model&#x2019;s autonomy to emphasize the imperative for safe AI applications. For each domain, translational readiness is constrained by: data provenance and representativeness; endpoint definition and clinical action linkage; internal, external, and prospective validation; robustness to dataset shift; calibration and uncertainty communication; human factors and workflow integration; and lifecycle governance (safety, monitoring, and cybersecurity). The framework emphasizes that headline accuracy is insufficient when downstream actions are irreversible, time-constrained, and safety-critical. 
An accompanying domain-level evidence map should make the action linkage explicit (what decision changes, by whom, and at what threshold) and align evaluation to that decision rather than headline accuracy. Minimum reporting should include external and prospective validation (including silent trials) plus calibration and clinical-utility analyses (reliability curves and decision-curve analysis) to justify operating points and fail-safe triggers.</p></caption>
<graphic xlink:href="pjs-26-1588-Figure-1.jpg" mime-subtype="jpg"/>
</fig>
</sec>
</sec>
<sec id="sec004">
<title>Regulatory, Safety, and Lifecycle Governance for Perioperative Oncology AI</title>
<p>When ML systems are used to provide patient-specific information for the guidance of perioperative cancer care, they are likely to be categorized as Software as a Medical Device (SaMD), where the intention is to diagnose, treat, mitigate, or in any way affect or influence the care provided to the patient. For such ML systems to be translated for practical use in the field of medicine, they must align with the lifecycle approach that covers the major regulatory regions or the regulatory harmonization process (FDA, EMA, IMDRF), as opposed to the retrospective approach that is based purely on the system&#x2019;s historical performance. To achieve this, the system must be designed to meet regulatory requirements through its intended use, the definition of its role in the clinical setting (whether assistive or advisory), the hazards associated with its use, and the associated mitigations in line with the traditional approach to risk management. This means that the system must meet the requirements for the software development lifecycle (IEC 62304), usability engineering for the system&#x2019;s interfaces (IEC 62366), as well as the systematic approach to risk management (ISO 14971) that applies to the field of surgical oncology, where time pressure, irreversible actions, and the consequences associated with errors are the norm. The system&#x2019;s governance must not be limited to pre-deployment validation but must extend to post-deployment monitoring to ensure that the system performs as required in the real-world setting.<sup><xref ref-type="bibr" rid="ref52">52</xref>&#x2013;<xref ref-type="bibr" rid="ref56">56</xref></sup></p>
<p>Collectively, the evidence to date indicates that clinical translation of ML decision support in surgical oncology will depend less on incremental improvements in discrimination and more on rigorous alignment of model endpoints with actionable decisions, transparent reporting, robust external and prospective validation, user-centered workflow integration, and ongoing monitoring for calibration drift and inequitable performance.<sup><xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref49">49</xref>, <xref ref-type="bibr" rid="ref50">50</xref></sup></p>
</sec>
<sec id="sec005">
<title>Preoperative Planning</title>
<p>Preoperative planning in surgical oncology refers to computational and procedural methods that transform pre-treatment data, most commonly cross-sectional imaging, into patient-specific representations intended to guide operative strategy, define resection targets, anticipate technical constraints, and coordinate adjunct technologies (eg, navigation, fluorescence, or robotic assistance).<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref51">51</xref>, <xref ref-type="bibr" rid="ref52">52</xref></sup> As deployed in current clinical workflows, planning systems typically emphasize geometric and spatial reasoning&#x2014;what anatomy is present, where the tumor lies relative to critical structures, and how operative goals map onto feasible dissection planes&#x2014;rather than solely providing probabilistic risk estimates.<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref57">57</xref></sup> Across organ systems, the dominant technical primitives underlying these workflows remain lesion characterization (including staging-relevant inference), anatomic segmentation and reconstruction, and multimodal fusion/registration.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref53">53</xref>&#x2013;<xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref58">58</xref></sup></p>
<sec id="sec005-1">
<title>Imaging-Derived Characterization and Staging</title>
<p>Radiologic staging and anatomic definition remain central determinants of operability and approach selection; accordingly, AI/ML methods have been applied to extract structured staging signals from imaging, radiology narratives, and complementary tissue sources.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref59">59</xref>&#x2013;<xref ref-type="bibr" rid="ref62">62</xref></sup> Radiomics-based strategies, whether relying on engineered feature families or learned representations, have been positioned as a means to quantify tumor phenotype and context in ways that may complement conventional human interpretation, while also amplifying sensitivity to imaging protocol variability and preprocessing choices.<sup><xref ref-type="bibr" rid="ref1">1</xref></sup> In breast oncology, convolutional neural network analysis of multiparametric MRI has been evaluated for preoperative prediction of axillary lymph node metastasis, directly targeting a staging component that may influence the extent of axillary surgery and systemic therapy sequencing.<sup><xref ref-type="bibr" rid="ref59">59</xref></sup> In gastric cancer, multimodal approaches that combine CT-derived information with digital pathology features (whole-slide imaging [WSI]) have been reported for staging-related prediction, illustrating how &#x201C;preoperative planning&#x201D; increasingly extends beyond imaging alone when pre-treatment tissue is available (eg, endoscopic biopsy).<sup><xref ref-type="bibr" rid="ref60">60</xref></sup></p>
<p>Natural language processing (NLP) has also been explored to convert narrative radiology into structured, decision-relevant staging information. Automated esophageal cancer staging from free-text radiology reports has been evaluated using large language models (LLMs), supporting the feasibility of extracting standardized staging descriptors from routine documentation and potentially reducing manual abstraction burden in multidisciplinary workflows.<sup><xref ref-type="bibr" rid="ref61">61</xref></sup> Because many planning decisions hinge on a synthesis of imaging, histology, and clinical narrative, LLM-centered approaches may be attractive for workflow integration; however, their translational value depends on rigorous evaluation under realistic variability in report style, missingness, and institutional terminology, as well as safeguards against unsupported inference and attention to equitable performance for at-risk social groups in healthcare.<sup><xref ref-type="bibr" rid="ref61">61</xref>, <xref ref-type="bibr" rid="ref63">63</xref></sup></p>
</sec>
<sec id="sec005-2">
<title>Planning-Oriented Targets Beyond Categorical Stage</title>
<p>Planning outputs extend beyond the categorical stage to intermediate, decision-relevant targets such as anticipated technical difficulty, required exposure, and margin risk.<sup><xref ref-type="bibr" rid="ref41">41</xref>, <xref ref-type="bibr" rid="ref62">62</xref></sup> In rectal cancer surgery, AI models applied to preoperative MRI have been studied for predicting surgical difficulty, an endpoint that can influence the choice of minimally invasive vs open approach, need for specialized instrumentation, and staffing/experience requirements.<sup><xref ref-type="bibr" rid="ref41">41</xref></sup> In oral squamous cell carcinoma, pathomics-based models have been studied for predicting positive surgical margins, illustrating an emerging paradigm in which pre-treatment or perioperative tissue-derived features are translated into margin-focused planning inputs.<sup><xref ref-type="bibr" rid="ref62">62</xref></sup> These examples underscore a broader trend: planning systems often seek to forecast &#x201C;actionable constraints&#x201D; (eg, limited working space, high-risk dissection planes) rather than only long-term outcomes, but such targets require careful operationalization and validation because they can be sensitive to surgeon factors, institutional practice patterns, and differences in operative technique.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref41">41</xref></sup></p>
</sec>
<sec id="sec005-3">
<title>Segmentation, Quantitative Anatomy, and Patient-Specific Modeling</title>
<p>Most preoperative planning workflows depend on delineation of tumors, organs-at-risk, and critical structures, making anatomic segmentation an enabling task for 3D reconstruction, simulation, and downstream guidance.<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref54">54</xref>&#x2013;<xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref58">58</xref>, <xref ref-type="bibr" rid="ref64">64</xref>&#x2013;<xref ref-type="bibr" rid="ref67">67</xref></sup> In thoracic oncology, semantic segmentation of chest CT has been used to recognize patient-specific pulmonary vessel variants relevant to resection planning, illustrating how segmentation can be directed toward clinically consequential anatomic variability rather than organ boundary extraction alone.<sup><xref ref-type="bibr" rid="ref58">58</xref></sup> In rectal cancer, 3D reconstruction has been described as a means to improve diagnosis and surgical planning, reflecting sustained interest in translating pelvic MRI into spatially coherent representations that can support operative strategy and multidisciplinary discussion.<sup><xref ref-type="bibr" rid="ref55">55</xref></sup></p>
<p>Segmentation also supports simulation-oriented planning that seeks to anticipate intraoperative views and dissection trajectories.<sup><xref ref-type="bibr" rid="ref64">64</xref>&#x2013;<xref ref-type="bibr" rid="ref66">66</xref></sup> An AI-driven 3D simulation system for gastric cancer surgery has been reported as a retrospective validation study, consistent with an early translational pathway in which automated model construction and anatomy recognition are first evaluated for feasibility and concordance with expert interpretation before the impact on procedural decisions is tested prospectively.<sup><xref ref-type="bibr" rid="ref64">64</xref></sup> Similarly, AI-based technology to generate a 3D model for rectal cancer surgery planning from MRI has been reported as a step toward preoperative simulation, highlighting an approach in which imaging-derived models are used for rehearsal and spatial understanding rather than direct automation of operative steps.<sup><xref ref-type="bibr" rid="ref65">65</xref></sup> At a more granular anatomic scale, automated segmentation of male pelvic floor soft tissues has been proposed for preoperative simulation and morphologic assessment in lower rectal cancer surgery, reflecting the planning value of extracting patient-specific pelvic anatomy that may influence exposure and dissection strategy.<sup><xref ref-type="bibr" rid="ref66">66</xref></sup></p>
<p>Planning applications have also been described in hepatobiliary contexts using intelligent image segmentation approaches, aligning with long-standing clinical needs for patient-specific visualization of vascular and biliary anatomy and for volumetric assessment in resection planning.<sup><xref ref-type="bibr" rid="ref67">67</xref></sup> Although organ-specific reviews emphasize potential benefits of AI/ML for liver cancer surgery, the evidentiary standard for preoperative planning systems remains high because errors in anatomic delineation or spatial relationships can directly affect operative decisions.<sup><xref ref-type="bibr" rid="ref52">52</xref></sup></p>
</sec>
<sec id="sec005-4">
<title>Quantitative Biomarkers Derived from Segmentation</title>
<p>Quantitative biomarkers derived from segmentation can inform preoperative optimization and approach selection when measurement performance is adequate, and the clinical mapping from measurement to action is explicit.<sup><xref ref-type="bibr" rid="ref68">68</xref>&#x2013;<xref ref-type="bibr" rid="ref70">70</xref></sup> Automated body composition assessment from routine CT has been evaluated as a scalable method to measure muscle and adipose compartments, including validation-focused studies of deep learning derived measurements from standard-of-care CT examinations.<sup><xref ref-type="bibr" rid="ref68">68</xref>, <xref ref-type="bibr" rid="ref69">69</xref></sup> Related work has linked imaging-derived sarcopenic obesity with prognostic outcomes, supporting the premise that quantitative imaging may identify phenotypes relevant to prehabilitation, nutritional optimization, and candidacy assessments.<sup><xref ref-type="bibr" rid="ref70">70</xref></sup> These approaches may be particularly attractive because they leverage imaging already obtained for staging, but translational use requires transparent reporting of measurement error, robustness across scanners and protocols, and clear articulation of how biomarker thresholds would change management.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref68">68</xref>, <xref ref-type="bibr" rid="ref70">70</xref></sup></p>
</sec>
<sec id="sec005-5">
<title>Image Fusion, Registration, and Deformation-Aware Planning</title>
<p>Preoperative planning frequently requires integration across modalities (eg, multiphase CT and MRI), across timepoints, and across coordinate frames that differ from the intraoperative configuration; therefore, image registration and fusion are recurrent bottlenecks for constructing coherent 3D models that can be inspected by clinicians and, in some settings, transferred to guidance systems.<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref54">54</xref>, <xref ref-type="bibr" rid="ref56">56</xref></sup> Reviews of 3D reconstruction and organ modeling emphasize that static preoperative models can be undermined by soft-tissue deformation and physiologic motion (eg, respiration), motivating deformation-aware planning and, where feasible, updating or reconciling models with intraoperative sensing.<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref54">54</xref></sup> The urologic robotics literature similarly situates preoperative modeling within a broader imaging-robotics ecosystem, highlighting both the promise of patient-specific reconstructions and the technical challenges of aligning preoperative images with intraoperative anatomy.<sup><xref ref-type="bibr" rid="ref53">53</xref></sup></p>
</sec>
<sec id="sec005-6">
<title>Simulation, Rehearsal, and Operator-Facing Visualization</title>
<p>Because a preoperative plan must be interpretable and actionable for surgeons and operative teams, many planning systems emphasize operator-facing 3D visualization and rehearsal rather than prediction outputs alone.<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref54">54</xref>&#x2013;<xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref58">58</xref>, <xref ref-type="bibr" rid="ref64">64</xref>, <xref ref-type="bibr" rid="ref65">65</xref></sup> Work on 3D lung model development for minimally invasive lung cancer surgery has explicitly framed a progression from static reconstructions toward real-time or dynamic modeling, consistent with the clinical observation that thoracoscopic and robotic approaches increase dependence on accurate spatial representations when tactile feedback is limited.<sup><xref ref-type="bibr" rid="ref54">54</xref></sup> This emphasis on visualization is also apparent in gastrointestinal oncology planning efforts that use AI-enabled reconstruction to support preoperative simulation in gastric and rectal cancer operations.<sup><xref ref-type="bibr" rid="ref64">64</xref>, <xref ref-type="bibr" rid="ref65">65</xref></sup></p>
<p>Virtual reality and 3D-printed models have been systematically reviewed as planning tools in image-guided, robot-assisted nephron-sparing surgery, providing evidence synthesis on how patient-specific 3D representations may support surgeon understanding, rehearsal, and intra-team communication.<sup><xref ref-type="bibr" rid="ref57">57</xref></sup> Complementary discussions of robotic partial nephrectomy in the era of 3D virtual reconstructions similarly reflect continued interest in operationalizing preoperative 3D models as part of routine planning.<sup><xref ref-type="bibr" rid="ref71">71</xref></sup> While these technologies are conceptually aligned with surgical oncology goals, improving spatial understanding and reducing unanticipated anatomy, the translational question is whether such tools measurably improve decision quality or outcomes when implemented at scale, accounting for the time and expertise required for model generation and review.<sup><xref ref-type="bibr" rid="ref57">57</xref>, <xref ref-type="bibr" rid="ref71">71</xref></sup></p>
</sec>
<sec id="sec005-7">
<title>Selecting and Planning Adjunct Imaging and Guidance Technologies</title>
<p>Preoperative planning also includes selection and orchestration of adjunct intraoperative sensing modalities and tracers, with plans often specifying timing of administration, expected contrast behavior, and decision thresholds.<sup><xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref72">72</xref>, <xref ref-type="bibr" rid="ref73">73</xref></sup> Reviews of precision cancer surgery describe fluorescence-guided approaches that seek to improve tumor margin identification and lymphatic mapping, while emphasizing constraints imposed by probe pharmacokinetics, tissue optical properties, and imaging hardware.<sup><xref ref-type="bibr" rid="ref72">72</xref></sup> A randomized translational study of protein- and peptide-based probes illustrates that probe selection and deployment strategy are empirically tractable components of the planning process, but also highlights that clinical utility depends on matching probe behavior to a defined decision task (eg, margin assessment or lesion localization).<sup><xref ref-type="bibr" rid="ref73">73</xref></sup> Robotic- and image-guided workflows have likewise been described for fluorescence detection, reflecting ongoing efforts to integrate targeted imaging into operative systems.<sup><xref ref-type="bibr" rid="ref74">74</xref></sup></p>
<p>Lymphatic mapping provides a complementary example in which tracer workflow and imaging protocol determine intraoperative localization targets. Indocyanine green (ICG)-based lymph node mapping has been described in cancer surgery contexts, and preoperative sentinel node mapping work in gynecologic oncology demonstrates how imaging protocol design and tracer deployment can be codified into planning pathways that influence operative navigation targets.<sup><xref ref-type="bibr" rid="ref75">75</xref>, <xref ref-type="bibr" rid="ref76">76</xref></sup> Optical coherence tomography (OCT) adds a further dimension by providing microstructural imaging that can, in principle, support margin-focused decision-making; systematic and scoping reviews describe OCT in intraoperative tissue assessment and AI-enabled OCT for disease detection and diagnosis, respectively, emphasizing both potential and the need for clinically grounded validation.<sup><xref ref-type="bibr" rid="ref77">77</xref>&#x2013;<xref ref-type="bibr" rid="ref79">79</xref></sup></p>
</sec>
<sec id="sec005-8">
<title>Methodological and Evidentiary Requirements for Translation</title>
<p>For AI/ML-enabled preoperative planning, translational readiness depends on demonstrating that outputs are reliable under routine imaging heterogeneity, that errors are bounded and detectable, and that any claimed benefit is linked to decision-relevant endpoints rather than surrogate technical metrics alone.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref54">54</xref>, <xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref68">68</xref></sup> Validation studies in segmentation and quantitative imaging (eg, body composition measurement) illustrate the importance of reporting agreement with reference standards and clarifying conditions under which manual correction remains necessary.<sup><xref ref-type="bibr" rid="ref68">68</xref>, <xref ref-type="bibr" rid="ref69">69</xref></sup> Because planning models often function as upstream dependencies for downstream navigation, simulation, or robotic workflows, failure modes should be characterized not only at the level of segmentation overlap but also in terms of clinically meaningful spatial errors (eg, mislocalization relative to vessels, planes, or margins).<sup><xref ref-type="bibr" rid="ref2">2</xref>, <xref ref-type="bibr" rid="ref54">54</xref>, <xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref58">58</xref></sup></p>
<p>Surgeon-facing generative systems and automated staging tools introduce additional requirements beyond conventional imaging models, including provenance-aware synthesis, safeguards against hallucinated or non&#x2013;evidence-based assertions, and evaluation under realistic documentation variability.<sup><xref ref-type="bibr" rid="ref61">61</xref></sup> Current single-center retrospective planning studies, whether focused on 3D reconstruction, simulation, or prompt-driven assistance, should therefore be interpreted as feasibility signals; subsequent work must incorporate external validation, prospectively defined endpoints, and, where possible, decision-impact study designs that quantify how planning outputs change operative choices and whether those changes improve patient-centered outcomes.<sup><xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref54">54</xref>, <xref ref-type="bibr" rid="ref57">57</xref>, <xref ref-type="bibr" rid="ref61">61</xref>&#x2013;<xref ref-type="bibr" rid="ref65">65</xref></sup></p>
</sec>
</sec>
<sec id="sec006">
<title>Intraoperative Navigation with Robotic Assistance/Control</title>
<p>Intraoperative navigation with robotic assistance/control comprises computational systems that interpret operative data streams in real time and translate those interpretations into actionable guidance, visualization, or robot-mediated behaviors during surgery.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref></sup> In surgical oncology, this domain has expanded alongside the adoption of minimally invasive and robotic approaches across urologic, colorectal, foregut, and pancreatic procedures, which standardize endoscopic viewpoints and yield high-fidelity digital signals (eg, video and robotic platform telemetry).<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref></sup> Contemporary frameworks described in systematic and specialty-focused reviews generally position intraoperative AI/ML as progressing from perception and measurement to navigation and decision support, and only later (and more cautiously) toward shared autonomy or control-constraining behaviors.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref></sup></p>
<sec id="sec006-1">
<title>Data Modalities and the Primacy of Intraoperative Perception</title>
<p>Across robotic oncologic surgery, the dominant substrate for intraoperative ML is surgical video, reflecting the ubiquity and standardization of endoscopic and robotic camera feeds and the relative ease of rendering outputs as overlays within existing display pipelines.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref></sup> Proposed enabling architectures for AI-enhanced processing of da Vinci robot-assisted video emphasize that translational feasibility is conditioned by system-level constraints&#x2014;deterministic latency, robust video ingestion, and reliable handling of large data streams&#x2014;which can be as decisive as model accuracy for operating room deployment.<sup><xref ref-type="bibr" rid="ref82">82</xref></sup></p>
<p>Video-based perception has been used to recognize anatomic structures directly tied to oncologic safety and postoperative function. Deep learning approaches for vessel and tissue recognition during third-space endoscopy illustrate a generalizable pattern: models are trained to detect structures that may be visually subtle yet clinically consequential when injured, with outputs intended to support safer dissection in minimally invasive contexts.<sup><xref ref-type="bibr" rid="ref83">83</xref></sup> In urologic oncology, an AI-based intraoperative nerve recognition system has been described for nerve sparing during robotic prostatectomy, exemplifying a navigation-adjacent use case in which &#x201C;recognize-and-overlay&#x201D; can provide moment-to-moment spatial cues during technically demanding dissection.<sup><xref ref-type="bibr" rid="ref84">84</xref></sup> These approaches align with broader reviews of robotic oncologic surgery that highlight computer vision-based recognition of anatomy and &#x201C;danger zones&#x201D; as a near-term, clinically plausible tier of intraoperative AI support.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref></sup></p>
<p>Intraoperative perception has also been extended to targets that are explicitly oncologic rather than purely anatomic. In laparoscopic gastric cancer surgery, the AiLES was developed for real-time recognition of intra-abdominal metastasis during exploration, directly addressing a clinically consequential failure mode in which small or occult lesions are overlooked, and staging is thereby misclassified.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup> Reviews of intraoperative imaging tools in gastrointestinal oncology likewise emphasize that the clinical value of intraoperative perception increases when outputs are tied to specific intraoperative actions (eg, escalation to biopsy/frozen section, targeted inspection, or altered resection strategy) rather than solely to descriptive visualization.<sup><xref ref-type="bibr" rid="ref56">56</xref></sup></p>
<p>Functional state estimation provides a complementary intraoperative objective because tissue perfusion and viability influence anastomotic strategy and can interact with oncologic extent through resection planning.<sup><xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref85">85</xref>, <xref ref-type="bibr" rid="ref86">86</xref></sup> AI-based real-time microcirculation imaging has been evaluated for assessing colonic perfusion status, representing a class of intraoperative systems that infer physiologic states from subtle imaging cues that are difficult to quantify reliably by human vision alone.<sup><xref ref-type="bibr" rid="ref85">85</xref></sup> In addition, intraoperative near-infrared (NIR) functional imaging has been studied in colorectal cancer settings, reflecting efforts to quantify perfusion-related and function-related signals that could be integrated into navigation workflows and assessed against endpoints such as leak risk, complication rates, or re-intervention.<sup><xref ref-type="bibr" rid="ref86">86</xref></sup></p>
</sec>
<sec id="sec006-2">
<title>Geometry-Aware Navigation: Localization, Registration, and Coordinate Consistency</title>
<p>Navigation systems must establish spatial relationships between recognized structures, instruments, and (when applicable) preoperative models, often under conditions of soft-tissue deformation, camera motion, and variable insufflation.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref></sup> Real-time vascular anatomical image navigation has been demonstrated for laparoscopic surgery as an approach to aligning vascular anatomy with intraoperative views to support orientation and safer dissection around critical vessels.<sup><xref ref-type="bibr" rid="ref8">8</xref></sup> In rectosigmoid oncology, the feasibility of optical stereotactic navigation supported by deep learning based 3D modeling has been reported, highlighting the clinical interest in &#x201C;coordinate-consistent&#x201D; guidance for tumor localization and margin-conscious dissection within anatomically constrained pelvic spaces.<sup><xref ref-type="bibr" rid="ref9">9</xref></sup></p>
<p>Evidence from thoracic oncology similarly indicates active development of geometry-aware navigation strategies. A prospective application study of non-contact, AI-assisted intraoperative 3D navigation in lung cancer surgery demonstrates a pragmatic approach in which spatial information is acquired and reconstructed intraoperatively to mitigate the mismatch between preoperative imaging and intraoperative anatomy.<sup><xref ref-type="bibr" rid="ref12">12</xref></sup> Across these demonstrations, the translational challenge is not merely producing a geometric solution but maintaining accuracy under clinically routine perturbations (eg, deformation, retractors, motion, variable insufflation), and predefining failure detection and escalation workflows when navigation assumptions no longer hold.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref9">9</xref>, <xref ref-type="bibr" rid="ref12">12</xref>, <xref ref-type="bibr" rid="ref53">53</xref></sup></p>
<p>Visualization is a central component of intraoperative navigation because guidance must be interpretable at the point of action. Reviews of extended reality and &#x201C;metaverse&#x201D; concepts in surgery discuss how augmented visualization environments could provide spatial context and training ecosystems, while emphasizing limited clinical evidence for outcome benefit and the nontrivial challenges of integrating head-mounted or mixed reality interfaces into sterile workflows.<sup><xref ref-type="bibr" rid="ref87">87</xref></sup> In robotic radical prostatectomy, specialty-focused reviews similarly frame advanced visualization (including potential augmented guidance) as an emerging direction that will require rigorous clinical validation and human factors evaluation before routine adoption.<sup><xref ref-type="bibr" rid="ref81">81</xref></sup></p>
</sec>
<sec id="sec006-3">
<title>Imaging-Augmented Guidance in Robotic Oncology</title>
<p>Optical contrast agents and intraoperative imaging can function as &#x201C;signal amplifiers&#x201D; for tumor, lymphatics, or function, and ML methods are increasingly used to standardize interpretation and generate overlays that can be integrated into robotic visualization pipelines.<sup><xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref56">56</xref></sup> Robotic assistance can stabilize imaging geometry and enable instrument-mounted sensing, as illustrated by robotic &#x201C;click-on&#x201D; fluorescence detection using surgical instruments to characterize molecular tissue aspects, a design pattern that can support localized assessment within an otherwise video-dominated workflow.<sup><xref ref-type="bibr" rid="ref74">74</xref></sup></p>
<p>Deep learning enabled fluorescence imaging has been developed for surgical guidance tasks aligned with oncologic decision-making. For oral cancer, in silico trained deep learning methods have been described for fluorescence-based depth quantification and for fluorescence-guided margin classification in preclinical models, representing an approach that seeks to convert fluorescence signal into explicitly interpretable, decision-relevant outputs rather than qualitative visualization alone.<sup><xref ref-type="bibr" rid="ref88">88</xref>, <xref ref-type="bibr" rid="ref89">89</xref></sup> More broadly, reviews of precision cancer surgery emphasize that the translational value of imaging-augmented guidance depends on linking the imaging signal (and any ML-derived interpretation) to prespecified intraoperative decisions (eg, additional resection, targeted sampling, or altered dissection plane) and demonstrating benefit against clinically meaningful endpoints.<sup><xref ref-type="bibr" rid="ref56">56</xref></sup></p>
<p>Near-infrared fluorescence and functional imaging also support navigation decisions beyond margin assessment, including lymphatic mapping and perfusion-oriented guidance.<sup><xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref86">86</xref>, <xref ref-type="bibr" rid="ref90">90</xref></sup> In upper gastrointestinal oncology, indocyanine green (ICG) lymph node mapping has been described for cancer surgery, and prospective mapping of lymphatic drainage patterns with NIR fluorescence has been studied during robotic-assisted minimally invasive Ivor Lewis esophagectomy.<sup><xref ref-type="bibr" rid="ref75">75</xref>, <xref ref-type="bibr" rid="ref91">91</xref></sup> These studies illustrate that intraoperative guidance is often probabilistic and pathway-dependent (eg, drainage patterns and node basins) and therefore benefits from clear protocols describing how intraoperative findings are intended to modify the operative plan.<sup><xref ref-type="bibr" rid="ref56">56</xref></sup></p>
</sec>
<sec id="sec006-4">
<title>Label-Free Tissue Assessment and Uncertainty-Aware Inference</title>
<p>Label-free tissue characterization provides an alternative approach to intraoperative guidance by aiming to classify tissue state directly from biophysical signatures, potentially enabling margin assessment without exogenous tracers.<sup><xref ref-type="bibr" rid="ref11">11</xref>, <xref ref-type="bibr" rid="ref92">92</xref>, <xref ref-type="bibr" rid="ref93">93</xref></sup> Perioperative tissue assessment using combined mass spectrometry and histopathology imaging has been described as a framework for integrating molecular and morphologic information to inform intraoperative decisions in cancer surgery.<sup><xref ref-type="bibr" rid="ref93">93</xref></sup> Within this domain, uncertainty estimation for surgical margin detection using mass spectrometry has been reported as a strategy to calibrate confidence and support decision thresholds suited to high-stakes intraoperative use.<sup><xref ref-type="bibr" rid="ref11">11</xref></sup></p>
<p>Representation-learning strategies have also been applied to reduce dependence on exhaustive labeling while preserving clinically relevant performance, exemplified by image-driven self-supervised learning for mass spectrometry-based tissue assessment.<sup><xref ref-type="bibr" rid="ref92">92</xref></sup> Although these methodological advances may improve scalability, systematic syntheses in robotic cancer surgery converge on the need for external validation and explicit specification of action policies when model outputs are uncertain, particularly when outputs are used to trigger irreversible actions such as widening margins, converting operative plans, or escalating to additional resection.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref></sup> Accordingly, translational protocols should predefine how uncertainty will be communicated to the surgical team, how clinicians should respond to uncertain outputs (eg, confirmatory pathology), and how performance will be monitored over time once deployed.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref11">11</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref92">92</xref></sup></p>
</sec>
<sec id="sec006-5">
<title>Microscopic Intraoperative Imaging and AI-Enabled Interpretation</title>
<p>Microscopic intraoperative imaging modalities such as OCT have been evaluated as tools for margin-focused guidance and tissue characterization when macroscopic imaging is insufficient to resolve microstructural features at the resection interface.<sup><xref ref-type="bibr" rid="ref77">77</xref>, <xref ref-type="bibr" rid="ref79">79</xref></sup> A systematic review of OCT in oncologic surgery summarizes evidence for intraoperative tissue assessment and highlights translational barriers, including limited standardization, variable acquisition conditions, and uncertainty about how best to integrate OCT-derived information into operative workflows.<sup><xref ref-type="bibr" rid="ref79">79</xref></sup> A scoping review focused on AI in advancing OCT further reinforces that algorithmic interpretation is a key enabler for converting high-dimensional OCT data into actionable intraoperative outputs, while also underscoring the need for robust validation and clinically grounded study designs.<sup><xref ref-type="bibr" rid="ref77">77</xref></sup></p>
<p>Hybrid systems that fuse OCT with ML inference have been reported for breast cancer surgical margin visualization, illustrating a design pattern in which high-resolution imaging is paired with automated classification to generate decision-supportive outputs intraoperatively.<sup><xref ref-type="bibr" rid="ref78">78</xref></sup> Translation of such systems into routine oncologic practice requires demonstration of robustness to blood, motion, and specular artifacts; prespecified uncertainty-aware operating points; and empirical evidence that OCT-based guidance improves margin-related outcomes without disproportionate resection or operative burden.<sup><xref ref-type="bibr" rid="ref77">77</xref>&#x2013;<xref ref-type="bibr" rid="ref79">79</xref></sup></p>
</sec>
<sec id="sec006-6">
<title>Robotic Assistance, Safety Monitoring, and the Boundary Between Guidance and Control</title>
<p>Robotic platforms create opportunities for continuous performance monitoring and algorithmic safety layers that detect and mitigate hazards, but the evidentiary threshold increases substantially when systems move from passive guidance to action-constraining control.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref></sup> AI-based hazard detection in robotic-assisted single-incision oncologic surgery exemplifies a safety-oriented direction by targeting the identification of hazardous states in a modality characterized by constrained instrument motion, limited triangulation, and heightened collision risk.<sup><xref ref-type="bibr" rid="ref10">10</xref></sup></p>
<p>Robotic data streams can also be used to evaluate and standardize operative performance, with potential downstream implications for oncologic outcomes and complication risk. A study associating skill and errors with outcomes in robotic rectal cancer surgery illustrates how intraoperative performance metrics can be linked to clinical endpoints, supporting the concept that intraoperative analytics may provide actionable feedback for quality improvement.<sup><xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref94">94</xref></sup> At the same time, outcome-linked analytics emphasize the need to avoid overinterpreting surrogate performance measures unless they are validated against patient-centered endpoints and contextualized by case complexity, anatomy, and oncologic extent.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref>, <xref ref-type="bibr" rid="ref94">94</xref>, <xref ref-type="bibr" rid="ref95">95</xref></sup></p>
<p>Engineering frameworks for AI-enhanced processing of da Vinci robot-assisted video reinforce that translation requires not only accurate models but also reliable system-level integration, including secure data handling, deterministic latency, and compatibility with the robotic platform&#x2019;s data interfaces.<sup><xref ref-type="bibr" rid="ref82">82</xref></sup> Systematic synthesis of AI integration into robotic cancer surgery indicates that most published systems remain at the level of feasibility or retrospective validation, with limited evidence from prospective studies and limited clarity regarding how ML outputs are incorporated into standardized intraoperative actions.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref></sup> This evidence profile supports a conservative near-term posture: prioritize decision support, safety monitoring, and uncertainty-aware guidance before pursuing control-constraining behaviors that would require substantially higher safety assurance.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref10">10</xref>, <xref ref-type="bibr" rid="ref53">53</xref></sup></p>
</sec>
<sec id="sec006-7">
<title>Translational Requirements: Real-Time Operation, Robustness, and Clinically Testable Utility</title>
<p>Across intraoperative navigation and robotic assistance/control, translational readiness depends on demonstrating reliable real-time operation under routine variability, explicit handling of uncertainty, and measurable benefit in decision-relevant endpoints.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref56">56</xref></sup> Feasibility studies in vascular and stereotactic navigation demonstrate that geometry-aware guidance can be operationalized in oncologic procedures, while prospective application of AI-assisted 3D navigation in lung surgery shows that intraoperative navigation can be evaluated in forward-looking designs rather than solely retrospective settings.<sup><xref ref-type="bibr" rid="ref8">8</xref>, <xref ref-type="bibr" rid="ref9">9</xref>, <xref ref-type="bibr" rid="ref12">12</xref></sup> Prospective demonstration of real-time metastasis recognition during exploration further illustrates how intraoperative perception can be tied to immediate oncologic decisions, providing a model for future studies that specify workflow integration and decision thresholds a priori.<sup><xref ref-type="bibr" rid="ref24">24</xref></sup></p>
<p>For imaging-enhanced guidance (eg, fluorescence and NIR functional imaging), translation requires separating device- and protocol-specific variability from biologically meaningful signal and validating ML-derived overlays across acquisition settings, patient factors, and institutions.<sup><xref ref-type="bibr" rid="ref56">56</xref>, <xref ref-type="bibr" rid="ref86">86</xref>, <xref ref-type="bibr" rid="ref88">88</xref>, <xref ref-type="bibr" rid="ref89">89</xref></sup> For label-free modalities (eg, mass spectrometry and OCT), uncertainty-aware inference and robustness to intraoperative artifacts are central because both false reassurance and false alarms can produce clinically meaningful harm (eg, inadequate margins vs unnecessary tissue loss).<sup><xref ref-type="bibr" rid="ref11">11</xref>, <xref ref-type="bibr" rid="ref77">77</xref>&#x2013;<xref ref-type="bibr" rid="ref79">79</xref></sup> These requirements are emphasized across systematic and specialty-focused reviews, which report that the clinical evidence for many AI-enabled intraoperative systems remains dominated by retrospective analyses and early feasibility work, with relatively few prospective evaluations tied to patient-centered outcomes.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref80">80</xref>, <xref ref-type="bibr" rid="ref81">81</xref>, <xref ref-type="bibr" rid="ref95">95</xref></sup> Prospective application studies in navigation (eg, non-contact 3D guidance) and real-time disease detection (eg, AiLES) provide useful templates for study designs that connect real-time inference to standardized intraoperative actions and measurable outcomes.<sup><xref ref-type="bibr" rid="ref12">12</xref>, <xref ref-type="bibr" rid="ref24">24</xref></sup> Finally, because robotic surgery continues to expand across complex oncologic procedures (including pancreatic surgery), the bar for safe, clinically effective integration of 
intraoperative AI should remain anchored in workflow-aware validation, transparent uncertainty handling, and rigorous prospective evaluation rather than technical performance alone.<sup><xref ref-type="bibr" rid="ref3">3</xref>, <xref ref-type="bibr" rid="ref10">10</xref>, <xref ref-type="bibr" rid="ref53">53</xref></sup></p>
<p>In addition, outside the perioperative realm, there have been comparable advancements in AI for psychiatry and mental health applications, including risk assessment (suicide attempts, relapse, and acute decompensation), symptom profiling, and treatment recommendation with multimodal data sources like longitudinal electronic health records, wearable and smartphone-based digital phenotyping, speech/language processing, and neuroimaging.<sup><xref ref-type="bibr" rid="ref96">96</xref>&#x2013;<xref ref-type="bibr" rid="ref98">98</xref></sup> In precision oncology, the combination of radiology, digital pathology, and genomic profiling enabled by AI has facilitated tumor profiling, prediction of treatment response (including immunotherapy), and computational triage for molecular analyses, thereby propelling the transition of high-dimensional biomarkers into decision-making.<sup><xref ref-type="bibr" rid="ref99">99</xref>&#x2013;<xref ref-type="bibr" rid="ref101">101</xref></sup> These parallel applications collectively point to a shift towards data-driven, personalized decision support systems, while also pointing out common issues in fairness, generalization, explainability, and validation that are just as applicable in surgical oncology.</p>
</sec>
</sec>
<sec id="sec007">
<title>Conclusion</title>
<p>Across surgical oncology, AI/ML research increasingly spans the full perioperative continuum, with maturation that varies substantially by domain and by proximity to real-world clinical integration.<sup><xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref95">95</xref></sup> In clinical translation and decision support, recent work demonstrates that deployment and monitoring of prediction systems in routine colorectal cancer surgical workflows are feasible, but such implementation-level evidence remains uncommon relative to the volume of retrospective development studies.<sup><xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref95">95</xref></sup> Consistent with broader concerns in clinical prediction and medical AI, the primary constraint on translation is not proof-of-concept discrimination but the combination of generalizability, calibration, and clinically meaningful utility under routine conditions.<sup><xref ref-type="bibr" rid="ref42">42</xref>&#x2013;<xref ref-type="bibr" rid="ref44">44</xref>, <xref ref-type="bibr" rid="ref48">48</xref>&#x2013;<xref ref-type="bibr" rid="ref50">50</xref></sup></p>
<p>Preoperative planning remains dominated by imaging-based methods, particularly segmentation and localization, where performance may appear strong in curated datasets yet degrade under cross-site variation in acquisition protocols and label practices. The translational requirement is therefore not solely architectural sophistication but demonstrable robustness to dataset shift, transparent reporting of data provenance, and external validation that reflects target deployment heterogeneity.<sup><xref ref-type="bibr" rid="ref42">42</xref>&#x2013;<xref ref-type="bibr" rid="ref44">44</xref>, <xref ref-type="bibr" rid="ref48">48</xref>&#x2013;<xref ref-type="bibr" rid="ref50">50</xref></sup> In parallel, perioperative risk prediction models for thoracic and esophageal oncologic surgery increasingly integrate imaging with clinical variables, and emerging reports emphasize explainability; however, interpretability claims must be matched to calibrated risk estimates and decision-relevant evaluations to support counseling, triage, and perioperative planning.<sup><xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref34">34</xref>, <xref ref-type="bibr" rid="ref43">43</xref>, <xref ref-type="bibr" rid="ref44">44</xref></sup></p>
<p>Intraoperative navigation and robotic assistance/control are advancing in tandem with surgical robotics and training-focused AI, yet the evidentiary base remains weighted toward feasibility demonstrations and educational applications rather than prospective, outcome-linked clinical utility.<sup><xref ref-type="bibr" rid="ref53">53</xref>, <xref ref-type="bibr" rid="ref81">81</xref></sup> The methodological gap is well captured by contemporary guidance: early-stage evaluations should explicitly report human&#x2013;AI interaction, workflow integration, safety mitigations, and error-case behavior (DECIDE-AI), while later-stage interventional studies should adhere to AI-specific trial and protocol standards (CONSORT-AI, SPIRIT-AI).<sup><xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref38">38</xref>, <xref ref-type="bibr" rid="ref51">51</xref></sup></p>
<p>Future progress in surgical oncology AI will depend on aligning model development with deployment realities: multicenter and temporally robust evaluation, explicit calibration assessment and updating strategies, decision-analytic utility assessment at clinically relevant thresholds, and systematic appraisal of bias and subgroup performance when outcomes and labels reflect structural differences in access and care pathways. These requirements are increasingly codified in updated reporting and risk-of-bias instruments (TRIPOD+AI, PROBAST+AI) and in good ML practice guidance emphasizing lifecycle governance, monitoring, and safety assurance for medical-device ML systems.<sup><xref ref-type="bibr" rid="ref49">49</xref>, <xref ref-type="bibr" rid="ref50">50</xref></sup> When these evidentiary expectations are met, AI/ML systems in surgical oncology are more likely to transition from high in silico performance to reproducible, safe, and clinically useful tools that improve perioperative decision-making rather than merely predicting outcomes.</p>
</sec>
<sec id="sec008">
<title>List of Abbreviations</title>
<def-list>
<def-item><term>AI</term> <def><p>Artificial intelligence</p></def></def-item>
<def-item><term>AiLES</term> <def><p>Artificial intelligence Laparoscopic Exploration System</p></def></def-item>
<def-item><term>CDS</term> <def><p>Clinical Decision Support</p></def></def-item>
<def-item><term>CT</term> <def><p>Computed Tomography</p></def></def-item>
<def-item><term>DCA</term> <def><p>Decision Curve Analysis</p></def></def-item>
<def-item><term>FDA</term> <def><p>Food and Drug Administration</p></def></def-item>
<def-item><term>EHR</term> <def><p>Electronic Health Record</p></def></def-item>
<def-item><term>LLM</term> <def><p>Large Language Model</p></def></def-item>
<def-item><term>MDR</term> <def><p>Medical Device Regulation</p></def></def-item>
<def-item><term>ML</term> <def><p>Machine Learning</p></def></def-item>
<def-item><term>MRI</term> <def><p>Magnetic Resonance Imaging</p></def></def-item>
<def-item><term>NIR</term> <def><p>Near-Infrared</p></def></def-item>
<def-item><term>NLP</term> <def><p>Natural language processing</p></def></def-item>
<def-item><term>OCT</term> <def><p>Optical coherence tomography</p></def></def-item>
<def-item><term>PCCP</term> <def><p>Predetermined Change Control Plan</p></def></def-item>
<def-item><term>WSI</term> <def><p>Whole-Slide Imaging</p></def></def-item>
</def-list>
</sec>
</body>
<back>
<fn-group>
<fn id="n1" fn-type="other">
<p>Additional material is published online only. To view please visit the journal online.</p>
<p><bold>Cite this as:</bold> Matthew Abikenari. Artificial Intelligence Across the Surgical Oncology Continuum: Decision Support, Operative Intelligence, and a Translation-First Roadmap. Premier Journal of Science 2026;3:100269</p>
<p><bold>DOI:</bold> <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.70389/PJS.100269">https://doi.org/10.70389/PJS.100269</ext-link></p>
</fn>
<fn id="n2" fn-type="other">
<p><bold>Ethical approval</bold></p>
<p>N/a</p>
</fn>
<fn id="n3" fn-type="other">
<p><bold>Consent</bold></p>
<p>N/a</p>
</fn>
<fn id="n4" fn-type="other">
<p>Conflict of Interest Disclosure:</p>
<p>No pertinent conflicts of interest relevant to this manuscript.</p>
</fn>
<fn id="n5" fn-type="other">
<p><bold>Author contribution:</bold></p>
<p>Matthew Abikenari &#x2013; Writing &#x2013; review &amp; editing, Writing &#x2013; original draft, Visualization, Validation, Supervision, Data curation, Conceptualization</p>
</fn>
<fn id="n6" fn-type="other">
<p><bold>Guarantor:</bold></p>
<p>Matthew Abikenari</p>
</fn>
<fn id="n7" fn-type="other">
<p><bold>Provenance peer review:</bold></p>
<p>unsolicited</p>
</fn>
<fn id="n8" fn-type="other">
<p><bold>Data availability statement:</bold></p>
<p>N/a</p>
</fn>
<fn id="n9" fn-type="other">
<p><bold>Financial Disclosure:</bold></p>
<p>None.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1">
<label>1</label>
<mixed-citation publication-type="journal">
<string-name><surname>Williams</surname> <given-names>TL</given-names></string-name>, <string-name><surname>Saadat</surname> <given-names>LV</given-names></string-name>, <string-name><surname>Gonen</surname> <given-names>M</given-names></string-name>, <string-name><surname>Wei</surname> <given-names>A</given-names></string-name>, <string-name><surname>Do</surname> <given-names>RK</given-names></string-name>, <string-name><surname>Simpson</surname> <given-names>AL</given-names></string-name>. <article-title>Radiomics in surgical oncology: applications and challenges</article-title>. <source>Comput Assist Surg (Abingdon)</source>. <year>2021</year>;<volume>26</volume>(<issue>1</issue>):<fpage>85</fpage>&#x2013;<lpage>96</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1080/24699322.2021.1994014">https://doi.org/10.1080/24699322.2021.1994014</ext-link>
</mixed-citation>
</ref>
<ref id="ref2">
<label>2</label>
<mixed-citation publication-type="journal">
<string-name><surname>Lin</surname> <given-names>G</given-names></string-name>, <string-name><surname>Li</surname> <given-names>R</given-names></string-name>, <string-name><surname>Li</surname> <given-names>X</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>D</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>X</given-names></string-name>. <article-title>Advances in the application of three-dimensional reconstruction in thoracic surgery: a comprehensive review</article-title>. <source>Thorac Cancer</source>. <year>2025</year>;<volume>16</volume>(<issue>17</issue>):<elocation-id>e70159</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1111/1759-7714.70159">https://doi.org/10.1111/1759-7714.70159</ext-link>
</mixed-citation>
</ref>
<ref id="ref3">
<label>3</label>
<mixed-citation publication-type="journal">
<string-name><surname>Leszczy&#x0144;ska</surname> <given-names>A</given-names></string-name>, <string-name><surname>Obuchowicz</surname> <given-names>R</given-names></string-name>, <string-name><surname>Strzelecki</surname> <given-names>M</given-names></string-name>, <string-name><surname>Seweryn</surname> <given-names>M</given-names></string-name>. <article-title>The integration of artificial intelligence into robotic cancer surgery: a systematic review</article-title>. <source>J Clin Med</source>. <year>2025</year>;<volume>14</volume>(<issue>17</issue>):<fpage>6181</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/jcm14176181">https://doi.org/10.3390/jcm14176181</ext-link>
</mixed-citation>
</ref>
<ref id="ref4">
<label>4</label>
<mixed-citation publication-type="journal">
<string-name><surname>Buck</surname> <given-names>L</given-names></string-name>, <string-name><surname>Kohler</surname> <given-names>J</given-names></string-name>, <string-name><surname>Risch</surname> <given-names>J</given-names></string-name>, <string-name><surname>Incesu</surname> <given-names>RB</given-names></string-name>, <string-name><surname>H&#x00FC;gelmann</surname> <given-names>K</given-names></string-name>, <string-name><surname>Weiss</surname> <given-names>ML</given-names></string-name>, <etal>et al</etal>. <article-title>The emerging role of multimodal artificial intelligence in urological surgery</article-title>. <source>Curr Oncol</source>. <year>2025</year>;<volume>32</volume>(<issue>12</issue>):<fpage>665</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/curroncol32120665">https://doi.org/10.3390/curroncol32120665</ext-link>
</mixed-citation>
</ref>
<ref id="ref5">
<label>5</label>
<mixed-citation publication-type="journal">
<string-name><surname>Rosen</surname> <given-names>AW</given-names></string-name>, <string-name><surname>Ose</surname> <given-names>I</given-names></string-name>, <string-name><surname>G&#x00F6;genur</surname> <given-names>M</given-names></string-name>, <string-name><surname>Andersen</surname> <given-names>LP</given-names></string-name>, <string-name><surname>Bojesen</surname> <given-names>RD</given-names></string-name>, <string-name><surname>Vogelsang</surname> <given-names>RP</given-names></string-name>, <etal>et al</etal>.; <collab>AID-SURG study group</collab>. <article-title>Clinical implementation of an AI-based prediction model for decision support for patients undergoing colorectal cancer surgery</article-title>. <source>Nat Med</source>. <year>2025</year>;<volume>31</volume>(<issue>11</issue>):<fpage>3737</fpage>&#x2013;<lpage>48</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41591-025-03942-x">https://doi.org/10.1038/s41591-025-03942-x</ext-link>
</mixed-citation>
</ref>
<ref id="ref6">
<label>6</label>
<mixed-citation publication-type="journal">
<string-name><surname>Dal Cero</surname> <given-names>M</given-names></string-name>, <string-name><surname>Gibert</surname> <given-names>J</given-names></string-name>, <string-name><surname>Grande</surname> <given-names>L</given-names></string-name>, <string-name><surname>Gimeno</surname> <given-names>M</given-names></string-name>, <string-name><surname>Osorio</surname> <given-names>J</given-names></string-name>, <string-name><surname>Bencivenga</surname> <given-names>M</given-names></string-name>, <etal>et al</etal>.; <collab>On behalf of the Spanish Eurecca Esophagogastric Cancer Group and the European Gastrodata Study Group</collab>. <article-title>International external validation of risk prediction model of 90-day mortality after gastrectomy for cancer using machine learning</article-title>. <source>Cancers (Basel)</source>. <year>2024</year>;<volume>16</volume>(<issue>13</issue>):<fpage>2463</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers16132463">https://doi.org/10.3390/cancers16132463</ext-link>
</mixed-citation>
</ref>
<ref id="ref7">
<label>7</label>
<mixed-citation publication-type="journal">
<string-name><surname>Vakili-Ojarood</surname> <given-names>M</given-names></string-name>, <string-name><surname>Naseri</surname> <given-names>A</given-names></string-name>, <string-name><surname>Shirinzadeh-Dastgiri</surname> <given-names>A</given-names></string-name>, <string-name><surname>Saberi</surname> <given-names>A</given-names></string-name>, <string-name><surname>HaghighiKian</surname> <given-names>SM</given-names></string-name>, <string-name><surname>Rahmani</surname> <given-names>A</given-names></string-name>, <etal>et al</etal>. <article-title>Ethical considerations and equipoise in cancer surgery</article-title>. <source>Indian J Surg Oncol</source>. <year>2024</year>;<volume>15</volume>(<issue>S3 Suppl 3</issue>):<fpage>363</fpage>&#x2013;<lpage>73</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s13193-024-02023-8">https://doi.org/10.1007/s13193-024-02023-8</ext-link>
</mixed-citation>
</ref>
<ref id="ref8">
<label>8</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kitaguchi</surname> <given-names>D</given-names></string-name>, <string-name><surname>Takeshita</surname> <given-names>N</given-names></string-name>, <string-name><surname>Matsuzaki</surname> <given-names>H</given-names></string-name>, <string-name><surname>Igaki</surname> <given-names>T</given-names></string-name>, <string-name><surname>Hasegawa</surname> <given-names>H</given-names></string-name>, <string-name><surname>Kojima</surname> <given-names>S</given-names></string-name>, <etal>et al</etal>. <article-title>Real-time vascular anatomical image navigation for laparoscopic surgery: experimental study</article-title>. <source>Surg Endosc</source>. <year>2022</year>;<volume>36</volume>(<issue>8</issue>):<fpage>6105</fpage>&#x2013;<lpage>12</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00464-022-09384-7">https://doi.org/10.1007/s00464-022-09384-7</ext-link>
</mixed-citation>
</ref>
<ref id="ref9">
<label>9</label>
<mixed-citation publication-type="journal">
<string-name><surname>Ten Brink</surname> <given-names>R</given-names></string-name>, <string-name><surname>Schram</surname> <given-names>R</given-names></string-name>, <string-name><surname>Kats-Ugurlu</surname> <given-names>G</given-names></string-name>, <string-name><surname>Kwee</surname> <given-names>T</given-names></string-name>, <string-name><surname>Hemmer</surname> <given-names>P</given-names></string-name>, <string-name><surname>Havenga</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Feasibility of optical stereotactic navigation for rectosigmoid cancer with deep learning-supported 3D modelling</article-title>. <source>Eur J Surg Oncol</source>. <year>2025</year>;<volume>51</volume>(<issue>11</issue>):<fpage>110397</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ejso.2025.110397">https://doi.org/10.1016/j.ejso.2025.110397</ext-link>
</mixed-citation>
</ref>
<ref id="ref10">
<label>10</label>
<mixed-citation publication-type="journal">
<string-name><surname>Rus</surname> <given-names>G</given-names></string-name>, <string-name><surname>Andras</surname> <given-names>I</given-names></string-name>, <string-name><surname>Vaida</surname> <given-names>C</given-names></string-name>, <string-name><surname>Crisan</surname> <given-names>N</given-names></string-name>, <string-name><surname>Gherman</surname> <given-names>B</given-names></string-name>, <string-name><surname>Radu</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence-based hazard detection in robotic-assisted single-incision oncologic surgery</article-title>. <source>Cancers (Basel)</source>. <year>2023</year>;<volume>15</volume>(<issue>13</issue>):<fpage>3387</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers15133387">https://doi.org/10.3390/cancers15133387</ext-link>
</mixed-citation>
</ref>
<ref id="ref11">
<label>11</label>
<mixed-citation publication-type="journal">
<string-name><surname>Fooladgar</surname> <given-names>F</given-names></string-name>, <string-name><surname>Jamzad</surname> <given-names>A</given-names></string-name>, <string-name><surname>Connolly</surname> <given-names>L</given-names></string-name>, <string-name><surname>Santilli</surname> <given-names>A</given-names></string-name>, <string-name><surname>Kaufmann</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Uncertainty estimation for margin detection in cancer surgery using mass spectrometry</article-title>. <source>Int J Comput Assist Radiol Surg</source>. <year>2022</year>;<volume>17</volume>(<issue>12</issue>):<fpage>2305</fpage>&#x2013;<lpage>13</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11548-022-02764-3">https://doi.org/10.1007/s11548-022-02764-3</ext-link>
</mixed-citation>
</ref>
<ref id="ref12">
<label>12</label>
<mixed-citation publication-type="journal">
<string-name><surname>Li</surname> <given-names>C</given-names></string-name>, <string-name><surname>Song</surname> <given-names>J</given-names></string-name>, <string-name><surname>Yan</surname> <given-names>R</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>B</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>B</given-names></string-name>, <string-name><surname>Fan</surname> <given-names>J</given-names></string-name>, <etal>et al</etal>. <article-title>Non-contact artificial intelligence-assisted intraoperative 3D navigation technology prospective application study in lung cancer surgery</article-title>. <source>J Thorac Dis</source>. <year>2025</year>;<volume>17</volume>(<issue>11</issue>):<fpage>9610</fpage>&#x2013;<lpage>21</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.21037/jtd-2025-1136">https://doi.org/10.21037/jtd-2025-1136</ext-link>
</mixed-citation>
</ref>
<ref id="ref13">
<label>13</label>
<mixed-citation publication-type="journal">
<string-name><surname>Macheka</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ng</surname> <given-names>PY</given-names></string-name>, <string-name><surname>Ginsburg</surname> <given-names>O</given-names></string-name>, <string-name><surname>Hope</surname> <given-names>A</given-names></string-name>, <string-name><surname>Sullivan</surname> <given-names>R</given-names></string-name>, <string-name><surname>Aggarwal</surname> <given-names>A</given-names></string-name>. <article-title>Prospective evaluation of artificial intelligence (AI) applications for use in cancer pathways following diagnosis: a systematic review</article-title>. <source>BMJ Oncol</source>. <year>2024</year>;<volume>3</volume>(<issue>1</issue>):<elocation-id>e000255</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmjonc-2023-000255">https://doi.org/10.1136/bmjonc-2023-000255</ext-link>
</mixed-citation>
</ref>
<ref id="ref14">
<label>14</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kawamoto</surname> <given-names>K</given-names></string-name>, <string-name><surname>Houlihan</surname> <given-names>CA</given-names></string-name>, <string-name><surname>Balas</surname> <given-names>EA</given-names></string-name>, <string-name><surname>Lobach</surname> <given-names>DF</given-names></string-name>. <article-title>Improving clinical practice using clinical decision support systems: a systematic review of trials to identify features critical to success</article-title>. <source>BMJ</source>. <year>2005</year>;<volume>330</volume>(<issue>7494</issue>):<fpage>765</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj.38398.500764.8F">https://doi.org/10.1136/bmj.38398.500764.8F</ext-link>
</mixed-citation>
</ref>
<ref id="ref15">
<label>15</label>
<mixed-citation publication-type="journal">
<string-name><surname>Vasey</surname> <given-names>B</given-names></string-name>, <string-name><surname>Nagendran</surname> <given-names>M</given-names></string-name>, <string-name><surname>Campbell</surname> <given-names>B</given-names></string-name>, <string-name><surname>Clifton</surname> <given-names>DA</given-names></string-name>, <string-name><surname>Collins</surname> <given-names>GS</given-names></string-name>, <string-name><surname>Denaxas</surname> <given-names>S</given-names></string-name>, <etal>et al</etal>.; <collab>DECIDE-AI expert group</collab>. <article-title>Reporting guideline for the early-stage clinical evaluation of decision support systems driven by artificial intelligence: DECIDE-AI</article-title>. <source>BMJ</source>. <year>2022</year>;<volume>377</volume>:<elocation-id>e070904</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj-2022-070904">https://doi.org/10.1136/bmj-2022-070904</ext-link>
</mixed-citation>
</ref>
<ref id="ref16">
<label>16</label>
<mixed-citation publication-type="journal">
<string-name><surname>Alselaim</surname> <given-names>NA</given-names></string-name>, <string-name><surname>Alsemari</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Alyabsi</surname> <given-names>M</given-names></string-name>, <string-name><surname>Al-Mutairi</surname> <given-names>AM</given-names></string-name>. <article-title>Factors associated with 30-day mortality and morbidity in patients undergoing emergency colorectal surgery</article-title>. <source>Ann Saudi Med</source>. <year>2023</year>;<volume>43</volume>(<issue>6</issue>):<fpage>364</fpage>&#x2013;<lpage>72</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5144/0256-4947.2023.364">https://doi.org/10.5144/0256-4947.2023.364</ext-link>
</mixed-citation>
</ref>
<ref id="ref17">
<label>17</label>
<mixed-citation publication-type="journal">
<string-name><surname>Xu</surname> <given-names>J</given-names></string-name>, <string-name><surname>Zhou</surname> <given-names>J</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>J</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Shu</surname> <given-names>Y</given-names></string-name>. <article-title>Development and validation of a machine learning model for survival risk stratification after esophageal cancer surgery</article-title>. <source>Front Oncol</source>. <year>2022</year>;<volume>12</volume>:<fpage>1068198</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fonc.2022.1068198">https://doi.org/10.3389/fonc.2022.1068198</ext-link>
</mixed-citation>
</ref>
<ref id="ref18">
<label>18</label>
<mixed-citation publication-type="journal">
<string-name><surname>Bilimoria</surname> <given-names>KY</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Paruch</surname> <given-names>JL</given-names></string-name>, <string-name><surname>Zhou</surname> <given-names>L</given-names></string-name>, <string-name><surname>Kmiecik</surname> <given-names>TE</given-names></string-name>, <string-name><surname>Ko</surname> <given-names>CY</given-names></string-name>, <etal>et al</etal>. <article-title>Development and evaluation of the universal ACS NSQIP surgical risk calculator: a decision aid and informed consent tool for patients and surgeons</article-title>. <source>J Am Coll Surg</source>. <year>2013</year>;<volume>217</volume>(<issue>5</issue>):<fpage>833</fpage>&#x2013;<lpage>42</lpage>.e1. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jamcollsurg.2013.07.385">https://doi.org/10.1016/j.jamcollsurg.2013.07.385</ext-link>
</mixed-citation>
</ref>
<ref id="ref19">
<label>19</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Jain</surname> <given-names>B</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Jackson</surname> <given-names>C</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>J</given-names></string-name>, <string-name><surname>Bettegowda</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Bridging imaging and molecular biomarkers in trigeminal neuralgia: toward precision diagnosis and prognostication in neuropathic pain</article-title>. <source>Med Res Arch</source>, [S.l.], v. <volume>13</volume>, n. <issue>5</issue>, May <year>2025</year>. <issn>ISSN 2375-1924</issn>. Available at: <ext-link ext-link-type="uri" xlink:href="https://esmed.org/MRA/mra/article/view/6605">https://esmed.org/MRA/mra/article/view/6605</ext-link>. Date accessed: 15 January 2026. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.18103/mra.v13i5.6605">https://doi.org/10.18103/mra.v13i5.6605</ext-link>
</mixed-citation>
</ref>
<ref id="ref20">
<label>20</label>
<mixed-citation publication-type="journal">
<string-name><surname>Namavarian</surname> <given-names>A</given-names></string-name>, <string-name><surname>Gabinet-Equihua</surname> <given-names>A</given-names></string-name>, <string-name><surname>Deng</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Khalid</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ziai</surname> <given-names>H</given-names></string-name>, <string-name><surname>Deutsch</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Length of stay prediction models for oral cancer surgery: machine learning, statistical and ACS-NSQIP</article-title>. <source>Laryngoscope</source>. <year>2024</year>;<volume>134</volume>(<issue>8</issue>):<fpage>3664</fpage>&#x2013;<lpage>72</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/lary.31443">https://doi.org/10.1002/lary.31443</ext-link>
</mixed-citation>
</ref>
<ref id="ref21">
<label>21</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kadomatsu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Emoto</surname> <given-names>R</given-names></string-name>, <string-name><surname>Kubo</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Nakanishi</surname> <given-names>K</given-names></string-name>, <string-name><surname>Ueno</surname> <given-names>H</given-names></string-name>, <string-name><surname>Kato</surname> <given-names>T</given-names></string-name>, <etal>et al</etal>. <article-title>Development of a machine learning-based risk model for postoperative complications of lung cancer surgery</article-title>. <source>Surg Today</source>. <year>2024</year>;<volume>54</volume>(<issue>12</issue>):<fpage>1482</fpage>&#x2013;<lpage>9</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00595-024-02878-y">https://doi.org/10.1007/s00595-024-02878-y</ext-link>
</mixed-citation>
</ref>
<ref id="ref22">
<label>22</label>
<mixed-citation publication-type="journal">
<string-name><surname>Cowling</surname> <given-names>TE</given-names></string-name>, <string-name><surname>Cromwell</surname> <given-names>DA</given-names></string-name>, <string-name><surname>Bellot</surname> <given-names>A</given-names></string-name>, <string-name><surname>Sharples</surname> <given-names>LD</given-names></string-name>, <string-name><surname>van der Meulen</surname> <given-names>J</given-names></string-name>. <article-title>Logistic regression and machine learning predicted patient mortality from large sets of diagnosis codes comparably</article-title>. <source>J Clin Epidemiol</source>. <year>2021</year>;<volume>133</volume>:<fpage>43</fpage>&#x2013;<lpage>52</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jclinepi.2020.12.018">https://doi.org/10.1016/j.jclinepi.2020.12.018</ext-link>
</mixed-citation>
</ref>
<ref id="ref23">
<label>23</label>
<mixed-citation publication-type="journal">
<string-name><surname>Klontzas</surname> <given-names>ME</given-names></string-name>, <string-name><surname>Ri</surname> <given-names>M</given-names></string-name>, <string-name><surname>Koltsakis</surname> <given-names>E</given-names></string-name>, <string-name><surname>Stenqvist</surname> <given-names>E</given-names></string-name>, <string-name><surname>Kalarakis</surname> <given-names>G</given-names></string-name>, <string-name><surname>Bostr&#x00F6;m</surname> <given-names>E</given-names></string-name>, <etal>et al</etal>. <article-title>Prediction of anastomotic leakage in esophageal cancer surgery: a multimodal machine learning model integrating imaging and clinical data</article-title>. <source>Acad Radiol</source>. <year>2024</year>;<volume>31</volume>(<issue>12</issue>):<fpage>4878</fpage>&#x2013;<lpage>85</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.acra.2024.06.026">https://doi.org/10.1016/j.acra.2024.06.026</ext-link>
</mixed-citation>
</ref>
<ref id="ref24">
<label>24</label>
<mixed-citation publication-type="journal">
<string-name><surname>Chen</surname> <given-names>H</given-names></string-name>, <string-name><surname>Gou</surname> <given-names>L</given-names></string-name>, <string-name><surname>Fang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Dou</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>H</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence-assisted real-time recognition of intra-abdominal metastasis during laparoscopic gastric cancer surgery</article-title>. <source>NPJ Digit Med</source>. <year>2025</year>;<volume>8</volume>(<issue>1</issue>):<fpage>9</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41746-024-01372-6">https://doi.org/10.1038/s41746-024-01372-6</ext-link>
</mixed-citation>
</ref>
<ref id="ref25">
<label>25</label>
<mixed-citation publication-type="journal">
<string-name><surname>Barber</surname> <given-names>EL</given-names></string-name>, <string-name><surname>Garg</surname> <given-names>R</given-names></string-name>, <string-name><surname>Persenaire</surname> <given-names>C</given-names></string-name>, <string-name><surname>Simon</surname> <given-names>M</given-names></string-name>. <article-title>Natural language processing with machine learning to predict outcomes after ovarian cancer surgery</article-title>. <source>Gynecol Oncol</source>. <year>2021</year>;<volume>160</volume>(<issue>1</issue>):<fpage>182</fpage>&#x2013;<lpage>6</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ygyno.2020.10.004">https://doi.org/10.1016/j.ygyno.2020.10.004</ext-link>
</mixed-citation>
</ref>
<ref id="ref26">
<label>26</label>
<mixed-citation publication-type="journal">
<string-name><surname>Rossi</surname> <given-names>LA</given-names></string-name>, <string-name><surname>Melstrom</surname> <given-names>LG</given-names></string-name>, <string-name><surname>Fong</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>V</given-names></string-name>. <article-title>Predicting post-discharge cancer surgery complications via telemonitoring of patient-reported outcomes and patient-generated health data</article-title>. <source>J Surg Oncol</source>. <year>2021</year>;<volume>123</volume>(<issue>5</issue>):<fpage>1345</fpage>&#x2013;<lpage>52</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/jso.26413">https://doi.org/10.1002/jso.26413</ext-link>
</mixed-citation>
</ref>
<ref id="ref27">
<label>27</label>
<mixed-citation publication-type="journal">
<string-name><surname>Yan</surname> <given-names>YD</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>XW</given-names></string-name>, <string-name><surname>Li</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Lin</surname> <given-names>HW</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>ZT</given-names></string-name>, <string-name><surname>Jia</surname> <given-names>D</given-names></string-name>, <etal>et al</etal>.; <collab>CRC-VTE investigators</collab>. <article-title>Machine learning to predict venous thromboembolism after colorectal cancer surgery: a Chinese dynamic modelling study</article-title>. <source>Int J Surg</source>. <year>2025</year>; <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1097/JS9.0000000000004036">https://doi.org/10.1097/JS9.0000000000004036</ext-link>
</mixed-citation>
</ref>
<ref id="ref28">
<label>28</label>
<mixed-citation publication-type="journal">
<string-name><surname>Ri</surname> <given-names>M</given-names></string-name>, <string-name><surname>Nunobe</surname> <given-names>S</given-names></string-name>, <string-name><surname>Narita</surname> <given-names>T</given-names></string-name>, <string-name><surname>Seto</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Kawazoe</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Ohe</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Time-sequential prediction of postoperative complications after gastric cancer surgery using machine learning: a multicenter cohort study</article-title>. <source>Gastric Cancer</source>. <year>2025</year>;<volume>28</volume>(<issue>6</issue>):<fpage>1273</fpage>&#x2013;<lpage>81</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10120-025-01658-y">https://doi.org/10.1007/s10120-025-01658-y</ext-link>
</mixed-citation>
</ref>
<ref id="ref29">
<label>29</label>
<mixed-citation publication-type="journal">
<string-name><surname>Zeng</surname> <given-names>S</given-names></string-name>, <string-name><surname>Li</surname> <given-names>L</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Luo</surname> <given-names>L</given-names></string-name>, <string-name><surname>Fang</surname> <given-names>Y</given-names></string-name>. <article-title>Machine learning approaches for the prediction of postoperative complication risk in liver resection patients</article-title>. <source>BMC Med Inform Decis Mak</source>. <year>2021</year>;<volume>21</volume>(<issue>1</issue>):<fpage>371</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12911-021-01731-3">https://doi.org/10.1186/s12911-021-01731-3</ext-link>
</mixed-citation>
</ref>
<ref id="ref30">
<label>30</label>
<mixed-citation publication-type="journal">
<string-name><surname>Lin</surname> <given-names>V</given-names></string-name>, <string-name><surname>Tsouchnika</surname> <given-names>A</given-names></string-name>, <string-name><surname>Allakhverdiiev</surname> <given-names>E</given-names></string-name>, <string-name><surname>Rosen</surname> <given-names>AW</given-names></string-name>, <string-name><surname>G&#x00F6;genur</surname> <given-names>M</given-names></string-name>, <string-name><surname>Clausen</surname> <given-names>JS</given-names></string-name>, <etal>et al</etal>. <article-title>Training prediction models for individual risk assessment of postoperative complications after surgery for colorectal cancer</article-title>. <source>Tech Coloproctol</source>. <year>2022</year>;<volume>26</volume>(<issue>8</issue>):<fpage>665</fpage>&#x2013;<lpage>75</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10151-022-02624-x">https://doi.org/10.1007/s10151-022-02624-x</ext-link>
</mixed-citation>
</ref>
<ref id="ref31">
<label>31</label>
<mixed-citation publication-type="journal">
<string-name><surname>Lee</surname> <given-names>S</given-names></string-name>, <string-name><surname>Oh</surname> <given-names>HJ</given-names></string-name>, <string-name><surname>Yoo</surname> <given-names>H</given-names></string-name>, <string-name><surname>Kim</surname> <given-names>CY</given-names></string-name>. <article-title>Machine learning insight: unveiling overlooked risk factors for postoperative complications in gastric cancer</article-title>. <source>Cancers (Basel)</source>. <year>2025</year>;<volume>17</volume>(<issue>7</issue>):<fpage>1225</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers17071225">https://doi.org/10.3390/cancers17071225</ext-link>
</mixed-citation>
</ref>
<ref id="ref32">
<label>32</label>
<mixed-citation publication-type="journal">
<string-name><surname>Khan</surname> <given-names>MF</given-names></string-name>, <string-name><surname>Cahill</surname> <given-names>RA</given-names></string-name>. <article-title>Peroperative personalised decision support and analytics for colon cancer surgery &#x2013; Short report</article-title>. <source>Eur J Surg Oncol</source>. <year>2021</year>;<volume>47</volume>(<issue>2</issue>):<fpage>477</fpage>&#x2013;<lpage>9</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ejso.2020.04.010">https://doi.org/10.1016/j.ejso.2020.04.010</ext-link>
</mixed-citation>
</ref>
<ref id="ref33">
<label>33</label>
<mixed-citation publication-type="journal">
<string-name><surname>Li</surname> <given-names>R</given-names></string-name>, <string-name><surname>Zhao</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Yu</surname> <given-names>J</given-names></string-name>. <article-title>Development and validation of a machine learning model for predicting early postoperative complications after radical gastrectomy</article-title>. <source>Front Oncol</source>. <year>2025</year>;<volume>15</volume>:<fpage>1631260</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fonc.2025.1631260">https://doi.org/10.3389/fonc.2025.1631260</ext-link>
</mixed-citation>
</ref>
<ref id="ref34">
<label>34</label>
<mixed-citation publication-type="journal">
<string-name><surname>Chen</surname> <given-names>S</given-names></string-name>, <string-name><surname>Deng</surname> <given-names>T</given-names></string-name>, <string-name><surname>Yang</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Li</surname> <given-names>J</given-names></string-name>, <string-name><surname>Shen</surname> <given-names>J</given-names></string-name>, <string-name><surname>Luo</surname> <given-names>X</given-names></string-name>, <etal>et al</etal>. <article-title>Development and validation of an explainable machine learning model for predicting postoperative pulmonary complications after lung cancer surgery: a machine learning study</article-title>. <source>EClinicalMedicine</source>. <year>2025</year>;<volume>86</volume>:<fpage>103386</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.eclinm.2025.103386">https://doi.org/10.1016/j.eclinm.2025.103386</ext-link>
</mixed-citation>
</ref>
<ref id="ref35">
<label>35</label>
<mixed-citation publication-type="journal">
<string-name><surname>Yang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Dou</surname> <given-names>F</given-names></string-name>, <string-name><surname>Tang</surname> <given-names>G</given-names></string-name>, <string-name><surname>Xiu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Zhao</surname> <given-names>X</given-names></string-name>. <article-title>Interpretable machine learning model for predicting anastomotic leak after esophageal cancer surgery via LightGBM</article-title>. <source>BMC Cancer</source>. <year>2025</year>;<volume>25</volume>(<issue>1</issue>):<fpage>976</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12885-025-14387-3">https://doi.org/10.1186/s12885-025-14387-3</ext-link>
</mixed-citation>
</ref>
<ref id="ref36">
<label>36</label>
<mixed-citation publication-type="journal">
<string-name><surname>Hu</surname> <given-names>J</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>Q</given-names></string-name>, <string-name><surname>He</surname> <given-names>W</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>J</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>D</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Automated machine learning model for predicting anastomotic strictures after esophageal cancer surgery: a retrospective cohort study</article-title>. <source>Surg Endosc</source>. <year>2025</year>;<volume>39</volume>(<issue>6</issue>):<fpage>3737</fpage>&#x2013;<lpage>48</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00464-025-11759-5">https://doi.org/10.1007/s00464-025-11759-5</ext-link>
</mixed-citation>
</ref>
<ref id="ref37">
<label>37</label>
<mixed-citation publication-type="journal">
<string-name><surname>Bekta&#x015F;</surname> <given-names>M</given-names></string-name>, <string-name><surname>Burchell</surname> <given-names>GL</given-names></string-name>, <string-name><surname>Bonjer</surname> <given-names>HJ</given-names></string-name>, <string-name><surname>van der Peet</surname> <given-names>DL</given-names></string-name>. <article-title>Machine learning applications in upper gastrointestinal cancer surgery: a systematic review</article-title>. <source>Surg Endosc</source>. <year>2023</year>;<volume>37</volume>(<issue>1</issue>):<fpage>75</fpage>&#x2013;<lpage>89</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00464-022-09516-z">https://doi.org/10.1007/s00464-022-09516-z</ext-link>
</mixed-citation>
</ref>
<ref id="ref38">
<label>38</label>
<mixed-citation publication-type="journal">
<string-name><surname>Liu</surname> <given-names>X</given-names></string-name>, <string-name><surname>Cruz Rivera</surname> <given-names>S</given-names></string-name>, <string-name><surname>Moher</surname> <given-names>D</given-names></string-name>, <string-name><surname>Calvert</surname> <given-names>MJ</given-names></string-name>, <string-name><surname>Denniston</surname> <given-names>AK</given-names></string-name>, <string-name><surname>Chan</surname> <given-names>AW</given-names></string-name>, <etal>et al</etal>.; <collab>SPIRIT-AI and CONSORT-AI Working Group</collab>. <article-title>Reporting guidelines for clinical trial reports for interventions involving artificial intelligence: the CONSORT-AI extension</article-title>. <source>Nat Med</source>. <year>2020</year>;<volume>26</volume>(<issue>9</issue>):<fpage>1364</fpage>&#x2013;<lpage>74</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41591-020-1034-x">https://doi.org/10.1038/s41591-020-1034-x</ext-link>
</mixed-citation>
</ref>
<ref id="ref39">
<label>39</label>
<mixed-citation publication-type="journal">
<string-name><surname>Rahman</surname> <given-names>SA</given-names></string-name>, <string-name><surname>Walker</surname> <given-names>RC</given-names></string-name>, <string-name><surname>Lloyd</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Grace</surname> <given-names>BL</given-names></string-name>, <string-name><surname>van Boxel</surname> <given-names>GI</given-names></string-name>, <string-name><surname>Kingma</surname> <given-names>BF</given-names></string-name>, <etal>et al</etal>.; <collab>OCCAMS Consortium</collab>. <article-title>Machine learning to predict early recurrence after oesophageal cancer surgery</article-title>. <source>Br J Surg</source>. <year>2020</year>;<volume>107</volume>(<issue>8</issue>):<fpage>1042</fpage>&#x2013;<lpage>52</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/bjs.11461">https://doi.org/10.1002/bjs.11461</ext-link>
</mixed-citation>
</ref>
<ref id="ref40">
<label>40</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Schonfeld</surname> <given-names>E</given-names></string-name>, <string-name><surname>Choi</surname> <given-names>J</given-names></string-name>, <string-name><surname>Kim</surname> <given-names>LH</given-names></string-name>, <string-name><surname>Lim</surname> <given-names>M</given-names></string-name>. <article-title>Revisiting glioblastoma classification through an immunological lens: a narrative review</article-title>. <source>Glioma</source>. <year>2024</year>;<volume>7</volume>(<issue>2</issue>):<fpage>3</fpage>&#x2013;<lpage>9</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.4103/glioma.glioma_4_24">https://doi.org/10.4103/glioma.glioma_4_24</ext-link>
</mixed-citation>
</ref>
<ref id="ref41">
<label>41</label>
<mixed-citation publication-type="journal">
<string-name><surname>Vargas</surname> <given-names>LO</given-names></string-name>, <string-name><surname>Himic</surname> <given-names>V</given-names></string-name>, <string-name><surname>Otaner</surname> <given-names>F</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Chandar</surname> <given-names>J</given-names></string-name>, <string-name><surname>Govindarajan</surname> <given-names>V</given-names></string-name>, <etal>et al</etal>. <article-title>Modulating the glioma microenvironment with laser interstitial thermal therapy: mechanisms and therapeutic implications</article-title>. <source>J Neurooncol</source>. <year>2025</year>;<volume>176</volume>(<issue>1</issue>):<fpage>99</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11060-025-05305-5">https://doi.org/10.1007/s11060-025-05305-5</ext-link>
</mixed-citation>
</ref>
<ref id="ref42">
<label>42</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Enayati</surname> <given-names>I</given-names></string-name>, <string-name><surname>Fountain</surname> <given-names>DM</given-names></string-name>, <string-name><surname>Leite</surname> <given-names>MI</given-names></string-name>. <article-title>Navigating glioblastoma therapy: a narrative review of emerging immunotherapeutics and small-molecule inhibitors</article-title>. <source>Microbes Immun</source>. <year>2025</year>;<volume>2</volume>(<issue>4</issue>):<fpage>132</fpage>&#x2013;<lpage>43</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.36922/mi.5075">https://doi.org/10.36922/mi.5075</ext-link>
</mixed-citation>
</ref>
<ref id="ref43">
<label>43</label>
<mixed-citation publication-type="journal">
<string-name><surname>Van Calster</surname> <given-names>B</given-names></string-name>, <string-name><surname>McLernon</surname> <given-names>DJ</given-names></string-name>, <string-name><surname>van Smeden</surname> <given-names>M</given-names></string-name>, <string-name><surname>Wynants</surname> <given-names>L</given-names></string-name>, <string-name><surname>Steyerberg</surname> <given-names>EW</given-names></string-name>; <collab>Topic Group Evaluating diagnostic tests and prediction models of the STRATOS initiative</collab>. <article-title>Calibration: the Achilles heel of predictive analytics</article-title>. <source>BMC Med</source>. <year>2019</year>;<volume>17</volume>(<issue>1</issue>):<fpage>230</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12916-019-1466-7">https://doi.org/10.1186/s12916-019-1466-7</ext-link>
</mixed-citation>
</ref>
<ref id="ref44">
<label>44</label>
<mixed-citation publication-type="journal">
<string-name><surname>Vickers</surname> <given-names>AJ</given-names></string-name>, <string-name><surname>Elkin</surname> <given-names>EB</given-names></string-name>. <article-title>Decision curve analysis: a novel method for evaluating prediction models</article-title>. <source>Med Decis Making</source>. <year>2006</year>;<volume>26</volume>(<issue>6</issue>):<fpage>565</fpage>&#x2013;<lpage>74</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1177/0272989X06295361">https://doi.org/10.1177/0272989X06295361</ext-link>
</mixed-citation>
</ref>
<ref id="ref45">
<label>45</label>
<mixed-citation publication-type="journal">
<string-name><surname>Caldon</surname> <given-names>LJ</given-names></string-name>, <string-name><surname>Collins</surname> <given-names>KA</given-names></string-name>, <string-name><surname>Reed</surname> <given-names>MW</given-names></string-name>, <string-name><surname>Sivell</surname> <given-names>S</given-names></string-name>, <string-name><surname>Austoker</surname> <given-names>J</given-names></string-name>, <string-name><surname>Clements</surname> <given-names>AM</given-names></string-name>, <etal>et al</etal>.; <collab>BresDex Group</collab>. <article-title>Clinicians&#x2019; concerns about decision support interventions for patients facing breast cancer surgery options: understanding the challenge of implementing shared decision-making</article-title>. <source>Health Expect</source>. <year>2011</year>;<volume>14</volume>(<issue>2</issue>):<fpage>133</fpage>&#x2013;<lpage>46</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1111/j.1369-7625.2010.00633.x">https://doi.org/10.1111/j.1369-7625.2010.00633.x</ext-link>
</mixed-citation>
</ref>
<ref id="ref46">
<label>46</label>
<mixed-citation publication-type="journal">
<string-name><surname>Yen</surname> <given-names>RW</given-names></string-name>, <string-name><surname>Durand</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Harris</surname> <given-names>C</given-names></string-name>, <string-name><surname>Cohen</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ward</surname> <given-names>A</given-names></string-name>, <string-name><surname>O&#x2019;Malley</surname> <given-names>AJ</given-names></string-name>, <etal>et al</etal>. <article-title>Text-only and picture conversation aids both supported shared decision making for breast cancer surgery: analysis from a cluster randomized trial</article-title>. <source>Patient Educ Couns</source>. <year>2020</year>;<volume>103</volume>(<issue>11</issue>):<fpage>2235</fpage>&#x2013;<lpage>43</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.pec.2020.07.015">https://doi.org/10.1016/j.pec.2020.07.015</ext-link>
</mixed-citation>
</ref>
<ref id="ref47">
<label>47</label>
<mixed-citation publication-type="journal">
<string-name><surname>Obermeyer</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Powers</surname> <given-names>B</given-names></string-name>, <string-name><surname>Vogeli</surname> <given-names>C</given-names></string-name>, <string-name><surname>Mullainathan</surname> <given-names>S</given-names></string-name>. <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>. <source>Science</source>. <year>2019</year>;<volume>366</volume>(<issue>6464</issue>):<fpage>447</fpage>&#x2013;<lpage>53</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1126/science.aax2342">https://doi.org/10.1126/science.aax2342</ext-link>
</mixed-citation>
</ref>
<ref id="ref48">
<label>48</label>
<mixed-citation publication-type="journal">
<string-name><surname>Collins</surname> <given-names>GS</given-names></string-name>, <string-name><surname>Reitsma</surname> <given-names>JB</given-names></string-name>, <string-name><surname>Altman</surname> <given-names>DG</given-names></string-name>, <string-name><surname>Moons</surname> <given-names>KGM</given-names></string-name>. <article-title>Transparent reporting of a multivariable prediction model for individual prognosis or diagnosis (TRIPOD): the TRIPOD statement</article-title>. <source>BMJ</source>. <year>2015</year>;<volume>350</volume>:<elocation-id>g7594</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj.g7594">https://doi.org/10.1136/bmj.g7594</ext-link>
</mixed-citation>
</ref>
<ref id="ref49">
<label>49</label>
<mixed-citation publication-type="journal">
<string-name><surname>Collins</surname> <given-names>GS</given-names></string-name>, <string-name><surname>Moons</surname> <given-names>KG</given-names></string-name>, <string-name><surname>Dhiman</surname> <given-names>P</given-names></string-name>, <string-name><surname>Riley</surname> <given-names>RD</given-names></string-name>, <string-name><surname>Beam</surname> <given-names>AL</given-names></string-name>, <string-name><surname>Van Calster</surname> <given-names>B</given-names></string-name>, <etal>et al</etal>. <article-title>TRIPOD+AI statement: updated guidance for reporting clinical prediction models that use regression or machine learning methods</article-title>. <source>BMJ</source>. <year>2024</year>;<volume>385</volume>:<elocation-id>e078378</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj-2023-078378">https://doi.org/10.1136/bmj-2023-078378</ext-link>
</mixed-citation>
</ref>
<ref id="ref50">
<label>50</label>
<mixed-citation publication-type="journal">
<string-name><surname>Moons</surname> <given-names>KG</given-names></string-name>, <string-name><surname>Damen</surname> <given-names>JA</given-names></string-name>, <string-name><surname>Kaul</surname> <given-names>T</given-names></string-name>, <string-name><surname>Hooft</surname> <given-names>L</given-names></string-name>, <string-name><surname>Andaur Navarro</surname> <given-names>C</given-names></string-name>, <string-name><surname>Dhiman</surname> <given-names>P</given-names></string-name>, <etal>et al</etal>. <article-title>PROBAST+AI: an updated quality, risk of bias, and applicability assessment tool for prediction models using regression or artificial intelligence methods</article-title>. <source>BMJ</source>. <year>2025</year>;<volume>388</volume>:<elocation-id>e082505</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/bmj-2024-082505">https://doi.org/10.1136/bmj-2024-082505</ext-link>
</mixed-citation>
</ref>
<ref id="ref51">
<label>51</label>
<mixed-citation publication-type="journal">
<string-name><surname>Cruz Rivera</surname> <given-names>S</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>X</given-names></string-name>, <string-name><surname>Chan</surname> <given-names>AW</given-names></string-name>, <string-name><surname>Denniston</surname> <given-names>AK</given-names></string-name>, <string-name><surname>Calvert</surname> <given-names>MJ</given-names></string-name>, <string-name><surname>Darzi</surname> <given-names>A</given-names></string-name>, <etal>et al</etal>.; <collab>SPIRIT-AI and CONSORT-AI Working Group; SPIRIT-AI and CONSORT-AI Steering Group; SPIRIT-AI and CONSORT-AI Consensus Group</collab>. <article-title>Guidelines for clinical trial protocols for interventions involving artificial intelligence: the SPIRIT-AI extension</article-title>. <source>Nat Med</source>. <year>2020</year>;<volume>26</volume>(<issue>9</issue>):<fpage>1351</fpage>&#x2013;<lpage>63</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41591-020-1037-7">https://doi.org/10.1038/s41591-020-1037-7</ext-link>
</mixed-citation>
</ref>
<ref id="ref52">
<label>52</label>
<mixed-citation publication-type="journal">
<string-name><surname>Chan</surname> <given-names>SY</given-names></string-name>, <string-name><surname>Twohig</surname> <given-names>P</given-names></string-name>. <article-title>Artificial intelligence in liver cancer surgery: predicting success before the first incision</article-title>. <source>World J Gastroenterol</source>. <year>2025</year>;<volume>31</volume>(<issue>16</issue>):<fpage>107221</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3748/wjg.v31.i16.107221">https://doi.org/10.3748/wjg.v31.i16.107221</ext-link>
</mixed-citation>
</ref>
<ref id="ref53">
<label>53</label>
<mixed-citation publication-type="webpage">
<collab>International Medical Device Regulators Forum (IMDRF)</collab>. <article-title>Software as a Medical Device (SaMD): key definitions</article-title>. <source>IMDRF/SaMD WG/N10FINAL:2013</source>. Published December 18, 2013. Accessed February 25, 2026. <ext-link ext-link-type="uri" xlink:href="https://www.imdrf.org/sites/default/files/docs/imdrf/final/technical/imdrf-tech-131209-samd-key-definitions-140901.pdf">https://www.imdrf.org/sites/default/files/docs/imdrf/final/technical/imdrf-tech-131209-samd-key-definitions-140901.pdf</ext-link></mixed-citation></ref>
<ref id="ref54">
<label>54</label>
<mixed-citation publication-type="webpage">
<collab>International Medical Device Regulators Forum (IMDRF)</collab>. <article-title>Software as a Medical Device (SaMD): Clinical Evaluation</article-title>. <source>IMDRF/SaMD WG/N41FINAL:2017</source>. Published September 21, 2017. Accessed February 25, 2026. <ext-link ext-link-type="uri" xlink:href="https://www.imdrf.org/sites/default/files/docs/imdrf/final/technical/imdrf-tech-170921-samd-n41-clinical-evaluation_1.pdf">https://www.imdrf.org/sites/default/files/docs/imdrf/final/technical/imdrf-tech-170921-samd-n41-clinical-evaluation_1.pdf</ext-link>
</mixed-citation>
</ref>
<ref id="ref55">
<label>55</label>
<mixed-citation publication-type="journal">
<string-name><surname>Garc&#x00ED;a-Granero</surname> <given-names>&#x00C1;</given-names></string-name>, <string-name><surname>Jer&#x00ED;-McFarlane</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ochogav&#x00ED;a</surname> <given-names>A</given-names></string-name>, <string-name><surname>Gamund&#x00ED;-Cuesta</surname> <given-names>M</given-names></string-name>, <string-name><surname>Garcia-Granero</surname> <given-names>E</given-names></string-name>, <string-name><surname>Gonz&#x00E1;lez-Argent&#x00E9;</surname> <given-names>FX</given-names></string-name>. <article-title>3D reconstructions in rectal cancer. New tools for better diagnosis and surgical planning</article-title>. <source>Cir Esp (Engl Ed)</source>. <year>2025</year>;<volume>103</volume>(<issue>9</issue>):<fpage>800198</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.cireng.2025.800198">https://doi.org/10.1016/j.cireng.2025.800198</ext-link>
</mixed-citation>
</ref>
<ref id="ref56">
<label>56</label>
<mixed-citation publication-type="webpage">
<collab>US Food and Drug Administration</collab>. <article-title>Software as a Medical Device (SaMD)</article-title>. Updated December 4, 2018. Accessed February 25, 2026. <ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/medical-devices/digital-health-center-excellence/software-medical-device-samd">https://www.fda.gov/medical-devices/digital-health-center-excellence/software-medical-device-samd</ext-link>
</mixed-citation>
</ref>
<ref id="ref57">
<label>57</label>
<mixed-citation publication-type="journal">
<string-name><surname>Checcucci</surname> <given-names>E</given-names></string-name>, <string-name><surname>De Cillis</surname> <given-names>S</given-names></string-name>, <string-name><surname>Porpiglia</surname> <given-names>F</given-names></string-name>. <article-title>3D-printed models and virtual reality as new tools for image-guided robot-assisted nephron-sparing surgery: a systematic review of the newest evidences</article-title>. <source>Curr Opin Urol</source>. <year>2020</year>;<volume>30</volume>(<issue>1</issue>):<fpage>55</fpage>&#x2013;<lpage>64</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1097/MOU.0000000000000686">https://doi.org/10.1097/MOU.0000000000000686</ext-link>
</mixed-citation>
</ref>
<ref id="ref58">
<label>58</label>
<mixed-citation publication-type="journal">
<string-name><surname>Chen</surname> <given-names>X</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>H</given-names></string-name>, <string-name><surname>Qi</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>C</given-names></string-name>, <string-name><surname>Jin</surname> <given-names>J</given-names></string-name>, <string-name><surname>Zhao</surname> <given-names>H</given-names></string-name>, <etal>et al</etal>. <article-title>AI-based chest CT semantic segmentation algorithm enables semi-automated lung cancer surgery planning by recognizing anatomical variants of pulmonary vessels</article-title>. <source>Front Oncol</source>. <year>2022</year>;<volume>12</volume>:<fpage>1021084</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fonc.2022.1021084">https://doi.org/10.3389/fonc.2022.1021084</ext-link>
</mixed-citation>
</ref>
<ref id="ref59">
<label>59</label>
<mixed-citation publication-type="journal">
<string-name><surname>Wang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>H</given-names></string-name>, <string-name><surname>Li</surname> <given-names>J</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>J</given-names></string-name>, <string-name><surname>Meng</surname> <given-names>F</given-names></string-name>, <string-name><surname>Li</surname> <given-names>H</given-names></string-name>, <etal>et al</etal>. <article-title>Preoperative Prediction of Axillary Lymph Node Metastasis in Breast Cancer Using CNN Based on Multiparametric MRI</article-title>. <source>J Magn Reson Imaging</source>. <year>2022</year> Sep;<volume>56</volume>(<issue>3</issue>):<fpage>700</fpage>&#x2013;<lpage>9</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/jmri.28082">https://doi.org/10.1002/jmri.28082</ext-link>
</mixed-citation>
</ref>
<ref id="ref60">
<label>60</label>
<mixed-citation publication-type="journal">
<string-name><surname>Tan</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Feng</surname> <given-names>LJ</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>YH</given-names></string-name>, <string-name><surname>Xue</surname> <given-names>JW</given-names></string-name>, <string-name><surname>Long</surname> <given-names>LL</given-names></string-name>, <string-name><surname>Feng</surname> <given-names>ZB</given-names></string-name>. <article-title>A comprehensive radiopathological nomogram for the prediction of pathological staging in gastric cancer using CT-derived and WSI-based features</article-title>. <source>Transl Oncol</source>. <year>2024</year>;<volume>40</volume>:<fpage>101864</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.tranon.2023.101864">https://doi.org/10.1016/j.tranon.2023.101864</ext-link>
</mixed-citation>
</ref>
<ref id="ref61">
<label>61</label>
<mixed-citation publication-type="journal">
<string-name><surname>Yao</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Cen</surname> <given-names>X</given-names></string-name>, <string-name><surname>Gan</surname> <given-names>L</given-names></string-name>, <string-name><surname>Jiang</surname> <given-names>J</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>M</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>Y</given-names></string-name>, <etal>et al</etal>. <article-title>Automated esophageal cancer staging from free-text radiology reports: large language model evaluation study</article-title>. <source>JMIR Med Inform</source>. <year>2025</year>;<volume>13</volume>:<elocation-id>e75556</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2196/75556">https://doi.org/10.2196/75556</ext-link>
</mixed-citation>
</ref>
<ref id="ref62">
<label>62</label>
<mixed-citation publication-type="journal">
<string-name><surname>Tang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Feng</surname> <given-names>S</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Ban</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>Y</given-names></string-name>. <article-title>Using pathomics-based model for predicting positive surgical margins in patients with esophageal squamous cell carcinoma: a comparative study of decision tree and nomogram</article-title>. <source>Int J Gen Med</source>. <year>2024</year>;<volume>17</volume>:<fpage>5869</fpage>&#x2013;<lpage>82</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.2147/IJGM.S495296">https://doi.org/10.2147/IJGM.S495296</ext-link>
</mixed-citation>
</ref>
<ref id="ref63">
<label>63</label>
<mixed-citation publication-type="journal">
<string-name><surname>Awad</surname> <given-names>MH</given-names></string-name>, <string-name><surname>Sanchez</surname> <given-names>M</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>MA</given-names></string-name>. <article-title>The values work of restorative ventures: the role of founders&#x2019; embodied embeddedness with at-risk social groups</article-title>. <source>J Bus Ventur Insights</source>. <year>2022</year>;<volume>18</volume>:<elocation-id>e00337</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jbvi.2022.e00337">https://doi.org/10.1016/j.jbvi.2022.e00337</ext-link>
</mixed-citation>
</ref>
<ref id="ref64">
<label>64</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kaida</surname> <given-names>S</given-names></string-name>, <string-name><surname>Murakami</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Masaki</surname> <given-names>Y</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence-driven 3-dimensional simulation system for enhanced preoperative planning in gastric cancer surgery: a retrospective validation study</article-title>. <source>J Gastrointest Surg</source>. Published online December <year>2025</year>:<elocation-id>102295</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.gassur.2025.102295">https://doi.org/10.1016/j.gassur.2025.102295</ext-link>
</mixed-citation>
</ref>
<ref id="ref65">
<label>65</label>
<mixed-citation publication-type="journal">
<string-name><surname>Hamabe</surname> <given-names>A</given-names></string-name>, <string-name><surname>Ishii</surname> <given-names>M</given-names></string-name>, <string-name><surname>Kamoda</surname> <given-names>R</given-names></string-name>, <string-name><surname>Sasuga</surname> <given-names>S</given-names></string-name>, <string-name><surname>Okuya</surname> <given-names>K</given-names></string-name>, <string-name><surname>Okita</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence-based technology to make a three-dimensional pelvic model for preoperative simulation of rectal cancer surgery using MRI</article-title>. <source>Ann Gastroenterol Surg</source>. <year>2022</year>;<volume>6</volume>(<issue>6</issue>):<fpage>788</fpage>&#x2013;<lpage>94</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/ags3.12574">https://doi.org/10.1002/ags3.12574</ext-link>
</mixed-citation>
</ref>
<ref id="ref66">
<label>66</label>
<mixed-citation publication-type="journal">
<string-name><surname>Aisu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Okada</surname> <given-names>T</given-names></string-name>, <string-name><surname>Itatani</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Masuo</surname> <given-names>A</given-names></string-name>, <string-name><surname>Tani</surname> <given-names>R</given-names></string-name>, <string-name><surname>Fujimoto</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>Automatic segmentation of male pelvic floor soft tissue structures for anatomical simulation and morphological assessment in lower rectal cancer surgery</article-title>. <source>Tech Coloproctol</source>. <year>2025</year>;<volume>29</volume>(<issue>1</issue>):<fpage>176</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10151-025-03218-z">https://doi.org/10.1007/s10151-025-03218-z</ext-link>
</mixed-citation>
</ref>
<ref id="ref67">
<label>67</label>
<mixed-citation publication-type="journal">
<string-name><surname>Wang</surname> <given-names>F</given-names></string-name>, <string-name><surname>Xiao</surname> <given-names>C</given-names></string-name>, <string-name><surname>Jia</surname> <given-names>T</given-names></string-name>, <string-name><surname>Pan</surname> <given-names>L</given-names></string-name>, <string-name><surname>Du</surname> <given-names>F</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>Z</given-names></string-name>. <article-title>Hepatobiliary surgery based on intelligent image segmentation technology</article-title>. <source>Open Life Sci</source>. <year>2023</year> Aug;<volume>18</volume>(<issue>1</issue>):<fpage>20220674</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1515/biol-2022-0674">https://doi.org/10.1515/biol-2022-0674</ext-link>
</mixed-citation>
</ref>
<ref id="ref68">
<label>68</label>
<mixed-citation publication-type="journal">
<string-name><surname>Hofmann</surname> <given-names>FO</given-names></string-name>, <string-name><surname>Heiliger</surname> <given-names>C</given-names></string-name>, <string-name><surname>Tschaidse</surname> <given-names>T</given-names></string-name>, <string-name><surname>Jarmusch</surname> <given-names>S</given-names></string-name>, <string-name><surname>Auhage</surname> <given-names>LA</given-names></string-name>, <string-name><surname>Aghamaliyev</surname> <given-names>U</given-names></string-name>, <etal>et al</etal>. <article-title>Validation of body composition parameters extracted via deep learning-based segmentation from routine computed tomographies</article-title>. <source>Sci Rep</source>. <year>2025</year>;<volume>15</volume>(<issue>1</issue>):<fpage>11909</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41598-025-96238-6">https://doi.org/10.1038/s41598-025-96238-6</ext-link>
</mixed-citation>
</ref>
<ref id="ref69">
<label>69</label>
<mixed-citation publication-type="journal">
<string-name><surname>Suthakaran</surname> <given-names>R</given-names></string-name>, <string-name><surname>Cao</surname> <given-names>K</given-names></string-name>, <string-name><surname>Arafat</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Yeung</surname> <given-names>J</given-names></string-name>, <string-name><surname>Chan</surname> <given-names>S</given-names></string-name>, <string-name><surname>Master</surname> <given-names>M</given-names></string-name>, <etal>et al</etal>. <article-title>Body composition assessment by artificial intelligence can be a predictive tool for short-term postoperative complications in Hartmann&#x2019;s reversals</article-title>. <source>BMC Surg</source>. <year>2024</year> Apr;<volume>24</volume>(<issue>1</issue>):<fpage>111</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12893-024-02408-0">https://doi.org/10.1186/s12893-024-02408-0</ext-link>
</mixed-citation>
</ref>
<ref id="ref70">
<label>70</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kim</surname> <given-names>J</given-names></string-name>, <string-name><surname>Han</surname> <given-names>SH</given-names></string-name>, <string-name><surname>Kim</surname> <given-names>HI</given-names></string-name>. <article-title>Detection of sarcopenic obesity and prediction of long-term survival in patients with gastric cancer using preoperative computed tomography and machine learning</article-title>. <source>J Surg Oncol</source>. <year>2021</year>;<volume>124</volume>(<issue>8</issue>):<fpage>1347</fpage>&#x2013;<lpage>55</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1002/jso.26668">https://doi.org/10.1002/jso.26668</ext-link>
</mixed-citation>
</ref>
<ref id="ref71">
<label>71</label>
<mixed-citation publication-type="journal">
<string-name><surname>Amparore</surname> <given-names>D</given-names></string-name>, <string-name><surname>Piramide</surname> <given-names>F</given-names></string-name>, <string-name><surname>De Cillis</surname> <given-names>S</given-names></string-name>, <string-name><surname>Verri</surname> <given-names>P</given-names></string-name>, <string-name><surname>Piana</surname> <given-names>A</given-names></string-name>, <string-name><surname>Pecoraro</surname> <given-names>A</given-names></string-name>, <etal>et al</etal>.; <collab>Renal Cancer Working Group of the Young Academic Urologists (YAU) and European Association of Urology (EAU)</collab>. <article-title>Robotic partial nephrectomy in 3D virtual reconstructions era: is the paradigm changed?</article-title> <source>World J Urol</source>. <year>2022</year>;<volume>40</volume>(<issue>3</issue>):<fpage>659</fpage>&#x2013;<lpage>70</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00345-022-03964-x">https://doi.org/10.1007/s00345-022-03964-x</ext-link>
</mixed-citation>
</ref>
<ref id="ref72">
<label>72</label>
<mixed-citation publication-type="journal">
<string-name><surname>Cheng</surname> <given-names>H</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>H</given-names></string-name>, <string-name><surname>Peng</surname> <given-names>B</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Zheng</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Illuminating the future of precision cancer surgery with fluorescence imaging and artificial intelligence convergence</article-title>. <source>NPJ Precis Oncol</source>. <year>2024</year>;<volume>8</volume>(<issue>1</issue>):<fpage>196</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41698-024-00699-3">https://doi.org/10.1038/s41698-024-00699-3</ext-link>
</mixed-citation>
</ref>
<ref id="ref73">
<label>73</label>
<mixed-citation publication-type="journal">
<string-name><surname>Iresj&#x00F6;</surname> <given-names>BM</given-names></string-name>, <string-name><surname>Smedh</surname> <given-names>U</given-names></string-name>, <string-name><surname>Engstr&#x00F6;m</surname> <given-names>C</given-names></string-name>, <string-name><surname>Persson</surname> <given-names>J</given-names></string-name>, <string-name><surname>M&#x00E5;rtensson</surname> <given-names>C</given-names></string-name>, <string-name><surname>Lundholm</surname> <given-names>K</given-names></string-name>. <article-title>A randomized translational study on protein and glucose metabolism in skeletal muscles evaluated by gene-ontology, following preoperative oral carbohydrate loading compared to overnight peripheral parenteral nutrition (PPN) before major cancer surgery</article-title>. <source>J Transl Med</source>. <year>2024</year>;<volume>22</volume>(<issue>1</issue>):<fpage>675</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s12967-024-05484-1">https://doi.org/10.1186/s12967-024-05484-1</ext-link>
</mixed-citation>
</ref>
<ref id="ref74">
<label>74</label>
<mixed-citation publication-type="journal">
<string-name><surname>van Oosterom</surname> <given-names>MN</given-names></string-name>, <string-name><surname>van Leeuwen</surname> <given-names>SI</given-names></string-name>, <string-name><surname>Mazzone</surname> <given-names>E</given-names></string-name>, <string-name><surname>Dell&#x2019;Oglio</surname> <given-names>P</given-names></string-name>, <string-name><surname>Buckle</surname> <given-names>T</given-names></string-name>, <string-name><surname>van Beurden</surname> <given-names>F</given-names></string-name>, <etal>et al</etal>. <article-title>Click-on fluorescence detectors: using robotic surgical instruments to characterize molecular tissue aspects</article-title>. <source>J Robot Surg</source>. <year>2023</year>;<volume>17</volume>(<issue>1</issue>):<fpage>131</fpage>&#x2013;<lpage>40</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11701-022-01382-0">https://doi.org/10.1007/s11701-022-01382-0</ext-link>
</mixed-citation>
</ref>
<ref id="ref75">
<label>75</label>
<mixed-citation publication-type="journal">
<string-name><surname>M&#x00FC;ller</surname> <given-names>D</given-names></string-name>, <string-name><surname>Stier</surname> <given-names>R</given-names></string-name>, <string-name><surname>Straatman</surname> <given-names>J</given-names></string-name>, <string-name><surname>Babic</surname> <given-names>B</given-names></string-name>, <string-name><surname>Schiffmann</surname> <given-names>L</given-names></string-name>, <string-name><surname>Eckhoff</surname> <given-names>J</given-names></string-name>, <etal>et al</etal>. <article-title>ICG-lymphknoten-mapping in der Tumorchirurgie des oberen Gastrointestinaltrakts</article-title>. <source>Chirurgie (Heidelb)</source>. <year>2022</year>;<volume>93</volume>(<issue>10</issue>):<fpage>925</fpage>&#x2013;<lpage>33</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00104-022-01659-y">https://doi.org/10.1007/s00104-022-01659-y</ext-link>
</mixed-citation>
</ref>
<ref id="ref76">
<label>76</label>
<mixed-citation publication-type="journal">
<string-name><surname>Hoogendam</surname> <given-names>JP</given-names></string-name>, <string-name><surname>Hobbelink</surname> <given-names>MG</given-names></string-name>, <string-name><surname>Veldhuis</surname> <given-names>WB</given-names></string-name>, <string-name><surname>Verheijen</surname> <given-names>RH</given-names></string-name>, <string-name><surname>van Diest</surname> <given-names>PJ</given-names></string-name>, <string-name><surname>Zweemer</surname> <given-names>RP</given-names></string-name>. <article-title>Preoperative sentinel node mapping with (99m)Tc-nanocolloid SPECT-CT significantly reduces the intraoperative sentinel node retrieval time in robot assisted laparoscopic cervical cancer surgery</article-title>. <source>Gynecol Oncol</source>. <year>2013</year>;<volume>129</volume>(<issue>2</issue>):<fpage>389</fpage>&#x2013;<lpage>94</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ygyno.2013.02.020">https://doi.org/10.1016/j.ygyno.2013.02.020</ext-link>
</mixed-citation>
</ref>
<ref id="ref77">
<label>77</label>
<mixed-citation publication-type="journal">
<string-name><surname>Alikarami</surname> <given-names>M</given-names></string-name>, <string-name><surname>Faraj</surname> <given-names>TA</given-names></string-name>, <string-name><surname>Hama</surname> <given-names>NH</given-names></string-name>, <string-name><surname>Hosseini</surname> <given-names>AS</given-names></string-name>, <string-name><surname>Habibi</surname> <given-names>P</given-names></string-name>, <string-name><surname>Samiei Mosleh</surname> <given-names>I</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence in advancing optical coherence tomography for disease detection and cancer diagnosis: A scoping review</article-title>. <source>Eur J Surg Oncol</source>. <year>2025</year>;<volume>51</volume>(<issue>9</issue>):<fpage>110188</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ejso.2025.110188">https://doi.org/10.1016/j.ejso.2025.110188</ext-link>
</mixed-citation>
</ref>
<ref id="ref78">
<label>78</label>
<mixed-citation publication-type="journal">
<string-name><surname>Levy</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Rempel</surname> <given-names>D</given-names></string-name>, <string-name><surname>Nguyen</surname> <given-names>M</given-names></string-name>, <string-name><surname>Yassine</surname> <given-names>A</given-names></string-name>, <string-name><surname>Sanati-Burns</surname> <given-names>M</given-names></string-name>, <string-name><surname>Salgia</surname> <given-names>P</given-names></string-name>, <etal>et al</etal>. <article-title>The fusion of wide field optical coherence tomography and AI: advancing breast cancer surgical margin visualization</article-title>. <source>Life (Basel)</source>. <year>2023</year>;<volume>13</volume>(<issue>12</issue>):<fpage>2340</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/life13122340">https://doi.org/10.3390/life13122340</ext-link>
</mixed-citation>
</ref>
<ref id="ref79">
<label>79</label>
<mixed-citation publication-type="journal">
<string-name><surname>Pavone</surname> <given-names>M</given-names></string-name>, <string-name><surname>Innocenzi</surname> <given-names>C</given-names></string-name>, <string-name><surname>Carles</surname> <given-names>E</given-names></string-name>, <string-name><surname>Bizzarri</surname> <given-names>N</given-names></string-name>, <string-name><surname>Moro</surname> <given-names>F</given-names></string-name>, <string-name><surname>Ferrari</surname> <given-names>FA</given-names></string-name>, <etal>et al</etal>. <article-title>Cutting edge microscopic intraoperative tissue assessment for guidance in oncologic surgery: a systematic review of the role of optical coherence tomography</article-title>. <source>Ann Surg Oncol</source>. <year>2025</year>;<volume>32</volume>(<issue>3</issue>):<fpage>2191</fpage>&#x2013;<lpage>205</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1245/s10434-024-16632-8">https://doi.org/10.1245/s10434-024-16632-8</ext-link>
</mixed-citation>
</ref>
<ref id="ref80">
<label>80</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Awad</surname> <given-names>MH</given-names></string-name>, <string-name><surname>Korouri</surname> <given-names>S</given-names></string-name>, <string-name><surname>Mohseni</surname> <given-names>K</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>D</given-names></string-name>, <string-name><surname>Freichel</surname> <given-names>R</given-names></string-name>, <etal>et al</etal>. <article-title>Reframing clinical AI evaluation in the era of generative models: toward multidimensional, stakeholder-informed, and safety-centric frameworks for real-world health care deployment</article-title>. <source>PJS</source>. <year>2025</year>;<volume>11</volume>:<fpage>100089</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.70389/PJS.100089">https://doi.org/10.70389/PJS.100089</ext-link>
</mixed-citation>
</ref>
<ref id="ref81">
<label>81</label>
<mixed-citation publication-type="journal">
<string-name><surname>Tanzim</surname> <given-names>U</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>IA</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Al-Deen</surname> <given-names>RH</given-names></string-name>, <string-name><surname>Miah</surname> <given-names>L</given-names></string-name>, <string-name><surname>Blaaza</surname> <given-names>M</given-names></string-name>, <etal>et al</etal>. <article-title>Transforming patient-provider communication: the role of artificial intelligence in advancing health literacy &#x2013; a comprehensive review</article-title>. <source>PJS</source>. <year>2025</year>;<volume>13</volume>:<fpage>100095</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.70389/PJS.100095">https://doi.org/10.70389/PJS.100095</ext-link>
</mixed-citation>
</ref>
<ref id="ref82">
<label>82</label>
<mixed-citation publication-type="journal">
<string-name><surname>Cheng</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Dong</surname> <given-names>Y</given-names></string-name> (<string-name><surname>Hussein</surname> <given-names>AF</given-names></string-name>, <role>editor</role>). <article-title>Da Vinci robot-assisted video image processing under artificial intelligence vision processing technology</article-title>. <source>Comput Math Methods Med</source>. <year>2022</year>. pp. <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1155/2022/2752444">https://doi.org/10.1155/2022/2752444</ext-link>
</mixed-citation>
</ref>
<ref id="ref83">
<label>83</label>
<mixed-citation publication-type="journal">
<string-name><surname>Ebigbo</surname> <given-names>A</given-names></string-name>, <string-name><surname>Mendel</surname> <given-names>R</given-names></string-name>, <string-name><surname>Scheppach</surname> <given-names>MW</given-names></string-name>, <string-name><surname>Probst</surname> <given-names>A</given-names></string-name>, <string-name><surname>Shahidi</surname> <given-names>N</given-names></string-name>, <string-name><surname>Prinz</surname> <given-names>F</given-names></string-name>, <etal>et al</etal>. <article-title>Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm</article-title>. <source>Gut</source>. <year>2022</year>;<volume>71</volume>(<issue>12</issue>):<fpage>2388</fpage>&#x2013;<lpage>90</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1136/gutjnl-2021-326470">https://doi.org/10.1136/gutjnl-2021-326470</ext-link>
</mixed-citation>
</ref>
<ref id="ref84">
<label>84</label>
<mixed-citation publication-type="journal">
<string-name><surname>Checcucci</surname> <given-names>E</given-names></string-name>, <string-name><surname>Piana</surname> <given-names>A</given-names></string-name>, <string-name><surname>Volpi</surname> <given-names>G</given-names></string-name>, <string-name><surname>Piazzolla</surname> <given-names>P</given-names></string-name>, <string-name><surname>Amparore</surname> <given-names>D</given-names></string-name>, <string-name><surname>De Cillis</surname> <given-names>S</given-names></string-name>, <etal>et al</etal>. <article-title>Three-dimensional automatic artificial intelligence driven augmented-reality selective biopsy during nerve-sparing robot-assisted radical prostatectomy: A feasibility and accuracy study</article-title>. <source>Asian J Urol</source>. <year>2023</year>;<volume>10</volume>(<issue>4</issue>):<fpage>407</fpage>&#x2013;<lpage>15</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ajur.2023.08.001">https://doi.org/10.1016/j.ajur.2023.08.001</ext-link>
</mixed-citation>
</ref>
<ref id="ref85">
<label>85</label>
<mixed-citation publication-type="journal">
<string-name><surname>Park</surname> <given-names>SH</given-names></string-name>, <string-name><surname>Park</surname> <given-names>HM</given-names></string-name>, <string-name><surname>Baek</surname> <given-names>KR</given-names></string-name>, <string-name><surname>Ahn</surname> <given-names>HM</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>IY</given-names></string-name>, <string-name><surname>Son</surname> <given-names>GM</given-names></string-name>. <article-title>Artificial intelligence based real-time microcirculation analysis system for laparoscopic colorectal surgery</article-title>. <source>World J Gastroenterol</source>. <year>2020</year>;<volume>26</volume>(<issue>44</issue>):<fpage>6945</fpage>&#x2013;<lpage>62</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3748/wjg.v26.i44.6945">https://doi.org/10.3748/wjg.v26.i44.6945</ext-link>
</mixed-citation>
</ref>
<ref id="ref86">
<label>86</label>
<mixed-citation publication-type="journal">
<string-name><surname>Boland</surname> <given-names>PA</given-names></string-name>, <string-name><surname>Hardy</surname> <given-names>NP</given-names></string-name>, <string-name><surname>Moynihan</surname> <given-names>A</given-names></string-name>, <string-name><surname>McEntee</surname> <given-names>PD</given-names></string-name>, <string-name><surname>Loo</surname> <given-names>C</given-names></string-name>, <string-name><surname>Fenlon</surname> <given-names>H</given-names></string-name>, <etal>et al</etal>. <article-title>Intraoperative near infrared functional imaging of rectal cancer using artificial intelligence methods &#x2013; now and near future state of the art</article-title>. <source>Eur J Nucl Med Mol Imaging</source>. <year>2024</year>;<volume>51</volume>(<issue>10</issue>):<fpage>3135</fpage>&#x2013;<lpage>48</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00259-024-06731-9">https://doi.org/10.1007/s00259-024-06731-9</ext-link>
</mixed-citation>
</ref>
<ref id="ref87">
<label>87</label>
<mixed-citation publication-type="journal">
<string-name><surname>Checcucci</surname> <given-names>E</given-names></string-name>, <string-name><surname>Veccia</surname> <given-names>A</given-names></string-name>, <string-name><surname>Puliatti</surname> <given-names>S</given-names></string-name>, <string-name><surname>De Backer</surname> <given-names>P</given-names></string-name>, <string-name><surname>Piazza</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kowalewski</surname> <given-names>KF</given-names></string-name>, <etal>et al</etal>. <article-title>Metaverse in surgery &#x2013; origins and future potential</article-title>. <source>Nat Rev Urol</source>. <year>2026</year>;<volume>23</volume>(<issue>1</issue>):<fpage>50</fpage>&#x2013;<lpage>63</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41585-024-00941-4">https://doi.org/10.1038/s41585-024-00941-4</ext-link>
</mixed-citation>
</ref>
<ref id="ref88">
<label>88</label>
<mixed-citation publication-type="journal">
<string-name><surname>Won</surname> <given-names>NJ</given-names></string-name>, <string-name><surname>Bartling</surname> <given-names>M</given-names></string-name>, <string-name><surname>La Macchia</surname> <given-names>J</given-names></string-name>, <string-name><surname>Markevich</surname> <given-names>S</given-names></string-name>, <string-name><surname>Holtshousen</surname> <given-names>S</given-names></string-name>, <string-name><surname>Jagota</surname> <given-names>A</given-names></string-name>, <etal>et al</etal>. <article-title>Deep learning-enabled fluorescence imaging for surgical guidance: in silico training for oral cancer depth quantification</article-title>. <source>J Biomed Opt</source>. <year>2025</year>;<volume>30</volume>(<issue>S1 Suppl 1</issue>):<fpage>S13706</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1117/1.JBO.30.S1.S13706">https://doi.org/10.1117/1.JBO.30.S1.S13706</ext-link>
</mixed-citation>
</ref>
<ref id="ref89">
<label>89</label>
<mixed-citation publication-type="journal">
<string-name><surname>Kurosawa</surname> <given-names>H</given-names></string-name>, <string-name><surname>Won</surname> <given-names>NJ</given-names></string-name>, <string-name><surname>Wunder</surname> <given-names>JB</given-names></string-name>, <string-name><surname>Patil</surname> <given-names>S</given-names></string-name>, <string-name><surname>Bartling</surname> <given-names>M</given-names></string-name>, <string-name><surname>Najjar</surname> <given-names>E</given-names></string-name>, <etal>et al</etal>. <article-title>Deep learning-enabled fluorescence imaging for oral cancer margin classification in preclinical models</article-title>. <source>J Biomed Opt</source>. <year>2025</year>;<volume>30</volume>(<issue>S3 Suppl 3</issue>):<fpage>S34109</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1117/1.JBO.30.S3.S34109">https://doi.org/10.1117/1.JBO.30.S3.S34109</ext-link>
</mixed-citation>
</ref>
<ref id="ref90">
<label>90</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abosheisha</surname> <given-names>M</given-names></string-name>, <string-name><surname>Nasr</surname> <given-names>E</given-names></string-name>, <string-name><surname>Ali</surname> <given-names>M</given-names></string-name>, <string-name><surname>Tamanna</surname> <given-names>R</given-names></string-name>, <string-name><surname>Bin Halim</surname> <given-names>S</given-names></string-name>, <string-name><surname>Rakib Hasan</surname> <given-names>M</given-names></string-name>, <etal>et al</etal>. <article-title>Near-infrared fluorescence imaging in general surgery: applications in vascularization, tumor margin detection, and biliary anatomy</article-title>. <source>Cureus</source>. <year>2025</year>;<volume>17</volume>(<issue>9</issue>):<elocation-id>e92194</elocation-id>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.7759/cureus.92194">https://doi.org/10.7759/cureus.92194</ext-link>
</mixed-citation>
</ref>
<ref id="ref91">
<label>91</label>
<mixed-citation publication-type="journal">
<string-name><surname>M&#x00FC;ller</surname> <given-names>DT</given-names></string-name>, <string-name><surname>Schiffmann</surname> <given-names>LM</given-names></string-name>, <string-name><surname>Reisewitz</surname> <given-names>A</given-names></string-name>, <string-name><surname>Chon</surname> <given-names>SH</given-names></string-name>, <string-name><surname>Eckhoff</surname> <given-names>JA</given-names></string-name>, <string-name><surname>Babic</surname> <given-names>B</given-names></string-name>, <etal>et al</etal>. <article-title>Mapping the lymphatic drainage pattern of esophageal cancer with near-infrared fluorescent imaging during robotic assisted minimally invasive Ivor Lewis esophagectomy (RAMIE) &#x2013; First results of the prospective ESOMAP feasibility trial</article-title>. <source>Cancers (Basel)</source>. <year>2023</year>;<volume>15</volume>(<issue>8</issue>):<fpage>2247</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers15082247">https://doi.org/10.3390/cancers15082247</ext-link>
</mixed-citation>
</ref>
<ref id="ref92">
<label>92</label>
<mixed-citation publication-type="journal">
<string-name><surname>Connolly</surname> <given-names>L</given-names></string-name>, <string-name><surname>Fooladgar</surname> <given-names>F</given-names></string-name>, <string-name><surname>Jamzad</surname> <given-names>A</given-names></string-name>, <string-name><surname>Kaufmann</surname> <given-names>M</given-names></string-name>, <string-name><surname>Syeda</surname> <given-names>A</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>K</given-names></string-name>, <etal>et al</etal>. <article-title>ImSpect: image-driven self-supervised learning for surgical margin evaluation with mass spectrometry</article-title>. <source>Int J Comput Assist Radiol Surg</source>. <year>2024</year>;<volume>19</volume>(<issue>6</issue>):<fpage>1129</fpage>&#x2013;<lpage>36</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11548-024-03106-1">https://doi.org/10.1007/s11548-024-03106-1</ext-link>
</mixed-citation>
</ref>
<ref id="ref93">
<label>93</label>
<mixed-citation publication-type="journal">
<string-name><surname>Connolly</surname> <given-names>L</given-names></string-name>, <string-name><surname>Jamzad</surname> <given-names>A</given-names></string-name>, <string-name><surname>Kaufmann</surname> <given-names>M</given-names></string-name>, <string-name><surname>Farquharson</surname> <given-names>CE</given-names></string-name>, <string-name><surname>Ren</surname> <given-names>K</given-names></string-name>, <string-name><surname>Rudan</surname> <given-names>JF</given-names></string-name>, <etal>et al</etal>. <article-title>Combined mass spectrometry and histopathology imaging for perioperative tissue assessment in cancer surgery</article-title>. <source>J Imaging</source>. <year>2021</year>;<volume>7</volume>(<issue>10</issue>):<fpage>203</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/jimaging7100203">https://doi.org/10.3390/jimaging7100203</ext-link>
</mixed-citation>
</ref>
<ref id="ref94">
<label>94</label>
<mixed-citation publication-type="journal">
<string-name><surname>Boal</surname> <given-names>M</given-names></string-name>, <string-name><surname>Reali</surname> <given-names>C</given-names></string-name>, <string-name><surname>Duhoky</surname> <given-names>R</given-names></string-name>, <string-name><surname>Gill</surname> <given-names>T</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>J</given-names></string-name>, <string-name><surname>Miskovic</surname> <given-names>D</given-names></string-name>, <etal>et al</etal>. <article-title>Association of skill and errors with outcomes in robotic rectal cancer surgery</article-title>. <source>Surg Endosc</source>. <year>2025</year>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00464-025-12393-x">https://doi.org/10.1007/s00464-025-12393-x</ext-link>
</mixed-citation>
</ref>
<ref id="ref95">
<label>95</label>
<mixed-citation publication-type="journal">
<string-name><surname>Quero</surname> <given-names>G</given-names></string-name>, <string-name><surname>Mascagni</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kolbinger</surname> <given-names>FR</given-names></string-name>, <string-name><surname>Fiorillo</surname> <given-names>C</given-names></string-name>, <string-name><surname>De Sio</surname> <given-names>D</given-names></string-name>, <string-name><surname>Longo</surname> <given-names>F</given-names></string-name>, <etal>et al</etal>. <article-title>Artificial intelligence in colorectal cancer surgery: present and future perspectives</article-title>. <source>Cancers (Basel)</source>. <year>2022</year>;<volume>14</volume>(<issue>15</issue>):<fpage>3803</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/cancers14153803">https://doi.org/10.3390/cancers14153803</ext-link>
</mixed-citation>
</ref>
<ref id="ref96">
<label>96</label>
<mixed-citation publication-type="journal">
<string-name><surname>Siddarth</surname> <given-names>P</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Grzenda</surname> <given-names>A</given-names></string-name>, <string-name><surname>Cappelletti</surname> <given-names>M</given-names></string-name>, <string-name><surname>Oughli</surname> <given-names>H</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>C</given-names></string-name>, <etal>et al</etal>. <article-title>Inflammatory markers of geriatric depression response to Tai Chi or Health Education Adjunct Interventions</article-title>. <source>Am J Geriatr Psychiatry</source>. <year>2023</year>;<volume>31</volume>(<issue>1</issue>):<fpage>22</fpage>&#x2013;<lpage>32</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.jagp.2022.08.004">https://doi.org/10.1016/j.jagp.2022.08.004</ext-link>
</mixed-citation>
</ref>
<ref id="ref97">
<label>97</label>
<mixed-citation publication-type="journal">
<string-name><surname>Ajam Oughli</surname> <given-names>H</given-names></string-name>, <string-name><surname>Siddarth</surname> <given-names>P</given-names></string-name>, <string-name><surname>Lum</surname> <given-names>M</given-names></string-name>, <string-name><surname>Tang</surname> <given-names>L</given-names></string-name>, <string-name><surname>Ito</surname> <given-names>B</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <etal>et al</etal>. <article-title>Peripheral Alzheimer&#x2019;s disease biomarkers are related to change in subjective memory in older women with cardiovascular risk factors in a trial of yoga vs. memory training: lien &#x00E9;tabli entre les biomarqueurs p&#x00E9;riph&#x00E9;riques de la maladie d&#x2019;Alzheimer et l&#x2019;am&#x00E9;lioration de la m&#x00E9;moire subjective chez les femmes &#x00E2;g&#x00E9;es pr&#x00E9;sentant des facteurs de risque cardiovasculaire dans le cadre d&#x2019;un essai comparant le yoga &#x00E0; l&#x2019;entra&#x00EE;nement de la m&#x00E9;moire</article-title>. <source>Can J Psychiatry</source>. <year>2025</year>;<volume>0</volume>(<issue>0</issue>):<fpage>7067437251343291</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1177/07067437251343291">https://doi.org/10.1177/07067437251343291</ext-link>
</mixed-citation>
</ref>
<ref id="ref98">
<label>98</label>
<mixed-citation publication-type="journal">
<string-name><surname>Basereh</surname> <given-names>M</given-names></string-name>, <string-name><surname>Abikenari</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Sadeghzadeh</surname> <given-names>S</given-names></string-name>, <etal>et al</etal>. <article-title>ConvNeXt-driven detection of Alzheimer&#x2019;s disease: a benchmark study on expert-annotated AlzaSet MRI dataset across anatomical planes</article-title>. <source>Diagnostics (Basel)</source>. <year>2025</year>;<volume>15</volume>(<issue>23</issue>):<fpage>2997</fpage>. Published November 25, 2025. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/diagnostics15232997">https://doi.org/10.3390/diagnostics15232997</ext-link>
</mixed-citation>
</ref>
<ref id="ref99">
<label>99</label>
<mixed-citation publication-type="preprint">
<string-name><surname>Luo</surname> <given-names>X</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Eweje</surname> <given-names>F</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Yang</surname> <given-names>S</given-names></string-name>, <string-name><surname>Quinton</surname> <given-names>R</given-names></string-name>, <etal>et al</etal>. <article-title>Ensemble learning of foundation models for precision oncology</article-title>. <source>arXiv preprint</source>. <year>2025</year>. arXiv:2508.16085.
</mixed-citation>
</ref>
<ref id="ref100">
<label>100</label>
<mixed-citation publication-type="journal">
<string-name><surname>Abikenari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>J</given-names></string-name>, <string-name><surname>Ha</surname> <given-names>JH</given-names></string-name>, <string-name><surname>Annagiri</surname> <given-names>S</given-names></string-name>, <string-name><surname>Himic</surname> <given-names>V</given-names></string-name>, <string-name><surname>Medikonda</surname> <given-names>R</given-names></string-name>, <etal>et al</etal>. <article-title>Emerging trends in cell-based therapies: contemporary advances and ethical considerations in translational neurosurgical oncology</article-title>. <source>J Neurooncol</source>. <year>2025</year>;<volume>175</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>20</lpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11060-025-05170-2">https://doi.org/10.1007/s11060-025-05170-2</ext-link>
</mixed-citation>
</ref>
<ref id="ref101">
<label>101</label>
<mixed-citation publication-type="journal">
<string-name><surname>Li</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>X</given-names></string-name>, <string-name><surname>Fang</surname> <given-names>D</given-names></string-name>, <string-name><surname>Luo</surname> <given-names>Y</given-names></string-name>. <article-title>Informing immunotherapy with multi-omics driven machine learning</article-title>. <source>NPJ Digit Med</source>. <year>2024</year>;<volume>7</volume>(<issue>1</issue>):<fpage>67</fpage>. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1038/s41746-024-01043-6">https://doi.org/10.1038/s41746-024-01043-6</ext-link>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>