<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2022.986089</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Oncology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Preoperative CT-based deep learning model for predicting overall survival in patients with high-grade serous ovarian cancer</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Zheng</surname>
<given-names>Yawen</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1978383"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Fang</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1682195"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Wenxia</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Yongmei</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yang</surname>
<given-names>Bo</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yang</surname>
<given-names>Xingsheng</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1317811"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Dong</surname>
<given-names>Taotao</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1898111"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Obstetrics and Gynecology, Qilu Hospital of Shandong University</institution>, <addr-line>Jinan</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Radiology, Qilu Hospital of Shandong University</institution>, <addr-line>Jinan</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Operating room, Qilu Hospital of Shandong University</institution>, <addr-line>Jinan</addr-line>, <country>China</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Radiology, Qingzhou People&#x2019;s Hospital</institution>, <addr-line>Qingzhou</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Gulisa Turashvili, Emory University, United States</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Chuanbo Xie, Sun Yat-sen University Cancer Center (SYSUCC), China; James Java, University of Rochester Medical Center, United States</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Xingsheng Yang, <email xlink:href="mailto:xingshengyang@sdu.edu.cn">xingshengyang@sdu.edu.cn</email>; Taotao Dong, <email xlink:href="mailto:stevendtt@163.com">stevendtt@163.com</email>
</p>
</fn>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work and share first authorship</p>
</fn>
<fn fn-type="other" id="fn002">
<p>This article was submitted to Gynecological Oncology, a section of the journal Frontiers in Oncology</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>09</day>
<month>09</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>12</volume>
<elocation-id>986089</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>07</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>18</day>
<month>08</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Zheng, Wang, Zhang, Li, Yang, Yang and Dong</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Zheng, Wang, Zhang, Li, Yang, Yang and Dong</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Purpose</title>
<p>High-grade serous ovarian cancer (HGSOC) is aggressive and has a high mortality rate. A Vit-based deep learning model was developed to predict overall survival in HGSOC patients based on preoperative CT images.</p>
</sec>
<sec>
<title>Methods</title>
<p>734 patients with HGSOC were retrospectively studied at Qilu Hospital of Shandong University with preoperative CT images and clinical information. The whole dataset was randomly split into training cohort (n = 550) and validation cohort (n = 184). A Vit-based deep learning model was built to output an independent prognostic risk score; afterward, a nomogram was established for predicting overall survival.</p>
</sec>
<sec>
<title>Results</title>
<p>Our Vit-based deep learning model showed promising results in predicting survival in the training cohort (AUC = 0.822) and the validation cohort (AUC = 0.823). The multivariate Cox regression analysis indicated that the image score was an independent prognostic factor in the training (HR = 9.03, 95% CI: 4.38, 18.65) and validation cohorts (HR = 9.59, 95% CI: 4.20, 21.92). Kaplan-Meier&#xa0;survival&#xa0;analysis indicated that the image score obtained from the model yielded promising prognostic significance to refine the risk stratification of patients with HGSOC, and the integrative nomogram achieved a C-index of 0.74 in the training cohort and 0.72 in the validation cohort.</p>
</sec>
<sec>
<title>Conclusions</title>
<p>Our model provides a non-invasive, simple, and feasible method to predict overall survival in patients with HGSOC based on preoperative CT images, which could help with survival prognostication and may facilitate clinical decision making in the era&#xa0;of&#xa0;individualized&#xa0;and&#xa0;precision&#xa0;medicine.</p>
</sec>
</abstract>
<kwd-group>
<kwd>ovarian cancer</kwd>
<kwd>survival prediction</kwd>
<kwd>deep learning</kwd>
<kwd>personalized model</kwd>
<kwd>nomogram</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="35"/>
<page-count count="10"/>
<word-count count="4002"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>In the gynecological field, with an estimated 184,799 deaths worldwide annually, ovarian cancer is one of the most common and deadliest tumors. Among them, high-grade serous carcinoma subtype (HGSOC) is the most aggressive form and accounts for&#xa0;the&#xa0;majority&#xa0;of&#xa0;mortality (<xref ref-type="bibr" rid="B1">1</xref>). Despite advances in HGSOC therapy, such as surgery, chemotherapy, targeted therapy&#xa0;and&#xa0;immunotherapy, the 5-year overall survival is still substantial, less than 50% (<xref ref-type="bibr" rid="B2">2</xref>). Currently, stratification of HGSOC risk is still based on the stage of International Federation of Gynecology and Obstetrics (FIGO) (<xref ref-type="bibr" rid="B3">3</xref>), but because of the spatial and temporal heterogeneity of tumors, clinical biomarkers provide only partial information (<xref ref-type="bibr" rid="B4">4</xref>).</p>
<p>Currently, with its capacity to visualize a cancer&#x2019;s appearance at a macroscopic level noninvasively, medical imaging contains more prognostic information for the primary tumor (<xref ref-type="bibr" rid="B5">5</xref>). Growing evidence suggests that computed tomography (CT) contains more mineable high-dimensional data which could promote personalized care and survival prediction in cancer patients (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B8">8</xref>). Despite the advantages that CT could quantify tumor shape and texture information, the existing hand-crafted feature engineering is difficult to extract the full intrinsic characteristics and prone to human biases (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B8">8</xref>).</p>
<p>In recent years, with the development of optimization techniques and the improvement in computing devices, deep learning is becoming a popular method in medical image analysis (<xref ref-type="bibr" rid="B9">9</xref>). Because of its unique ability to learn features from raw data, the intrinsic characteristics of images have been mined automatically, thus reducing information redundancy and aiding clinical decision (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B11">11</xref>). With the Transformer architecture, the Vision Transformer (ViT) has been shown to model long-range dependency among pixels and demonstrated the state-of-the-art (SOTA) performance in image classification task (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B13">13</xref>). The attention mechanism in ViT model allows integration of global information but not of the local information from the CT images, which could have a significant impact on classification tasks. Existing literature describes the use of the ViT model to predict medical targets, such as emphysema classification (<xref ref-type="bibr" rid="B14">14</xref>) and COVID-19 diagnosis (<xref ref-type="bibr" rid="B15">15</xref>).</p>
<p>In this study, we developed a Vit-based deep learning model to predict overall survival in patients with HGSOC based on preoperative CT images. Instead of delineating precise tumor boundaries, which is often required in conventional radiomics methods, our model requires only a rectangular region of the tumor, thus reducing the interobserver error and manual segmentation time. Moreover, integrating the prediction features learned from CT images, clinicopathological and hematological markers, we established a comprehensive nomogram aiming to provide a non-invasive individualized recurrence prediction model in HGSOC.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<title>Material and methods</title>
<sec id="s2_1">
<title>Patients</title>
<p>Ethical clearance of this retrospective study was obtained and the requirement for informed consent was waived. Preoperative CT images of 734 patients were collected from the Qilu Hospital of Shandong University. The whole dataset was randomly split into 75% for the training cohort and 25% for the validation cohort, which were mutually exclusive. All patients were followed up every three months during the first three years after surgery, and every six months thereafter. The primary endpoint of this study was the occurrence of death; the median follow-up time was 35.6 months.</p>
<p>Our inclusion criteria of the data were as follows: (1) pathologically confirmed primary HGSOC; (2) primary debulking surgery was performed and clinical complete remission was achieved after treatment; and (3) available preoperative contrast-enhanced CT data. Our exclusion criteria were as follows: (1) incomplete&#xa0;clinical data&#xa0;(preoperative CA-125, age, FIGO stage, etc.) or survival data; and (2) unqualified CT images (e.g., motion artifacts).</p>
</sec>
<sec id="s2_2">
<title>CT image</title>
<p>For all patients, contrast-enhanced CT scanning was acquired at diagnosis. All the patients were examined using a multi-detector row spiral CT (MDCT) scanner (Philips Brilliance iCT) with the following scanning parameters: tube voltage, 120 kVp; tube current, automatic; beam pitch, 1; reconstruction thickness, 1mm; reconstruction interval, 1&#xa0;mm. Contrast-enhanced venous phase CT scan was used in this study. The contrast agent used was as follows: Ultravist 300, Bayer, Germany; contrast medium dose, weight (kg) &#xd7;1.2 mL; injection rate, 3 mL/sec. Scanning began 70 s after injection using a power injector.</p>
<p>CT examinations in this study were strictly performed in accordance with the principle of &#x201c;ALARA&#x201d; (i.e., as low as reasonably achievable). During the period of examination, each patient was scanned in suspended respiration. The scanning area was from the symphysis pubis to the diaphragm. A radiologist (10+ years&#x2019; experience, Dr. Fang Wang) manually selected a rectangular region of interest (ROI) containing the entire tumor in all CT slices from 734 patients. If multiple tumor areas were observed in one CT slice, multiple ROIs were selected. In total, 16517 tumor images were obtained for the deep learning model training.</p>
</sec>
<sec id="s2_3">
<title>Development of the deep learning model</title>
<p>We developed a Vit-based deep learning model to predict overall survival in patients with HGSOC based on preoperative CT images, as shown in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>. The model included two parts: the Vit part and the RNN part. The Vit part comprises a linear embedding layer, a transformer encoder block, and a feature-learning layer. In this part, after scaling to the same size (384 * 384 pixels), all tumor images were fed into the Vit (<xref ref-type="bibr" rid="B13">13</xref>), resulting in a semantically rich feature representation. Then, a recurrent neural network (RNN) was used to integrate the feature representation for each patient and report the final image score indicating the individual death risk. This image score was used for overall survival prediction and to stratify patients into different risk groups.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>The framework of the proposed Vit-based model. For each patient, being fed tumor images, the deep learning model outputs an image score which represents the patient&#x2019;s survival probability. This framework includes two parts: the ViT part <bold>(A)</bold> learned features and the RNN part <bold>(B)</bold> integrated the feature representation for each patient and reported the final image score.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-12-986089-g001.tif"/>
</fig>
<p>The deep learning model was implemented based on PyTorch (version 1.10.2) and Python 3.8.&#xa0;A variety of data augmentation techniques were used to increase the variability of training data during pre-training, including random flipping, rotation, and translation. Initializing with ImageNet pretrained weights, we trained the model with 70% of the data in the training cohort and used the rest of the dataset as the test set to get the best-performing model with a batch size of 16. Finally, the model was evaluated on the validation cohort. The patch size was 16 * 16; minimization of the loss was achieved <italic>via</italic> stochastic gradient descent (SGD) using the Adam optimizer and learning rate 0.0001. We used 128-dimensional vectors for the state representation of the recurrent unit and the number of recurrent steps was 100.</p>
</sec>
<sec id="s2_4">
<title>Overall survival analysis</title>
<p>To evaluate the prognostic value of the image score, we built a predictive model involving multimodal features and parameters such as image score, age, tumor diameter, FIGO stage, preoperative CA-125, and tumor location based on the resulting coefficients from the multivariate Cox analysis in the training cohort, and further constructed a nomogram. Then, calibration curves were drawn to evaluate and validate the application ability of the nomogram performance. Besides, the&#xa0;3- and 5-year&#xa0;survival&#xa0;probability were predicted by the nomogram and both predicted probabilities and the observed probabilities were illustrated by calibration curves.</p>
</sec>
<sec id="s2_5">
<title>Statistical analysis</title>
<p>Clinical characteristics were compared between the training set and the validation set by using the Mann-Whitney U test, x<sup>2</sup> test, or chi-square test, as appropriate. The significance of correlation between two covariates was assessed by Pearson correlation test. Based on&#xa0;quartiles&#xa0;of image scores, patients were categorized into four groups for survival analysis. Survival curves for overall survival were estimated using the Kaplan&#x2013;Meier method, and comparisons of statistical significance were performed with the stratified log-rank test within each group. Multivariable analyses were performed using the Cox proportional hazards model. Harrell&#x2019;s concordance-index (C-Index) was used to measure the concordance between the DL-predicted death risk and the actual survival time. The area under the receiver operating characteristic curve (AUC) was used to measure the discriminatory power of the model to predict survival.</p>
<p>All the statistical analyses were performed by using R software (version 3.6.3; <uri xlink:href="http://www.R-project.org">http://www.R-project.org</uri>) with packages rms, survival, survminer, and Hmisc. A P value&#xa0;of&#xa0;less than 0.05 was considered to indicate statistical significance.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Characteristics of patients</title>
<p>
<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref> summarizes the clinical characteristics in training and validation cohorts. The median follow-up interval was 35.6 months (interquartile range, 20.7-58.6 months), and death was observed in 25.89% (190 of 734). No significant difference was observed between the training and validation cohorts with regard to age, tumor diameter, FIGO stage, preoperative CA-125, tumor location, and vital status.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Clinical characteristics of patients in training and validation cohorts.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left">&#xa0;</th>
<th valign="top" align="center">Total N = 734</th>
<th valign="top" align="center">Training cohort N = 550</th>
<th valign="top" align="center">Validation cohort N = 184</th>
<th valign="top" align="center">p-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Age at diagnosis, mean (SD), y</td>
<td valign="top" align="center">52.36 (15.18)</td>
<td valign="top" align="center">52.59 (15.24)</td>
<td valign="top" align="center">51.75 (15.08)</td>
<td valign="top" align="center">0.54</td>
</tr>
<tr>
<td valign="top" align="left">Follow-up time, median (IQR), m</td>
<td valign="top" align="center">35.6 (20.7, 58.6)</td>
<td valign="top" align="center">35.0 (20.0, 57.4)</td>
<td valign="top" align="center">36.9 (21.8, 61.1)</td>
<td valign="top" align="center"/>
</tr>
<tr>
<td valign="top" align="left">Tumor diameter, mean (SD), mm</td>
<td valign="top" align="center">10.10 (6.04)</td>
<td valign="top" align="center">10.23 (6.10)</td>
<td valign="top" align="center">9.73 (5.87)</td>
<td valign="top" align="center">0.36</td>
</tr>
<tr>
<td valign="top" align="left">CA-125, mean (SD) , U/ml</td>
<td valign="top" align="center">1019.3 (1381.1)</td>
<td valign="top" align="center">1042.7 (1437.9)</td>
<td valign="top" align="center">949.5 (1196.2)</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">Tumor location, No. (%)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Unilateral</td>
<td valign="top" align="center">307 (41.83)</td>
<td valign="top" align="center">220 (40.00)</td>
<td valign="top" align="center">87 (47.28)</td>
<td valign="top" rowspan="2" align="center">0.10</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Bilateral</td>
<td valign="top" align="center">427 (58.17)</td>
<td valign="top" align="center">330 (60.00)</td>
<td valign="top" align="center">97 (52.72)</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">FIGO stage, No. (%)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;I</td>
<td valign="top" align="center">209 (28.47)</td>
<td valign="top" align="center">155 (28.18)</td>
<td valign="top" align="center">54 (29.35)</td>
<td valign="top" rowspan="4" align="center">0.94</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;II</td>
<td valign="top" align="center">77 (10.49)</td>
<td valign="top" align="center">59 (10.73)</td>
<td valign="top" align="center">18 (9.78)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;III</td>
<td valign="top" align="center">386 (52.59)</td>
<td valign="top" align="center">291 (52.91)</td>
<td valign="top" align="center">95 (51.63)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;IV</td>
<td valign="top" align="center">62 (8.44)</td>
<td valign="top" align="center">45 (8.18)</td>
<td valign="top" align="center">17 (9.24)</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">Vital status , No. (%)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Alive</td>
<td valign="top" align="center">544 (74.11)</td>
<td valign="top" align="center">412 (74.91)</td>
<td valign="top" align="center">132 (71.74)</td>
<td valign="top" rowspan="2" align="center">0.45</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Dead</td>
<td valign="top" align="center">190 (25.89)</td>
<td valign="top" align="center">138 (25.09)</td>
<td valign="top" align="center">52 (28.26)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The&#xa0;correlations between&#xa0;all clinical characteristics and the image score got from the deep learning model in training cohort and validation cohort are presented on the scatterplot matrix shown in <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>. Notably, the correlation matrix showed that the image score was not correlated with any clinical characteristics in the validation cohort and was only weakly&#xa0;correlated&#xa0;with&#xa0;stage in the training cohort. The results suggested that the image score obtained from our deep learning model was an independent prognostic factor for the survival time of patients with HGSOC.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Correlation matrix of clinical characteristics and the image score in training cohort <bold>(A)</bold> and validation cohort <bold>(B)</bold>. Values in this figure indicated the correlation coefficient of two corresponding variables. The colour and the size of the circles represent the strength of the correlation. Lack of color means no correlation.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-12-986089-g002.tif"/>
</fig>
</sec>
<sec id="s3_2">
<title>Performance of the proposed deep learning model</title>
<p>A Vit-based deep learning model using preoperative CT images was proposed in this study to predict overall survival in patients with HGSOC. The model framework is described in the Materials and Methods and shown in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>.</p>
<p>Our Vit-based deep learning model showed promising results in predicting survival, with an AUC of 0.822 (95% CI: 0.804&#x2013;0.858) in the training cohort and 0.823 (95% CI: 0.795&#x2013;0.862) in the validation cohort (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3A</bold>
</xref>). The sensitivity of the model was 85.2% in the training cohort and 83.7% in the validation cohort, while specificity was 72.4% in the training cohort and 69.5% in the validation cohort. According to the ROC curve, this&#xa0;model&#xa0;has a prognostic value exceeding that&#xa0;of&#xa0;FIGO stage and all clinical characters (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>). When we combined image score and clinical characters together, we found that the&#xa0;ROC&#xa0;score appeared similar to that of image score alone.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>The receiver operating characteristic curve (ROC) in training cohort <bold>(A)</bold> and validation cohort <bold>(B)</bold>. IS, image score; CC, clinical characters.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-12-986089-g003.tif"/>
</fig>
</sec>
<sec id="s3_3">
<title>Multivariable Cox regression analysis</title>
<p>As we mentioned above, the correlation matrix suggested that the image score obtained from our model was an independent prognostic factor for OS; we next performed the univariate and multivariate&#xa0;Cox&#xa0;regression analyses to further characterize the association between the image score and survival (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>). The multivariate Cox regression analysis indicated that the image score and FIGO stage were independent prognostic factors in the training and validation cohorts. The multivariable-adjusted HRs of the image score were 9.03 (95% CI: 4.38, 18.65; p &lt; 0.001) in training cohort and 9.59 (95% CI: 4.20, 21.92; p &lt; 0.001) in validation cohort.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Univariable and Multivariable Analyses of Overall Survival in training and validation cohorts.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" colspan="5" align="left">Univariable Analysis</th>
</tr>
<tr>
<th valign="top" align="left"/>
<th valign="top" colspan="2" align="center">Training cohort</th>
<th valign="top" colspan="2" align="center">Validation cohort</th>
</tr>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="center">HR (95%CI)</th>
<th valign="top" align="center">p-value</th>
<th valign="top" align="center">HR (95%CI)</th>
<th valign="top" align="center">p-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Age at diagnosis</td>
<td valign="top" align="center">1.03 (1.01-1.04)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">1.02 (1.00-1.03)</td>
<td valign="top" align="center">0.13</td>
</tr>
<tr>
<td valign="top" align="left">Tumor diameter</td>
<td valign="top" align="center">0.97 (0.94-1.0)</td>
<td valign="top" align="center">0.04</td>
<td valign="top" align="center">0.97 (0.93-1.02)</td>
<td valign="top" align="center">0.289</td>
</tr>
<tr>
<td valign="top" align="left">CA-125</td>
<td valign="top" align="center">1 (1-1)</td>
<td valign="top" align="center">0.04</td>
<td valign="top" align="center">1 (1-1)</td>
<td valign="top" align="center">&lt; 0.001</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">Side</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Unilateral</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Bilateral</td>
<td valign="top" align="center">1.65 (1.14-2.38)</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">2.37 (1.28-4.38)</td>
<td valign="top" align="center">0.006</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">FIGO stage</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;I</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;II</td>
<td valign="top" align="center">4.05 (2.06-7.97)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">2.86 (1.35-6.09)</td>
<td valign="top" align="center">0.006</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;III</td>
<td valign="top" align="center">5.34 (3.13-9.12)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">4.34 (2.48-7.60)</td>
<td valign="top" align="center">&lt; 0.001</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;IV</td>
<td valign="top" align="center">6.54 (3.45-12.42)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">5.25 (2.63-10.48)</td>
<td valign="top" align="center">&lt; 0.001</td>
</tr>
<tr>
<td valign="top" align="left">Image_score</td>
<td valign="top" align="center">7.8 (3.83-15.88)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">6.84 (3.07-15.27)</td>
<td valign="top" align="center">&lt; 0.001</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">
<bold>Multivariable Analysis</bold>
</td>
</tr>
<tr>
<td valign="top" align="left"/>
<td valign="top" colspan="2" align="center">
<bold>Training cohort</bold>
</td>
<td valign="top" colspan="2" align="center">
<bold>Validation cohort</bold>
</td>
</tr>
<tr>
<td valign="top" align="left"/>
<td valign="top" align="center">
<bold>HR (95%CI)</bold>
</td>
<td valign="top" align="center">
<bold>p-value</bold>
</td>
<td valign="top" align="center">
<bold>HR (95%CI)</bold>
</td>
<td valign="top" align="center">
<bold>p-value</bold>
</td>
</tr>
<tr>
<td valign="top" align="left">Age at diagnosis</td>
<td valign="top" align="center">1.01 (1.00-1.03)</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">1.02 (0.99-1.05)</td>
<td valign="top" align="center">0.12</td>
</tr>
<tr>
<td valign="top" align="left">Tumor diameter</td>
<td valign="top" align="center">1.00 (0.96-1.03)</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.99 (0.94-1.04)</td>
<td valign="top" align="center">0.64</td>
</tr>
<tr>
<td valign="top" align="left">CA-125</td>
<td valign="top" align="center">1.00 (1.00-1.00)</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">1.00 (1.00-1.00)</td>
<td valign="top" align="center">0.02</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">Side, No.(%)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Unilateral</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Bilateral</td>
<td valign="top" align="center">0.84 (0.06-1.27)</td>
<td valign="top" align="center">0.41</td>
<td valign="top" align="center">0.91 (0.45-1.81)</td>
<td valign="top" align="center">0.78</td>
</tr>
<tr>
<td valign="top" colspan="5" align="left">FIGO stage, No.(%)</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;I</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
<td valign="top" align="center">1.0 (referent)</td>
<td valign="top" align="center">referent</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;II</td>
<td valign="top" align="center">3.50 (1.75-7.00)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">1.23 (0.24-6.29)</td>
<td valign="top" align="center">0.81</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;III</td>
<td valign="top" align="center">5.52 (3.03-10.04)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">3.38 (1.30-8.83)</td>
<td valign="top" align="center">0.01</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;IV</td>
<td valign="top" align="center">5.60 (2.72-11.50)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">4.65 (1.38-15.73)</td>
<td valign="top" align="center">0.01</td>
</tr>
<tr>
<td valign="top" align="left">Image_score</td>
<td valign="top" align="center">9.03 (4.38-18.65)</td>
<td valign="top" align="center">&lt; 0.001</td>
<td valign="top" align="center">9.59 (4.20-21.92)</td>
<td valign="top" align="center">&lt; 0.001</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Though FIGO stage is an acknowledged risk factor for HGSOC survival, the Kaplan&#x2013;Meier survival analysis suggested that only patients with stage&#xa0;I disease had significantly better OS compared to patients with stage II/III/IV disease (p &lt; 0.001), while survival did not differ significantly between the stage&#xa0;II, III&#xa0;and&#xa0;IV&#xa0;patient subgroups (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>). Therefore, considering that FIGO stage could not stratify patients precisely, we categorized patients into four groups based on the&#xa0;cut-off&#xa0;values of the image score. Significant discrimination between the survival of the four patient groups was observed in the two cohorts (all p &lt; 0.05, log-rank test) (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>).</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Kaplan-Meier survival curves according to tumor stages in training cohort <bold>(A)</bold> and validation cohort <bold>(C)</bold>. Kaplan-Meier survival curves based on risk stratification according to image score in training cohort <bold>(B)</bold> and validation cohort <bold>(D)</bold>. The shadow indicates the 95% confidence interval.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-12-986089-g004.tif"/>
</fig>
</sec>
<sec id="s3_4">
<title>Nomogram construction and validation</title>
<p>Finally, we constructed a nomogram for HGSOC survival prediction on the basis of the selected clinical characteristics and model-based image score (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5A</bold>
</xref>). By drawing a vertical line down to the axis labeled points, each covariate in the model was assigned a score. A 3-year or 5-year survival probability can be calculated by summing the total score and placing it on the total points scale. The C-index value was 0.74 in the training cohort and 0.72 in the validation cohort. Further, the calibration curves demonstrated high consistency between the nomogram-predicted 3-year and 5-year survival probabilities and the actual outcome in both the training and validation cohorts (<xref ref-type="fig" rid="f5">
<bold>Figures&#xa0;5B&#x2013;E</bold>
</xref>). Our&#xa0;above&#xa0;results&#xa0;revealed&#xa0;that the nomogram had high discriminative and calibration power.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Generation and evaluation of nomogram. <bold>(A)</bold> A constructed nomogram for prognostic prediction of 3-year and 5-year overall survival for patients with HGSOC. <bold>(B, C)</bold> Calibration curves of 3-year and 5-year OS for HGSOC patients in the training cohort. <bold>(D, E)</bold> Calibration curves of 3-year and 5-year OS for HGSOC patients in the validation cohort. Dash line represents the ideal agreement, the red dots are calculated by bootstrapping.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-12-986089-g005.tif"/>
</fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<p>Our work proposed a novel and innovative deep learning framework for prognosis and prediction in HGSOC based on preoperative CT images. Combining Vision Transformer with RNNs, our model extracted an image score from preoperative CT images for each patient, which was then shown to be an independent prognostic factor for HGSOC (AUC is 0.822 in the training cohort and 0.823 in the validation cohort) and had prognostic value beyond other clinical characteristics. Then, incorporating clinical characteristics and the model-based image score, a concise nomogram was constructed for survival prediction, intended to serve as a practical guide to clinicians when recommending an appropriate management strategy for HGSOC patients.</p>
<p>In recent years, radiomics has grown rapidly as a medical research field due to the combination of radiographic images and data about clinical outcomes. In earlier radiomics studies, medical images are assessed visually by trained physicians for the purpose of detecting, characterizing, and monitoring diseases, which often relies on education and experience and can be subjective at times. Deep learning, however, excels at identifying complex patterns and can provide automated quantitative assessments in contrast to qualitative reasoning (<xref ref-type="bibr" rid="B16">16</xref>). More meaningfully, the feature representations automatically learned by deep learning techniques hint at the substantial clinical relevance of many of these radiographic features (<xref ref-type="bibr" rid="B17">17</xref>). Preoperative CT examination is very necessary for patients with solid tumors and it may contain multiple anatomic and nonanatomic elements which may enhance the capacity to prognosticate (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>). In ovarian cancer, preoperative CT is an important means of staging and treatment decision (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B21">21</xref>); however, the prognostic information provided by manual&#xa0;observation is limited, and we believe that through deep learning, more information about prognosis could be directly extracted and could constitute the prognostic index for patients with ovarian cancer. Several previous studies reported the efficacy of deep learning for the survival prediction of cancer on radiology images (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B22">22</xref>&#x2013;<xref ref-type="bibr" rid="B24">24</xref>), but most of them have used traditional CNNs. Wang et&#xa0;al. constructed a non-invasive recurrence prediction model based on CNN (<xref ref-type="bibr" rid="B8">8</xref>). Avesani et&#xa0;al. 
used a CNN as feature extractor to predict progression free survival and BRCA mutational status (<xref ref-type="bibr" rid="B25">25</xref>). Comprising 44,732 slides from 15,187 patients, Gabriele et&#xa0;al. developed a deep learning framework that combined CNNs with RNNs to diagnose prostate cancer, the semantically rich tile-level feature representations resulted from CNNs were then used in a RNN to integrate the information across the whole slide and report the final classification result (<xref ref-type="bibr" rid="B26">26</xref>). The AUC of this model was above 0.98 and its clinical application would allow pathologists to exclude 65&#x2013;75% of slides while retaining 100% sensitivity. Although in the field of medical image analysis, CNNs have been widely adopted, they have inherent limitations. CNN is good at focusing on extraction of local information, but this means the receptive field is limited and the global feature is hard to be captured.</p>
<p>In contrast, mainly based on self-attention mechanisms, the primary advantage of Transformer is its global receptive field and focus on the aggregation of global information. In the past few years, Transformers have been dominant in the natural language processing field and have been used in speech recognition (<xref ref-type="bibr" rid="B27">27</xref>), machine translation (<xref ref-type="bibr" rid="B28">28</xref>), and language modeling (<xref ref-type="bibr" rid="B29">29</xref>). More recently, to overcome these limitations of CNN in computer vision problems, equipped with the Transformer architecture, Vision Transformer (ViT) was proposed to model long-range dependency among pixels through the self-attention mechanism (<xref ref-type="bibr" rid="B12">12</xref>), and has demonstrated state-of-the-art (SOTA) performance in a variety of vision tasks including object detection (<xref ref-type="bibr" rid="B30">30</xref>), classification (<xref ref-type="bibr" rid="B13">13</xref>), segmentation (<xref ref-type="bibr" rid="B31">31</xref>), and so on. At present, in the field of cancer, there are several studies using ViT for classification tasks (<xref ref-type="bibr" rid="B32">32</xref>&#x2013;<xref ref-type="bibr" rid="B34">34</xref>) and cancer region detection and segmentation tasks (<xref ref-type="bibr" rid="B35">35</xref>). As far as we know, our study is the first to attempt to apply ViT for the survival prediction of HGSOC. Our proposed ViT-based model integrates the advantages of ViT and RNN, enabling the model to have overwhelming effect on survival prediction.</p>
<p>Although our model performed well, there are also several limitations. First, the CT images to construct the model were collected from only one manufacturer; different CT scanners may lead to distinct image features. Second, since we still require manual tumor annotation (although only bounding box) on CT images, our model is not fully automated. Third, we lacked an additional cohort for&#xa0;external&#xa0;validation. Finally, we only focused on the high-grade serous carcinoma subtype; further validation studies are warranted for other ovarian cancer subtypes. Hence, in the future, a more general and more robust model trained on more data should be considered. Our research team is currently collecting more data from other medical centers to further validate and improve the current model. In addition to preoperative CT images and clinical information, whole slide images (WSIs) of hematoxylin and eosin-stained postoperative pathological slides were also collected to develop the multimodal model.</p>
<p>In conclusion, this study shows a new deep learning model which could output an independent prognostic risk score for predicting survival in patients with HGSOC through their preoperative CT images. Combined with other clinical characteristics, the score was used to construct a simple, yet not trivial nomogram, which would have potential as a useful tool in creating optimal&#xa0;individualized&#xa0;therapeutic approaches for HGSOC patients. Our study could help predict the survival prognostication for HGSOC patients and may facilitate clinical decision making in the era of individualized and precision&#xa0;medicine.</p>
</sec>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material. Further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="s6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by Institutional Ethics committee of Qilu Hospital. The patients/participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>YZ: Conceptualization, methodology, and writing (original draft). FW: Conceptualization, image processing and methodology. WZ: Formal analysis, data curation and writing (review and editing). YL: Formal analysis and data curation. BY: Image processing and formal analysis. XY: Conceptualization, and supervision. TD: Conceptualization, project administration and funding acquisition. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>This study was funded by Innovation and Development Joint Funds of Natural Science Foundation of Shandong Province (ZR2021LZL009).</p>
</sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="web">
<person-group person-group-type="author">
<collab>World Health Organization International Agency for Research on Cancer</collab>
</person-group>. <article-title>IARC factsheet</article-title>, in: <source>Ovary</source> . Available at: <uri xlink:href="https://gco.iarc.fr/today/data/factsheets/cancers/25-Ovary-fact-sheet.pdf">https://gco.iarc.fr/today/data/factsheets/cancers/25-Ovary-fact-sheet.pdf</uri> (Accessed <access-date>February 4, 2022</access-date>).</citation>
</ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schulz</surname> <given-names>H</given-names>
</name>
<name>
<surname>Kuhn</surname> <given-names>C</given-names>
</name>
<name>
<surname>Hofmann</surname> <given-names>S</given-names>
</name>
<name>
<surname>Mayr</surname> <given-names>D</given-names>
</name>
<name>
<surname>Mahner</surname> <given-names>S</given-names>
</name>
<name>
<surname>Jeschke</surname> <given-names>U</given-names>
</name>
<etal/>
</person-group>. <article-title>Overall survival of ovarian cancer patients is determined by expression of galectins-8 and-9</article-title>. <source>International journal of molecular sciences</source> (<year>2018</year>) <volume>19</volume>(<issue>1</issue>):<fpage>323</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijms19010323</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rizzuto</surname> <given-names>I</given-names>
</name>
<name>
<surname>Stavraka</surname> <given-names>C</given-names>
</name>
<name>
<surname>Chatterjee</surname> <given-names>J</given-names>
</name>
<name>
<surname>Borley</surname> <given-names>J</given-names>
</name>
<name>
<surname>Hopkins</surname> <given-names>TG</given-names>
</name>
<name>
<surname>Gabra</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>Risk of ovarian cancer relapse score: A prognostic algorithm to predict relapse following treatment for advanced ovarian cancer</article-title>. <source>International Journal of Gynecologic Cancer</source> (<year>2015</year>) <volume>25</volume>(<issue>3</issue>):<page-range>416&#x2013;22</page-range>. doi: <pub-id pub-id-type="doi">10.1097/IGC.0000000000000361</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lambin</surname> <given-names>P</given-names>
</name>
<name>
<surname>Leijenaar</surname> <given-names>RT</given-names>
</name>
<name>
<surname>Deist</surname> <given-names>TM</given-names>
</name>
<name>
<surname>Peerlings</surname> <given-names>J</given-names>
</name>
<name>
<surname>Jong De</surname> <given-names>EE</given-names>
</name>
<name>
<surname>Timmeren Van</surname> <given-names>J</given-names>
</name>
<etal/>
</person-group>. <article-title>Radiomics: The bridge between medical imaging and personalized medicine</article-title>. <source>Nature reviews Clinical oncology</source> (<year>2017</year>) <volume>14</volume>(<issue>12</issue>):<page-range>749&#x2013;62</page-range>. doi: <pub-id pub-id-type="doi">10.1038/nrclinonc.2017.141</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jensen</surname> <given-names>GL</given-names>
</name>
<name>
<surname>Yost</surname> <given-names>CM</given-names>
</name>
<name>
<surname>Mackin</surname> <given-names>DS</given-names>
</name>
<name>
<surname>Fried</surname> <given-names>DV</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>S</given-names>
</name>
<name>
<surname>Court</surname> <given-names>LE</given-names>
</name>
<etal/>
</person-group>. <article-title>Prognostic value of combining a quantitative image feature from positron emission tomography with clinical factors in oligometastatic non-small cell lung cancer</article-title>. <source>Radiotherapy and Oncology</source> (<year>2018</year>) <volume>126</volume>(<issue>2</issue>):<page-range>362&#x2013;7</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.radonc.2017.11.006</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Yuan</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>W</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Li</surname> <given-names>T</given-names>
</name>
<etal/>
</person-group>. <article-title>Predicting peritoneal recurrence and disease-free survival from CT images in gastric cancer with multitask deep learning: A retrospective study</article-title>. <source>Lancet Digit Health</source> (<year>2022</year>) <volume>4</volume>(<issue>5</issue>):<page-range>e340&#x2013;50</page-range>. doi: <pub-id pub-id-type="doi">10.1016/S2589-7500(22)00040-1</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>H</given-names>
</name>
<name>
<surname>Goo</surname> <given-names>JM</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>KH</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>YT</given-names>
</name>
<name>
<surname>Park</surname> <given-names>CM</given-names>
</name>
</person-group>. <article-title>Preoperative CT-based deep learning model for predicting disease-free survival in patients with lung adenocarcinomas</article-title>.  <source>Radiology</source>(<year>2020</year>) <volume>296</volume>(<issue>1</issue>):<page-range>216&#x2013;24</page-range>. doi: <pub-id pub-id-type="doi">10.1148/radiol.2020192764</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Rong</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>B</given-names>
</name>
<name>
<surname>Bai</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>W</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning provides a new computed tomography-based prognostic biomarker for recurrence prediction in high-grade serous ovarian cancer</article-title>. <source>Radiotherapy and Oncology</source> (<year>2019</year>) <volume>132</volume>:<page-range>171&#x2013;7</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.radonc.2018.10.019</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yin</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>W</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>J</given-names>
</name>
<name>
<surname>Quan</surname> <given-names>G</given-names>
</name>
<etal/>
</person-group>. <article-title>Domain progressive 3D residual convolution network to improve low-dose CT imaging</article-title>. <source>IEEE transactions on medical imaging</source> (<year>2019</year>) <volume>38</volume>(<issue>12</issue>):<page-range>2903&#x2013;13</page-range>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2019.2917258</pub-id>
</citation>
</ref>
<ref id="B10">
<label>10</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Parekh</surname> <given-names>VS</given-names>
</name>
<name>
<surname>Jacobs</surname> <given-names>MA</given-names>
</name>
</person-group>. <article-title>Deep learning and radiomics in precision medicine</article-title>. <source>Expert review of precision medicine and drug development</source> (<year>2019</year>) <volume>4</volume>(<issue>2</issue>):<fpage>59</fpage>&#x2013;<lpage>72</lpage>. doi: <pub-id pub-id-type="doi">10.1080/23808993.2019.1585805</pub-id>
</citation>
</ref>
<ref id="B11">
<label>11</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kermany</surname> <given-names>DS</given-names>
</name>
<name>
<surname>Goldbaum</surname> <given-names>M</given-names>
</name>
<name>
<surname>Cai</surname> <given-names>W</given-names>
</name>
<name>
<surname>Valentim</surname> <given-names>CCS</given-names>
</name>
<name>
<surname>Liang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Baxter</surname> <given-names>SL</given-names>
</name>
<etal/>
</person-group>. <article-title>Identifying medical diagnoses and treatable diseases by image-based deep learning</article-title>. <source>Cell</source> (<year>2018</year>) <volume>172</volume>(<issue>5</issue>):<fpage>1122</fpage>&#x2013;<lpage>1131. e9</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cell.2018.02.010</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Vaswani</surname> <given-names>A</given-names>
</name>
<name>
<surname>Shazeer</surname> <given-names>N</given-names>
</name>
<name>
<surname>Parmar</surname> <given-names>N</given-names>
</name>
<name>
<surname>Uszkoreit</surname> <given-names>J</given-names>
</name>
<name>
<surname>Jones</surname> <given-names>L</given-names>
</name>
<name>
<surname>Gomez</surname> <given-names>AN</given-names>
</name>
<etal/>
</person-group>. <article-title>Attention is all you need</article-title>. <source>Advances in neural information processing systems</source> (<year>2017</year>). p. <fpage>30</fpage>.</citation>
</ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dosovitskiy</surname> <given-names>A</given-names>
</name>
<name>
<surname>Beyer</surname> <given-names>L</given-names>
</name>
<name>
<surname>Kolesnikov</surname> <given-names>A</given-names>
</name>
<name>
<surname>Weissenborn</surname> <given-names>D</given-names>
</name>
<name>
<surname>Zhai</surname> <given-names>X</given-names>
</name>
<name>
<surname>Unterthiner</surname> <given-names>T</given-names>
</name>
<etal/>
</person-group>. <article-title>An image is worth 16x16 words: Transformers for image recognition at scale</article-title>. arXiv preprint arXiv:2010.11929 (<year>2020</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2010.11929</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>S</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Xia</surname> <given-names>S</given-names>
</name>
<name>
<surname>Yao</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Qian</surname> <given-names>WJPiM</given-names>
</name>
<etal/>
</person-group>. <article-title>A vision transformer for emphysema classification using CT images</article-title>. <source>Physics in Medicine &amp; Biology</source> (<year>2021</year>) <volume>66</volume>(<issue>24</issue>):<fpage>245016</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1361-6560/ac3dc8</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname> <given-names>S</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>G</given-names>
</name>
<name>
<surname>Oh</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Seo</surname> <given-names>JB</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>SM</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>JH</given-names>
</name>
<etal/>
</person-group>. <article-title>Multi-task vision transformer using low-level chest X-ray feature corpus for COVID-19 diagnosis and severity quantification</article-title>. <source>Medical Image Analysis</source> (<year>2022</year>) <volume>75</volume>:<fpage>102299</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2021.102299</pub-id>
</citation>
</ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hosny</surname> <given-names>A</given-names>
</name>
<name>
<surname>Parmar</surname> <given-names>C</given-names>
</name>
<name>
<surname>Quackenbush</surname> <given-names>J</given-names>
</name>
<name>
<surname>Schwartz</surname> <given-names>LH</given-names>
</name>
<name>
<surname>Aerts</surname> <given-names>H</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in radiology</article-title>. (<year>2018</year>) <volume>18</volume>(<issue>8</issue>):<page-range>500&#x2013;10</page-range>. doi: <pub-id pub-id-type="doi">10.1038/s41568-018-0016-5</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Litjens</surname> <given-names>G</given-names>
</name>
<name>
<surname>Kooi</surname> <given-names>T</given-names>
</name>
<name>
<surname>Bejnordi</surname> <given-names>BE</given-names>
</name>
<name>
<surname>Setio</surname> <given-names>AAA</given-names>
</name>
<name>
<surname>Ciompi</surname> <given-names>F</given-names>
</name>
<name>
<surname>Ghafoorian</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>A survey on deep learning in medical image analysis</article-title>. <source>Medical image analysis</source> (<year>2017</year>) <volume>42</volume>:<fpage>60</fpage>&#x2013;<lpage>88</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id>
</citation>
</ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sali</surname> <given-names>L</given-names>
</name>
<name>
<surname>Falchini</surname> <given-names>M</given-names>
</name>
<name>
<surname>Taddei</surname> <given-names>A</given-names>
</name>
<name>
<surname>Mascalchi</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>Role of preoperative CT colonography in patients with colorectal cancer</article-title>. <source>World Journal of Gastroenterology: WJG</source> (<year>2014</year>) <volume>20</volume>(<issue>14</issue>):<fpage>3795</fpage>. doi: <pub-id pub-id-type="doi">10.3748/wjg.v20.i14.3795</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heidenreich</surname> <given-names>A</given-names>
</name>
<name>
<surname>Ravery</surname> <given-names>VJ</given-names>
</name>
</person-group>. <article-title>Preoperative imaging in renal cell cancer</article-title>. <source>World journal of urology</source> (<year>2004</year>) <volume>22</volume>(<issue>5</issue>):<page-range>307&#x2013;15</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s00345-004-0411-2</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Avesani</surname> <given-names>G</given-names>
</name>
<name>
<surname>Arshad</surname> <given-names>M</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>H</given-names>
</name>
<name>
<surname>Fotopoulou</surname> <given-names>C</given-names>
</name>
<name>
<surname>Cannone</surname> <given-names>F</given-names>
</name>
<name>
<surname>Melotti</surname> <given-names>R</given-names>
</name>
<etal/>
</person-group>. <article-title>Radiological assessment of peritoneal cancer index on preoperative CT in ovarian cancer is related to surgical outcome and survival</article-title>. <source>La radiologia medica</source> (<year>2020</year>) <volume>125</volume>(<issue>8</issue>):<page-range>770&#x2013;6</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s11547-020-01170-6</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferrandina</surname> <given-names>G</given-names>
</name>
<name>
<surname>Sallustio</surname> <given-names>G</given-names>
</name>
<name>
<surname>Fagotti</surname> <given-names>A</given-names>
</name>
<name>
<surname>Vizzielli</surname> <given-names>G</given-names>
</name>
<name>
<surname>Paglia</surname> <given-names>A</given-names>
</name>
<name>
<surname>Cucci</surname> <given-names>E</given-names>
</name>
<etal/>
</person-group>. <article-title>Role of CT scan-based and clinical evaluation in the preoperative prediction of optimal cytoreduction in advanced ovarian cancer: A prospective trial</article-title>. <source>British Journal of Cancer</source> (<year>2009</year>) <volume>101</volume>(<issue>7</issue>):<page-range>1066&#x2013;73</page-range>. doi: <pub-id pub-id-type="doi">10.1038/sj.bjc.6605292</pub-id>
</citation>
</ref>
<ref id="B22">
<label>22</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Korfiatis</surname> <given-names>P</given-names>
</name>
<name>
<surname>Kline</surname> <given-names>TL</given-names>
</name>
<name>
<surname>Lachance</surname> <given-names>DH</given-names>
</name>
<name>
<surname>Parney</surname> <given-names>IF</given-names>
</name>
<name>
<surname>Buckner</surname> <given-names>JC</given-names>
</name>
<name>
<surname>Erickson</surname> <given-names>BJ</given-names>
</name>
<etal/>
</person-group>. <article-title>Residual deep convolutional neural network predicts MGMT methylation status</article-title>. <source>Journal of Digital Imaging</source> (<year>2017</year>) <volume>30</volume>(<issue>5</issue>):<page-range>622&#x2013;8</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s10278-017-0009-z</pub-id>
</citation>
</ref>
<ref id="B23">
<label>23</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>HS</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>YJ</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>KG</given-names>
</name>
<name>
<surname>Park</surname> <given-names>JS</given-names>
</name>
</person-group>. <article-title>Preoperative CT texture features predict prognosis after curative resection in pancreatic cancer</article-title>. <source>Scientific Reports</source> (<year>2019</year>) <volume>9</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-019-53831-w</pub-id>
</citation>
</ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Li</surname> <given-names>X</given-names>
</name>
<name>
<surname>Li</surname> <given-names>XY</given-names>
</name>
<name>
<surname>Huo</surname> <given-names>JW</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>FJ</given-names>
</name>
<name>
<surname>Luo</surname> <given-names>TY</given-names>
</name>
<etal/>
</person-group>. <article-title>Spectral CT in lung cancer: usefulness of iodine concentration for evaluation of tumor angiogenesis and prognosis</article-title>. <source>American Journal of Roentgenology</source> (<year>2020</year>) <volume>215</volume>(<issue>3</issue>):<fpage>595</fpage>&#x2013;<lpage>602</lpage>. doi: <pub-id pub-id-type="doi">10.2214/AJR.19.22688</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Avesani</surname> <given-names>G</given-names>
</name>
<name>
<surname>Tran</surname> <given-names>HE</given-names>
</name>
<name>
<surname>Cammarata</surname> <given-names>G</given-names>
</name>
<name>
<surname>Botta</surname> <given-names>F</given-names>
</name>
<name>
<surname>Raimondi</surname> <given-names>S</given-names>
</name>
<name>
<surname>Russo</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>CT-based radiomics and deep learning for BRCA mutation and progression-free survival prediction in ovarian cancer using a multicentric dataset</article-title>. <source>Cancers</source> (<year>2022</year>) <volume>14</volume>(<issue>11</issue>):<fpage>2739</fpage>. doi: <pub-id pub-id-type="doi">10.3390/cancers14112739</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Campanella</surname> <given-names>G</given-names>
</name>
<name>
<surname>Hanna</surname> <given-names>MG</given-names>
</name>
<name>
<surname>Geneslaw</surname> <given-names>L</given-names>
</name>
<name>
<surname>Miraflor</surname> <given-names>A</given-names>
</name>
<name>
<surname>Werneck Krauss Silva</surname> <given-names>V</given-names>
</name>
<name>
<surname>Busam</surname> <given-names>KJ</given-names>
</name>
<etal/>
</person-group>. <article-title>Clinical-grade computational pathology using weakly supervised deep learning on whole slide images</article-title>. <source>Nature Medicine</source> (<year>2019</year>) <volume>25</volume>(<issue>8</issue>):<page-range>1301&#x2013;9</page-range>. doi: <pub-id pub-id-type="doi">10.1038/s41591-019-0508-1</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>N</given-names>
</name>
<name>
<surname>Watanabe</surname> <given-names>S</given-names>
</name>
<name>
<surname>Villalba</surname> <given-names>J</given-names>
</name>
<name>
<surname>&#x17b;elasko</surname> <given-names>P</given-names>
</name>
<name>
<surname>Dehak</surname> <given-names>N</given-names>
</name>
</person-group>. <article-title>Non-autoregressive transformer for speech recognition</article-title>. <source>IEEE Signal Processing Letters</source> (<year>2020</year>) <volume>28</volume>:<page-range>121&#x2013;5</page-range>. doi: <pub-id pub-id-type="doi">10.1109/LSP.2020.3044547</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Li</surname> <given-names>B</given-names>
</name>
<name>
<surname>Xiao</surname> <given-names>T</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Li</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wong</surname> <given-names>DF</given-names>
</name>
<etal/>
</person-group>. <article-title>Learning deep transformer models for machine translation</article-title>. arXiv preprint arXiv:1906.01787 (<year>2019</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.1906.01787</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dai</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Carbonell</surname> <given-names>J</given-names>
</name>
<name>
<surname>Salakhutdinov</surname> <given-names>R</given-names>
</name>
<name>
<surname>Le</surname> <given-names>QV</given-names>
</name>
<etal/>
</person-group>. <article-title>Transformer-xl: Attentive language models beyond a fixed-length context</article-title>. arXiv preprint arXiv:1901.02860 (<year>2019</year>). doi: <pub-id pub-id-type="doi">10.48550/arXiv.1901.02860</pub-id>
</citation>
</ref>
<ref id="B30">
<label>30</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Su</surname> <given-names>W</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>L</given-names>
</name>
<name>
<surname>Li</surname> <given-names>B</given-names>
</name>
<name>
<surname>Dai</surname> <given-names>J</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X</given-names>
</name>
<etal/>
</person-group>. <article-title>Deformable detr: Deformable transformers for end-to-end object detection</article-title>. arXiv preprint arXiv:2010.04159 (<year>2020</year>). doi: <pub-id pub-id-type="doi">10.48550/arXiv.2010.04159</pub-id></citation>
</ref>
<ref id="B31">
<label>31</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zheng</surname> <given-names>S</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>H</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Luo</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers</article-title>. In: <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition</source> (<year>2021</year>) <volume>2021</volume>:<page-range>6881&#x2013;90</page-range>.</citation>
</ref>
<ref id="B32">
<label>32</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Gao</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Hong</surname> <given-names>B</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Jia</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>J</given-names>
</name>
<etal/>
</person-group>. <article-title>Instance-based vision transformer for subtyping of papillary renal cell carcinoma in histopathological image</article-title>. In: <source>International conference on medical image computing and computer-assisted intervention</source>. <publisher-loc>Springer, Cham</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2021</year>).</citation>
</ref>
<ref id="B33">
<label>33</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pan</surname> <given-names>L</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>L</given-names>
</name>
<name>
<surname>Ji</surname> <given-names>B</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>M</given-names>
</name>
<name>
<surname>Chongcheawchamnan</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>Noise-reducing attention cross fusion learning transformer for histological image classification of osteosarcoma</article-title>. <source>Biomedical Signal Processing and Control</source> (<year>2022</year>) <volume>77</volume>:<fpage>103824</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2022.103824</pub-id>
</citation>
</ref>
<ref id="B34">
<label>34</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ikromjanov</surname> <given-names>K</given-names>
</name>
<name>
<surname>Bhattacharjee</surname> <given-names>S</given-names>
</name>
<name>
<surname>Hwang</surname> <given-names>Y-B</given-names>
</name>
<name>
<surname>Sumon</surname> <given-names>RI</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>H-C</given-names>
</name>
<name>
<surname>Choi</surname> <given-names>H-K</given-names>
</name>
<etal/>
</person-group>. <article-title>Whole slide image analysis and detection of prostate cancer using vision transformers</article-title>. In: <source>2022 international conference on artificial intelligence in information and communication (ICAIIC)</source>. <publisher-name>IEEE</publisher-name> (<year>2022</year>).</citation>
</ref>
<ref id="B35">
<label>35</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sui</surname> <given-names>D</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>K</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>W</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>J</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>X</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>Z</given-names>
</name>
<etal/>
</person-group>. <article-title>Cst: A multitask learning framework for colorectal cancer region mining based on transformer</article-title>. <source>BioMed Research International</source> (<year>2021</year>) <volume>2021</volume>:<fpage>6207964</fpage>. doi: <pub-id pub-id-type="doi">10.1155/2021/6207964</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>