<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Genet.</journal-id>
<journal-title>Frontiers in Genetics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Genet.</abbrev-journal-title>
<issn pub-type="epub">1664-8021</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1119990</article-id>
<article-id pub-id-type="doi">10.3389/fgene.2023.1119990</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Genetics</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Intelligent diagnosis value of preoperative T staging of colorectal cancer based on MR medical imaging</article-title>
<alt-title alt-title-type="left-running-head">Wang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fgene.2023.1119990">10.3389/fgene.2023.1119990</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Junqing</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Bingqian</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhu</surname>
<given-names>Jing</given-names>
</name>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Junfeng</given-names>
</name>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Jiang</surname>
<given-names>Rui</given-names>
</name>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2133689/overview"/>
</contrib>
</contrib-group>
<aff>
<institution>Department of Radiodiagnosis</institution>, <institution>General Hospital of Western Warfare Zone</institution>, <addr-line>Chengdu</addr-line>, <addr-line>Sichuan</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1400481/overview">Deepak Kumar Jain</ext-link>, Chongqing University of Posts and Telecommunications, China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1867033/overview">Youbao Ma</ext-link>, Zhejiang University of Technology, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1868431/overview">Yanmin Wu</ext-link>, Zhengzhou University of Light Industry, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1647021/overview">Jun Zheng</ext-link>, Baotou Teachers&#x2019; College, China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Rui Jiang, <email>192931103@st.usst.edu.cn</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Computational Genomics, a section of the journal Frontiers in Genetics</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>16</day>
<month>02</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>14</volume>
<elocation-id>1119990</elocation-id>
<history>
<date date-type="received">
<day>09</day>
<month>12</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Wang, Chen, Zhu, Zhang and Jiang.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Wang, Chen, Zhu, Zhang and Jiang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Colorectal cancer is a common malignant tumor in clinic. With the change of people's diet, living environment and living habits, the incidence of colorectal cancer has risen sharply in recent years, which poses a great threat to people's health and quality of life. This paper aims to investigate the pathogenesis of colorectal cancer and improve the efficiency of clinical diagnosis and treatment. This paper firstly introduces MR medical imaging technology and related theories of colorectal cancer through literature survey, and then applies MR technology to preoperative T staging of colorectal cancer. 150 patients with colorectal cancer admitted to our hospital every month from January 2019 to January 2020 were used as research objects to carry out the application experiment of MR medical imaging in the intelligent diagnosis of preoperative T staging of colorectal cancer, and to explore the diagnostic sensitivity, specificity and histopathological T staging diagnosis coincidence rate of MR staging. The final study results showed that there was no statistical significance in the general data of stage T1-2, T3 and T4 patients (p &#x003e; 0.05); for patients with preoperative T stage of colorectal cancer, the overall diagnosis coincidence rate of MR was 89.73%, indicating that it was highly consistent with pathological T stage; compared with MR staging, the overall diagnosis coincidence rate of CT for preoperative T staging of colorectal cancer patients was 86.73%, which was basically consistent with the diagnosis of pathological T staging. At the same time, three different dictionary learning depth techniques are proposed in this study to solve the shortcomings of long MR scanning time and slow imaging speed. 
Through performance testing and comparison, it is found that the structural similarity of MR image reconstructed by depth dictionary method based on convolutional neural network is up to 99.67%, higher than that of analytic dictionary and synthetic dictionary, which proves that it has the best optimization effect on MR technology. The study indicated the importance of MR medical imaging in preoperative T staging diagnosis of colorectal cancer and the necessity of its popularization.</p>
</abstract>
<kwd-group>
<kwd>medical imaging technology</kwd>
<kwd>magnetic resonance imaging</kwd>
<kwd>colorectal cancer</kwd>
<kwd>preoperative T staging</kwd>
<kwd>intelligent diagnosis and treatment</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<sec id="s1-1">
<title>1.1 Background and meaning</title>
<p>The recent changes in people&#x2019;s diet, living environment, and living habits have also caused a series of malignant diseases such as colorectal cancer. Colorectal cancer is a malignant tumor disease common in middle-aged and elderly people. It is a type of disease in the digestive system and has a very high incidence in our country and the world. Because its early symptoms are hidden, it is easy to miss early diagnosis and treatment. Most patients have reached the middle and late stages when they are diagnosed, so they have a higher mortality and disability rate. Nowadays, MR medical imaging technology is widely used in clinical practice and has achieved good results (<xref ref-type="bibr" rid="B23">Xiao and Ding, 2019</xref>). This article&#x2019;s goal is to examine the value of MR medical imaging in the accurate diagnosis of colorectal cancer&#x2019;s preoperative T staging and to discover a successful method for early detection and treatment of colorectal cancer. Improving the diagnosis rate of patients will also improve their quality of life.</p>
</sec>
<sec id="s1-2">
<title>1.2 Related work</title>
<p>The incidence of early colorectal cancer is insidious, the clinical symptoms are not prominent, and there are many uncertainties with the increase of cancer. Because of the great harm of colorectal cancer to humans (<xref ref-type="bibr" rid="B28">Zhu et al., 2020</xref>), colorectal cancer has been studied very early, and many methods of diagnosis and treatment of colorectal cancer have been explored. So J S, Cheong C, and Oh S Y once stated that for patients with colorectal cancer, preoperative staging and application of various imaging techniques are of great significance to formulating treatment plans and predicting prognosis. For this reason, they discussed the use of CT technology to diagnose colorectal cancer (<xref ref-type="bibr" rid="B20">So et al., 2017</xref>). Xu Jiayi, Wang Jinkai and Zhou Lu discussed the value of serum C-reactive protein (CRP), sugar chain antigen 19-9 (CA19-9) and carcinoembryonic antigen (CEA) in the preoperative diagnosis of colorectal cancer (<xref ref-type="bibr" rid="B24">Xu et al., 2017</xref>). Ma K investigated the use of CT in the treatment of common malignant tumors including lung and colorectal cancer. They emphasized that while CT technology does not significantly contribute to the management of cholangiocarcinoma, it is helpful in the detection and management of colorectal cancer (<xref ref-type="bibr" rid="B12">Ma et al., 2018</xref>). Jaramillo FA and Daniel Upegui Jim&#xe9;nez proposed that CT colorectal cancer is the fourth leading cause of death in the world and the fifth leading cause of cancer death in Colombia. 
They believe that MRI is an ideal method to evaluate colorectal cancer, especially for screening, because it can be staged by determining the degree of invasion of the muscle layer and adjacent organs, which is crucial for determining candidates for chemotherapy or preoperative radiotherapy and for planning the surgical procedure (<xref ref-type="bibr" rid="B9">Jaramillo and Upegui Jim&#xe9;nez, 2016</xref>). In addition, Park SH et al. stated that the preoperative colorectal tumor location is essential for proper resection and treatment planning. In response to the low positioning accuracy of traditional colonoscopy, they proposed to develop several new positioning techniques. They reviewed the tumor localization error rates of several preoperative endoscopic techniques, combined information about localization errors and risk factors for surgery-related adverse events, and concluded an effective method for accurately localizing colorectal tumors (<xref ref-type="bibr" rid="B14">Park et al., 2017</xref>). It can be seen from the above research results that although there are various methods for the diagnosis and treatment of colorectal cancer, there are still relatively few studies using MR medical imaging technology to intelligently diagnose the preoperative staging of colorectal cancer. Therefore, this article attempts to use this technology to explore the clinical intelligent diagnosis and treatment of colorectal cancer, with a view to adding a new treatment method to clinical treatment.</p>
</sec>
<sec id="s1-3">
<title>1.3 Innovations in this article</title>
<p>The innovations of this article are mainly reflected in the following aspects: 1) A malignant gastrointestinal tumor, colorectal cancer is exceedingly dangerous to people&#x2019;s health and has a negative impact on patients&#x2019; quality of life. The clinical aspects of colorectal cancer are covered in this article. Methods of diagnosis and treatment greatly aid in the improvement of patient quality of life, the rate of early diagnosis, and the social practical importance and value; 2) The use of MR medical imaging technologies in the preoperative T staging of colorectal cancer is explored in this study. It is also suggested to apply deep learning algorithms to address the shortcomings of sluggish imaging speed and lengthy MR scanning times, which is crucial for enhancing and optimizing the performance of MR technology and enhancing its use in clinical diagnosis.</p>
</sec>
</sec>
<sec id="s2">
<title>2 MR medical imaging technology and its intelligent diagnostic value for preoperative T staging of colorectal cancer</title>
<sec id="s2-1">
<title>2.1 MR medical imaging</title>
<p>
<list list-type="simple">
<list-item>
<p>(1) MR</p>
</list-item>
</list>
</p>
<p>MR, or magnetic resonance, is a physical phenomenon related to the gravitational theory of magnetic fields (<xref ref-type="bibr" rid="B10">Joany and Logashanmugam, 2018</xref>). The principle is to combine the externally applied radio frequency energy field with the proton energy field of the human body. In a strong magnetic field environment, the proton can be thought of as a spin-nucleus system that can reflect certain aspects of the system by absorbing the externally delivered matching radio frequency energy field (<xref ref-type="bibr" rid="B21">Song et al., 2017</xref>; <xref ref-type="bibr" rid="B5">Guo et al., 2019</xref>).<list list-type="simple">
<list-item>
<p>(2) MRI</p>
</list-item>
</list>
</p>
<p>MRI, or magnetic resonance imaging, refers to magnetic resonance medical imaging. It is a type of magnetic resonance imaging-based imaging technology used for clinical disease diagnosis and treatment. When protons in the human body or the spin-nucleus system are magnetized to form a macroscopic magnetization vector, they will be driven by the radio frequency field. After being forced to return to its equilibrium position, the magnetization vector will keep circling the spin-nucleus system, eventually forming a closed coil. The coil will generate a magnetic resonance signal as a result of the nuclear system&#x2019;s steady rotation. Since this kind of magnetic resonance signal cannot distinguish between the various positions of the spin nucleus, gradient fields must be applied in different directions of the spin nucleus, so that its spatial position information can be independently marked during rotation (<xref ref-type="bibr" rid="B2">Ali et al., 2019</xref>; <xref ref-type="bibr" rid="B7">Hoshino et al., 2019</xref>).<list list-type="simple">
<list-item>
<p>(3) Three-dimensional modeling of MR medical images</p>
</list-item>
</list>
</p>
<p>MR medical imaging uses the gravitational force of a magnetic field to treat the protons of the human body as a spin nucleus system, and absorbs the externally applied matching RF power field to generate magnetic resonance signals, creating a two-dimensional image in the process. The spatial position of the MR image will continue to consistently correspond to distinct pixel coordinates because of the nuclear system&#x2019;s continual rotation (<xref ref-type="bibr" rid="B27">Yao et al., 2016</xref>; <xref ref-type="bibr" rid="B16">Pradhan et al., 2020</xref>). The patient&#x2019;s lesion location is eventually localized in three dimensions using a three-dimensional picture. Transverse, coronal, sagittal, and any other cross-sectional images of the human body are included in general MR three-dimensional medical imaging (<xref ref-type="bibr" rid="B1">Acuna et al., 2017</xref>).<list list-type="simple">
<list-item>
<p>(4) MR image processing</p>
</list-item>
</list>
</p>
<p>Before they can be used effectively, medical photographs typically need to be cleaned, denoised, improved, and subject to other processes. This is because, during the picture capture process, the image will be susceptible to varying degrees of external interference, and the final image will be fuzzy or noisy (<xref ref-type="bibr" rid="B11">Lu and Wang, 2018</xref>). The magnetic field of the human body interacts with the radio waves of the MRI scanner to produce magnetic resonance images. Magnetic resonance signals are produced when the protons in the human body interact with radio wave energy. The coordinates of the various places of the tissues being investigated are represented by these signals. A tissue image of the human body is created by the fusion of many locations (<xref ref-type="bibr" rid="B18">Shakeel et al., 2020</xref>; <xref ref-type="bibr" rid="B25">Xu et al., 2022</xref>). We frequently employ image data processing technology for picture processing, and the procedure is as follows:<list list-type="simple">
<list-item>
<p>1) Image denoising and filtering. There are two widely used algorithms: the top-hat transform algorithm and the dual-tree complex wavelet transform algorithm.</p>
</list-item>
</list>
</p>
<p>The dual-tree complex wavelet transform approach comes first. On the basis of the complex wavelet, the dual-tree complex wavelet is produced. The formula for one-dimensional data transformation is:<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mi>h</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mi>g</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
<inline-formula id="inf1">
<mml:math id="m2">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the time, <inline-formula id="inf2">
<mml:math id="m3">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the total function.</p>
<p>When the complex wavelet becomes a dual-tree complex wavelet, its two-dimensional data transformation formula is:<disp-formula id="e2">
<mml:math id="m4">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf3">
<mml:math id="m5">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is a dual-tree complex wavelet. Second: the top-hat transformation method. Open and close the original image as follows:<disp-formula id="e3">
<mml:math id="m6">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mo>&#x2218;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mi mathvariant="normal">&#x398;</mml:mi>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2295;</mml:mo>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mo>&#x2022;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mo>&#x2295;</mml:mo>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi mathvariant="normal">&#x398;</mml:mi>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
</p>
<p>Then the top hat transformation is as follows:<disp-formula id="e4">
<mml:math id="m7">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo>&#x2218;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo>&#x2022;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
</p>
<p>WTH (.) is the form of image transformation, and BTH (.) is the result of transformation.<list list-type="simple">
<list-item>
<p>2) Image augmentation. Typically, frequency-domain and spatial-domain methods are used.</p>
</list-item>
</list>
</p>
<p>First: To achieve the goal of boosting the contrast, the spatial technique involves processing the picture pixels in the space where the image is placed. The spatial-domain method is expressed as follows:<disp-formula id="e5">
<mml:math id="m8">
<mml:mrow>
<mml:mi>g</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>U</mml:mi>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
</p>
<p>g (a, b) is the image pixel in space, and U is the transformation function.</p>
<p>Second: The image is first transformed by the frequency domain method, the parameters are then improved, and lastly the enhanced image is changed back to the original region (<xref ref-type="bibr" rid="B6">Guo et al., 2018</xref>).<list list-type="simple">
<list-item>
<p>(5) Advantages and disadvantages of magnetic resonance imaging</p>
</list-item>
</list>
</p>
<p>At present, in addition to MR, the commonly used medical diagnostic imaging techniques in clinical use include CT and X-ray. X-ray is the earliest and most common medical imaging technology. It has strong penetrability and is suitable for imaging examinations of high-density tissues. It is generally used for bone examination, but the imaging is not clear and will produce harmful radiation to the human body. CT images are clearer than X-ray images and can be used to examine internal organs and brain tissue, but the radiation it produces is more harmful than X-rays. In contrast, MR not only has a high-definition resolution, but also does not generate harmful radiation to the human body. It can also perform arbitrary slices in different directions according to the patient&#x2019;s different body positions. However, MR images also have a shortcoming that cannot be ignored. The equipment takes a long time to scan, and the inspection of one part often takes a long time, which not only aggravates the patient&#x2019;s pain, but also increases their economic burden (<xref ref-type="bibr" rid="B8">Hu et al., 2020</xref>; <xref ref-type="bibr" rid="B13">Monroe et al., 2021</xref>). Therefore, in order to improve this problem and improve the speed and efficiency of MR image scanning, this article attempts to optimize and improve MRI. This content will be specifically introduced in the third section with colorectal cancer as an example.</p>
</sec>
<sec id="s2-2">
<title>2.2 Colorectal cancer</title>
<p>
<list list-type="simple">
<list-item>
<p>(1) The symptoms of colorectal cancer</p>
</list-item>
</list>
</p>
<p>Colorectal cancer, also known as large bowel cancer, is a common malignant tumor of the digestive tract, including colon cancer and rectal cancer. Its lesions often occur in the colorectal epithelial tissue. The high-incidence population of colorectal cancer is mainly middle-aged and elderly people between 54 and 81&#xa0;years old, mostly in developed countries. At present, the pathological mechanism of colorectal cancer is not yet clear, but it is closely related to factors such as people&#x2019;s diet, living environment, family hereditary polyps and chronic inflammation. Studies have shown that people who eat high-fat, high-calorie, low-fiber foods are more likely to develop colon cancer (<xref ref-type="bibr" rid="B26">Yang et al., 2019</xref>). China has long been a country with a low incidence of colorectal cancer, but with the improvement of living conditions, people&#x2019;s diet and living conditions have undergone significant changes, and the incidence of colorectal cancer has gradually increased.<list list-type="simple">
<list-item>
<p>1) The early stage of the disease is difficult to detect, and the symptoms are extremely hidden, which often leads to missed and misdiagnosed phenomena. Many patients are often in the late stage of the disease when they are diagnosed. Therefore, the disease has a high mortality and disability rate;</p>
</list-item>
<list-item>
<p>2) The elderly are a frequent group of the disease, which is extremely harmful to the quality of life and health of the elderly;</p>
</list-item>
<list-item>
<p>3) The tumor cells in the lesion are prone to metastasis, which affects the normal physiological functions of the surrounding organs and other parts of the body;</p>
</list-item>
<list-item>
<p>4) Postoperative complications are obvious and difficult to cure, and the quality of life of patients has significantly decreased (<xref ref-type="bibr" rid="B22">Sun et al., 2017</xref>).</p>
</list-item>
<list-item>
<p>(2) Clinical manifestations of colorectal cancer</p>
</list-item>
<list-item>
<p>1) Early clinical symptoms. In the early stages of the disease, the symptoms of colorectal cancer are not yet obvious. However, when the tumor in the intestine grows larger, the patient&#x2019;s bowel habits will gradually change, showing symptoms such as bleeding stool, diarrhea, alternating diarrhea and constipation, and local abdominal pain. The frequency of excretion increases, accompanied by a small amount of mucus and blood in the stool.</p>
</list-item>
<list-item>
<p>2) Middle and late clinical symptoms. In the middle and late stages, the tissues and organs around the colorectal have different degrees of necrosis and changes, and their functions are obviously impaired. For example, the surrounding tissues such as the bladder and prostate will have symptoms such as frequent urination, urgency and difficulty urinating. In severe cases, the tumor cells of colorectal cancer will also move and metastasize to distant tissues, such as liver and lungs. Patients with advanced colorectal cancer may also experience weight loss, loss of appetite, and anemia (<xref ref-type="bibr" rid="B17">Que, 2018</xref>).</p>
</list-item>
<list-item>
<p>(3) Preoperative T staging of colorectal cancer</p>
</list-item>
</list>
</p>
<p>T stage: The tumors located in the local intestinal mucosa were divided into T1, T2, T3 and T4 according to the depth of invasion. T4 suggested that the surrounding structures and tissues were invaded. The larger the number, the later the stage. Preoperative T staging is a preoperative preparation for the treatment of gastrointestinal diseases. Colorectal cancer is a malignant gastrointestinal tumor, and surgery is quite risky, coupled with the special physiological structure and function of the human rectum. Therefore, in order to increase the success rate of surgery and reduce the risk of surgery, it is very important to determine and stage the condition of colorectal cancer patients before surgery. The more accurate the preoperative staging is, the better it is for patients to undergo adjuvant chemotherapy before surgery, which can reduce the stage of malignant tumors, and also facilitate the selection and arrangement of surgical plans, thereby achieving the purpose of improving the success rate of surgery and the survival rate of patients. It is also very effective for the prognosis of patients, reducing the recurrence rate of colorectal cancer patients after surgery and greatly improving their quality of life (<xref ref-type="bibr" rid="B4">Frank et al., 2018</xref>).</p>
</sec>
<sec id="s2-3">
<title>2.3 Value of MR medical imaging on the intelligent diagnosis of preoperative T staging of colorectal cancer</title>
<p>With the development of medical technology, X-ray, CT and MR technologies have gradually been widely used in the medical field. Although MR has advantages over X-ray and CT in terms of imaging quality and impact on the human body, long scanning time and slow imaging speed are also its biggest disadvantages. High-quality images provide more accurate positioning for the preoperative staging of colorectal cancer. This article attempts to use colorectal cancer as a research sample to reconstruct its MR images by improving and optimizing MR imaging technology (<xref ref-type="bibr" rid="B3">Fan et al., 2017</xref>). Here, we will use the concept of deep learning to improve MR technology through deep learning algorithms, and use three different dictionary learning methods to reconstruct MR images. Deep dictionary method based on convolutional neural network can better process the structural details in the image to produce clearer results when training the convolutional neural network model.<list list-type="simple">
<list-item>
<p>(1) Reconstruction of magnetic resonance images using an analytic dictionary</p>
</list-item>
</list>
<disp-formula id="e6">
<mml:math id="m9">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>V</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x222c;</mml:mo>
<mml:mo>&#x2207;</mml:mo>
<mml:mi>v</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:msub>
<mml:mo>&#x2016;</mml:mo>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x222c;</mml:mo>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
<disp-formula id="e7">
<mml:math id="m10">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>V</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mo>&#x222c;</mml:mo>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:mo>&#x2207;</mml:mo>
<mml:mi>v</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mn>2</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mi>d</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mo>&#x222c;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mi mathvariant="normal">&#x3a6;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mi>d</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>
</p>
<p>The MRI image is optimized and solved using the norm of total variation, as in <xref ref-type="disp-formula" rid="e8">formula (8)</xref>
<disp-formula id="e8">
<mml:math id="m11">
<mml:mrow>
<mml:munder>
<mml:mi>min</mml:mi>
<mml:mi>x</mml:mi>
</mml:munder>
<mml:msubsup>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>u</mml:mi>
</mml:msub>
<mml:mi>x</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>&#x3bb;</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>V</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
<list list-type="simple">
<list-item>
<p>(2) Reconstruction of magnetic resonance images using a synthetic dictionary</p>
</list-item>
</list>
</p>
<p>In contrast to the analytical dictionary&#x2019;s quick and easy computation approach, the synthetic dictionary can describe more complicated images, has some adaptability, and can lessen the noise and filtering issues that the analytical dictionary has because of down sampling (<xref ref-type="bibr" rid="B19">Sibertinblanc et al., 2016</xref>). Here, we focus primarily on introducing the sparse representation, dictionary building, and synthetic dictionary reconstruction processes.</p>
<p>A transformation matrix created using the corresponding points of the magnetic resonance signal is the so-called dictionary. The magnetic resonance image&#x2019;s sparse representation is<disp-formula id="e9">
<mml:math id="m12">
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>D</mml:mi>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
</p>
<p>The sparse coefficient is represented by &#x3b1;, while D is the dictionary matrix.</p>
<p>Create a synthetic dictionary after that, using the mathematical model in formula (10)<disp-formula id="e10">
<mml:math id="m13">
<mml:mrow>
<mml:munder>
<mml:mi mathvariant="bold">min</mml:mi>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:munder>
<mml:msubsup>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:mi>Y</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>D</mml:mi>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mo>,</mml:mo>
<mml:mo>&#x2200;</mml:mo>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>
</p>
<p>Split the model into a sparse representation problem of a single sample, and get:<disp-formula id="e11">
<mml:math id="m14">
<mml:mrow>
<mml:munder>
<mml:mi mathvariant="bold">min</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:munder>
<mml:msubsup>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>D</mml:mi>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mo>,</mml:mo>
<mml:mo>&#x2200;</mml:mo>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>&#x2264;</mml:mo>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>
<list list-type="simple">
<list-item>
<p>(3) Deep dictionary-based magnetic resonance image reconstruction</p>
</list-item>
</list>
</p>
<p>Convolutional neural network algorithms are combined in this deep dictionary, which reconstructs MRI images by utilizing the powerful flexibility and self-learning capabilities of ANNs. We previously presented two strategies for learning dictionaries. While the synthetic dictionary has some adaptability, its ability to denoise and filter the image is superior. The analytical dictionary is a fixed transformation that can only handle a few simple image changes, and its sparse expressiveness is weak; moreover, the reconstructed image effect is unsatisfactory when there is less image data (<xref ref-type="bibr" rid="B15">Park and Lee, 2017</xref>). So, we once more suggest a convolutional neural network-based deep dictionary approach.<disp-formula id="e12">
<mml:math id="m15">
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
<mml:mo>;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:msup>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>which represents the cost function, where W and b represent the connection parameters between the layers. Assuming a data set containing n samples, the overall cost function is:<disp-formula id="e13">
<mml:math id="m16">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
<mml:mo>;</mml:mo>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3bb;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:msup>
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mi>l</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>l</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mtd>
<mml:mtd>
<mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>m</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:msup>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>w</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3bb;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mi>l</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>l</mml:mi>
</mml:msub>
</mml:msubsup>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>s</mml:mi>
<mml:mi>l</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
<label>(13)</label>
</disp-formula>
</p>
<p>The cost function is iterated by the dimensionality reduction method, and then<disp-formula id="e14">
<mml:math id="m17">
<mml:mrow>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x3b1;</mml:mi>
<mml:mfrac>
<mml:mo>&#x2202;</mml:mo>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:msubsup>
<mml:mi>w</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(14)</label>
</disp-formula>
<disp-formula id="e15">
<mml:math id="m18">
<mml:mrow>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x3b1;</mml:mi>
<mml:mfrac>
<mml:mo>&#x2202;</mml:mo>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(15)</label>
</disp-formula>
</p>
<p>Among them, <inline-formula id="inf4">
<mml:math id="m19">
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the rate of learning.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Application experiment of MR medical imaging in the intelligent diagnosis of preoperative T staging of colorectal cancer</title>
<sec id="s3-1">
<title>3.1 General information collection</title>
<p>In order to specifically explore the application value of MR medical imaging in the preoperative T-analysis intelligent diagnosis of colorectal cancer, this article collected 150 colorectal cancer patients admitted to our hospital from January 2019 to January 2020. All patients were admitted to the hospital. Routine examinations were performed without any preoperative treatment. MR examinations were performed before the operation. Patients who could not tolerate MR technical examinations and surgical contraindications were excluded. Finally, 146 effective cases were selected, including 86 males and 60 females; age 38&#x2013;73&#xa0;years old, the average age is 56 &#xb1; 2.45&#xa0;years, and the average age of illness is 14 &#xb1; 3.56&#xa0;years. The general information of the patient is shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>General information of patients.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Group</th>
<th align="left">Man</th>
<th align="left">Woman</th>
<th align="left">Average age</th>
<th align="left">Average age of disease</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">T1-2</td>
<td align="left">27</td>
<td align="left">14</td>
<td align="left">51 &#xb1; 2.14</td>
<td align="left">9 &#xb1; 2.17</td>
</tr>
<tr>
<td align="left">T3</td>
<td align="left">34</td>
<td align="left">24</td>
<td align="left">54 &#xb1; 3.22</td>
<td align="left">7 &#xb1; 3.16</td>
</tr>
<tr>
<td align="left">T4</td>
<td align="left">27</td>
<td align="left">20</td>
<td align="left">53 &#xb1; 2.57</td>
<td align="left">11 &#xb1; 2.34</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-2">
<title>3.2 MR inspection method</title>
<p>After grouping every patient in accordance with the preoperative T staging, they were subjected to preoperative MR technical examination to collect colorectal tumor cell images. The night before the examination, all patients took bowel cleansing drugs and fasted food and water. Use our own MRI scanner for operation, with 16-channel body phased array coils. The parameters of the scanner are: pulse sequence repetition time 2600&#x2013;3400&#xa0;ms, echo time 150&#xa0;ms, field of view 400&#xa0;mm, the thickness is 5&#xa0;mm, the layer spacing is 1&#xa0;mm, the matrix is 260 &#xd7; 298, the number of excitations is 3, and the flip angle is 90&#xb0;. First, the patients in each group were injected with 0.1&#xa0;mmol/kg gadopentetate meglumine as a contrast agent by intravenous injection, and kept advancing at a speed of 2&#xa0;ml/s. MR scanning was performed immediately after the injection was completed. At the same time, the patient was instructed to keep breath-hold during the scan, first scan at a uniform speed, and then strengthen the local scan until a complete colorectal image is obtained.</p>
</sec>
<sec id="s3-3">
<title>3.3 Staging standards and observation indicators</title>
<p>Stage T1-2: there are obvious gaps in the fat at the lesion of the colorectal intestinal wall. Even the enhanced scan shows that the outer edge of the intestinal wall is smooth and there is no sign of nodules. T3: fat around the lesion on the intestinal wall. There are sparse markings, and the muscle layer has been invaded to a certain extent. When the scan is increased, the outer edge of the intestinal wall is uneven, and nodules are slightly prominent; stage T4: there are no gaps in the fat around the lesion of the intestinal wall and the adjacent tissues and organs. The boundary is blurred during enhanced scanning. The tumor has seriously invaded the colorectal fascia and surrounding organs.</p>
<p>According to the results of MR examination, the degree of invasion of each layer of the colorectal wall is evaluated, and the sensitivity, specificity, and rate of diagnostic coincidence for each colorectal cancer stage are assessed in comparison to the histological T staging.</p>
</sec>
<sec id="s3-4">
<title>3.4 Data processing</title>
<p>The data processing and analysis of this experiment used SPSS 22.0 statistical analysis software. The MR staging results of colorectal cancer and the pathological T staging results were tested for variance and chi-square, and the sensitivity, specificity and diagnostic coincidence rate of MR staging were compared. Among them, <italic>p</italic> &#x3e; 0.05 indicates no significant statistical difference, <italic>p</italic> &#x3c; 0.05 indicates significant statistical difference, <italic>p</italic> &#x3c; 0.01 indicates extremely significant statistical difference; Chi-square 1 indicates high degree of agreement, 2 indicates general agreement, 3 indicates a low degree of compliance.</p>
</sec>
</sec>
<sec id="s4">
<title>4 Discussion of experimental results</title>
<sec id="s4-1">
<title>4.1 Comparison of differences in general patient information</title>
<p>According to the general data of colorectal cancer patients collected in the third part, the general conditions of each group of patients are counted, and the <italic>p</italic>-value and chi-square value of each group of patients on gender, age and disease age are calculated, and plotted as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The histogram to compare the differences in the general conditions of patients in each group.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Comparison of general patient information.</p>
</caption>
<graphic xlink:href="fgene-14-1119990-g001.tif"/>
</fig>
<p>As can be seen from <xref ref-type="fig" rid="F1">Figure 1</xref>, in the samples collected in T1-2, there are 27 men and 14 women, with an average age of 51&#xa0;years, and the average age of patients is 42&#xa0;years. In the samples collected in T3, there are 34 men and 24 women, with an average age of 54&#xa0;years, and the average age of patients is 47&#xa0;years. In T4, there are 27 men and 20 women, with an average age of 53&#xa0;years, and the average age of patients is 42&#xa0;years. There was no significant difference in general information among T1-2, T3 and T4 patients (<italic>p</italic> &#x3e; 0.05). The age of patients in each group is 50&#x2013;60&#xa0;years old, and the age of onset is about 10&#xa0;years. The majority of men and women are men. Among the three groups, the number of patients in T3 group was the largest, with 58 cases, followed by T4 group with 47 cases, and T1-2 group with 41 cases.</p>
</sec>
<sec id="s4-2">
<title>4.2 Comparison of results between MR staging and pathological T staging</title>
<p>To ascertain the sensitivity, specificity, and diagnostic coincidence rate of MR staging, the patients&#x2019; preoperative T staging was performed post-experimentally in accordance with the findings of the MR scan. The results were then compared with the pathological T staging performed prior to the experiment.<list list-type="simple">
<list-item>
<p>(1) Comparison of results between MR staging and pathological T staging</p>
</list-item>
</list>
</p>
<p>The results of MR staging and pathological T staging are compared as shown in <xref ref-type="table" rid="T2">Table 2</xref> and <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Comparison of MR staging and pathological T staging.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="4" align="left">Pathological staging MR staging</th>
<th align="left">T1-2</th>
<th align="left">T3</th>
<th align="left">T4</th>
<th align="left">Total</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td colspan="4" align="left">T1-2</td>
<td align="left">19</td>
<td align="left">25</td>
<td align="left">11</td>
<td align="left">55</td>
</tr>
<tr>
<td colspan="4" align="left">T3</td>
<td align="left">17</td>
<td align="left">18</td>
<td align="left">21</td>
<td align="left">56</td>
</tr>
<tr>
<td colspan="4" align="left">T4</td>
<td align="left">5</td>
<td align="left">15</td>
<td align="left">15</td>
<td align="left">35</td>
</tr>
<tr>
<td align="left">Total</td>
<td align="left">41</td>
<td align="left">58</td>
<td align="left">47</td>
<td colspan="4" align="left">146</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Comparison of MR staging and pathological T staging.</p>
</caption>
<graphic xlink:href="fgene-14-1119990-g002.tif"/>
</fig>
<p>
<xref ref-type="table" rid="T2">Table 2</xref> and <xref ref-type="fig" rid="F2">Figure 2</xref> show that the T staging detected and diagnosed by MR technology differs in certain ways from the pathological T staging (<italic>p</italic> &#x3c; 0.05). According to the pathological T staging, the 146 patients were divided into 41 cases in T1-2 stage, 58 cases in T3 stage, and 47 cases in T4 stage; after MR examination, the T stage of the patients was 55 cases in T1-2 stage, 56 cases in T3 stage, and 35 cases in T4 stage. This shows that the symptoms of 14 patients were overestimated, of which 12 were misestimated as T4 stage, and 2 were misestimated as T3 stage. These results show that the steps and contents of the study need to be more specific, so this is only an individual case.<list list-type="simple">
<list-item>
<p>(2) Evaluation of the sensitivity, specificity, and diagnostic coincidence rate of MR staging</p>
</list-item>
</list>
</p>
<p>The findings, which are shown in <xref ref-type="table" rid="T3">Table 3</xref> and <xref ref-type="fig" rid="F3">Figure 3</xref>, include evaluations of the sensitivity, specificity, and rate of diagnostic coincidence for MR staging as well as comparisons between pathological T staging results and MR staging results.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Diagnostic coincidence rate, sensitivity, and specificity of MR staging.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left"/>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">Diagnosis coincidence rate</th>
<th align="left">
<italic>p</italic>
</th>
<th align="left">X2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">T1-2</td>
<td align="left">46.34% (19/41)</td>
<td align="left">90.67% (68/75)</td>
<td align="left">89.73% (131/146)</td>
<td align="left">0.021</td>
<td align="left">0.85</td>
</tr>
<tr>
<td align="left">T3</td>
<td align="left">31.03% (18/58)</td>
<td align="left">92.1% (70/76)</td>
<td align="left">95.89% (140/146)</td>
<td align="left">0.025</td>
<td align="left">0.83</td>
</tr>
<tr>
<td align="left">T4</td>
<td align="left">31.91% (15/47)</td>
<td align="left">96.67% (87/90)</td>
<td align="left">99.32% (145/146)</td>
<td align="left">0.042</td>
<td align="left">0.81</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Sensitivity, specificity and diagnostic coincidence rate of MR staging.</p>
</caption>
<graphic xlink:href="fgene-14-1119990-g003.tif"/>
</fig>
<p>In <xref ref-type="table" rid="T3">Table 3</xref>, in T1-2 phase, the sensitivity, specificity and coincidence rate of MRI were 46.34%, 90.67% and 89.73% respectively. In T3 phase, their sensitivity, specificity and coincidence rate were 31.03%, 92.1% and 95.89% respectively. In T4 phase, their sensitivity, specificity and coincidence rate were 31.91%, 96.67% and 99.32% respectively.</p>
<p>From <xref ref-type="table" rid="T3">Table 3</xref> and <xref ref-type="fig" rid="F3">Figure 3</xref>, it can be seen that the total diagnostic coincidence rate of MR for preoperative T staging of colorectal cancer patients is 89.73%, the sensitivity of each stage is 46.34%, 31.03% and 31.91%, and the specificity is 90.67%, 92.1% and 96.67%, <italic>p</italic>-values were 0.021, 0.025, and 0.042, respectively, and the chi-square values were all greater than 0.8, which indicates that the preoperative T staging of MR and the pathological T staging have a high degree of agreement, and the agreement and consistency are strong.</p>
</sec>
<sec id="s4-3">
<title>4.3 Comparison of results between CT staging and pathological T staging</title>
<p>In order to further explore the application value of MR technology in the intelligent diagnosis of colorectal cancer preoperative T staging, and at the same time apply CT technology to the preoperative T staging diagnosis of colorectal cancer, compare the difference between CT staging and pathological T staging, and it is compared with MR staging to analyze the advantages of MR staging and CT staging. In addition, 98 patients with colorectal cancer admitted to our hospital were selected. The T stages were 36 cases in T1-2 stage, 37 cases in T3 stage, and 25 cases in T4 stage. According to the operation steps of MR examination, these 98 patients were examined by CT, and finally the CT staging was obtained, and the results were compared with the pathological T staging, as shown in <xref ref-type="table" rid="T4">Table 4</xref> and <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Comparison of CT staging and pathological T staging results.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Pathological staging CT staging</th>
<th align="left">T1-2</th>
<th align="left">T3</th>
<th align="left">T4</th>
<th align="left">Total</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">T1-2</td>
<td align="left">14</td>
<td align="left">18</td>
<td align="left">9</td>
<td align="left">41</td>
</tr>
<tr>
<td align="left">T3</td>
<td align="left">12</td>
<td align="left">11</td>
<td align="left">11</td>
<td align="left">34</td>
</tr>
<tr>
<td align="left">T4</td>
<td align="left">10</td>
<td align="left">8</td>
<td align="left">5</td>
<td align="left">23</td>
</tr>
<tr>
<td align="left">Total</td>
<td align="left">36</td>
<td align="left">37</td>
<td align="left">25</td>
<td align="left">98</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Comparison of CT staging and pathological T staging results.</p>
</caption>
<graphic xlink:href="fgene-14-1119990-g004.tif"/>
</fig>
<p>
<xref ref-type="table" rid="T4">Table 4</xref> and <xref ref-type="fig" rid="F4">Figure 4</xref> demonstrate that there are several differences between the T staging and the pathological T staging detected and diagnosed by CT technology (<italic>p</italic> &#x3c; 0.05). According to the pathological T staging, the 98 patients were divided into 36 cases in T1-2 stage, 37 cases in T3 stage, and 25 cases in T4 stage. After CT examination, the T stage of patients was 41 cases in T1-2 stage, 34 cases in T3 stage, and 23 cases in T4 stage. This shows that the symptoms of five patients were overestimated, of which 3 were overestimated as T3 stage, and 2 were overestimated as T4 stage.</p>
<p>The <italic>p</italic>-value and chi-square value of the two were determined in accordance with the distinction between CT staging and pathological staging, and the sensitivity, specificity, and diagnostic coincidence rate of CT staging were examined. <xref ref-type="table" rid="T5">Table 5</xref> and <xref ref-type="fig" rid="F5">Figure 5</xref> present the findings.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>CT staging&#x2019;s sensitivity, specificity, and rate of diagnostic concordance.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left"/>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">Diagnosis coincidence rate</th>
<th align="left">
<italic>p</italic>
</th>
<th align="left">X2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">T1-2</td>
<td align="left">33.33% (12/36)</td>
<td align="left">87.8% (36/41)</td>
<td align="left">86.73% (85/98)</td>
<td align="left">0.036</td>
<td align="left">0.75</td>
</tr>
<tr>
<td align="left">T3</td>
<td align="left">21.62% (8/37)</td>
<td align="left">76.79% (43/56)</td>
<td align="left">87.76% (86/98)</td>
<td align="left">0.026</td>
<td align="left">0.79</td>
</tr>
<tr>
<td align="left">T4</td>
<td align="left">28% (7/25)</td>
<td align="left">97.22% (70/72)</td>
<td align="left">91.84% (90/98)</td>
<td align="left">0.038</td>
<td align="left">0.81</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>CT staging&#x2019;s sensitivity, specificity, and diagnostic coincidence rate.</p>
</caption>
<graphic xlink:href="fgene-14-1119990-g005.tif"/>
</fig>
<p>
<xref ref-type="table" rid="T5">Table 5</xref> and <xref ref-type="fig" rid="F5">Figure 5</xref> show that the overall CT diagnostic coincidence rate for colorectal cancer patients&#x2019; preoperative T staging is 86.73%, the sensitivity of each stage is 33.33%, 21.62% and 28%, and the specificity is 87.8%, 76.79% and 97.22%, the <italic>p</italic>-values were 0.036, 0.026 and 0.038, and the chi-square values were all less than 0.8. This shows that although the diagnosis of CT staging and pathological T staging has a significant statistical difference, the agreement is average. Compared with MR staging, its agreement is slightly lower, which proves that MR technology is useful for preoperative T staging of colorectal cancer.</p>
</sec>
<sec id="s4-4">
<title>4.4 Effect of optimization of MR technology using various deep learning algorithms</title>
<p>According to the foregoing, among the existing clinical auxiliary diagnostic imaging technologies, although many studies have shown that MR technology is the most effective in auxiliary diagnostics, it also has serious problems such as long scanning time and slow acquisition of data and images. This article attempts to improve and optimize its core steps based on the working principle of MR technology. In order to improve the reconstruction of colorectal cancer pictures obtained by MR, three distinct dictionary learning methods are proposed (The complete calculation procedure and deduction steps refer to <xref ref-type="sec" rid="s2-3">Section 2.3</xref> of this article). In this section, we will compare the differences in peak signal-to-noise ratio and structural similarity of the reconstructed MR images produced by three different dictionary learning algorithms, and identify the algorithm that produces the best image reconstruction quality and the quickest imaging speed.<list list-type="simple">
<list-item>
<p>(1) Results of analytic and synthetic dictionary optimization</p>
</list-item>
</list>
</p>
<p>To sample MR pictures, use analytical dictionary and synthetic dictionary learning methods. Set the sampling rates to 30%, 40%, 50%, and 60% depending on the desired sampling rate. Under the same sample rate background, compare the two. Different learning is used to determine the reconstructed image&#x2019;s peak signal-to-noise ratio and structural similarity. <xref ref-type="table" rid="T6">Table 6</xref> displays the statistical findings. For example, the peak signal-to-noise ratio of the analytic dictionary is 32.25 when the sampling rate is 30%.</p>
<table-wrap id="T6" position="float">
<label>TABLE 6</label>
<caption>
<p>Effect of optimization of MR technology using various deep learning algorithms.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="2" align="left">Learning algorithm</th>
<th align="left">30%</th>
<th align="left">40%</th>
<th align="left">50%</th>
<th align="left">60%</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="2" align="left">Analytic dictionary</td>
<td align="left">Peak signal to noise ratio(A)</td>
<td align="left">32.25</td>
<td align="left">36.29</td>
<td align="left">38.47</td>
<td align="left">41.56</td>
</tr>
<tr>
<td align="left">Structural similarity(A)</td>
<td align="left">0.8521</td>
<td align="left">0.8735</td>
<td align="left">0.9174</td>
<td align="left">0.9387</td>
</tr>
<tr>
<td rowspan="2" align="left">Synthetic dictionary</td>
<td align="left">Peak signal to noise ratio(S)</td>
<td align="left">33.54</td>
<td align="left">37.55</td>
<td align="left">41.45</td>
<td align="left">44.67</td>
</tr>
<tr>
<td align="left">Structural similarity(S)</td>
<td align="left">0.8741</td>
<td align="left">0.9145</td>
<td align="left">0.9279</td>
<td align="left">0.9536</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>
<xref ref-type="table" rid="T6">Table 6</xref> shows that the peak signal-to-noise ratio and structural similarity of the synthetic dictionary are higher than the values of the analytical dictionary under the same sampling rate, demonstrating that the synthetic dictionary has a superior influence on picture reconstruction.<list list-type="simple">
<list-item>
<p>(2) The optimization result of the deep dictionary</p>
</list-item>
</list>
</p>
<p>The deep dictionary algorithm we suggested is built on a convolutional neural network, based on the aforementioned. Sample training and testing are necessary depending on the neural network&#x2019;s properties. Select the first 200&#xa0;MR images as the training group, the last 100 as the test group, and 300&#xa0;MR images as the sample data. With 50, 100, 150, 200, 250, and 300 samples, respectively, iterative training and testing are conducted. Each sample interval is recorded. The results displayed in <xref ref-type="table" rid="T7">Table 7</xref> compare the peak signal-to-noise ratio and structural similarity of the images, as well as the difference between the <italic>p</italic>-value and the chi-square value of each sample interval.</p>
<table-wrap id="T7" position="float">
<label>TABLE 7</label>
<caption>
<p>Optimization results based on deep dictionary.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left"/>
<th align="left">50</th>
<th align="left">100</th>
<th align="left">150</th>
<th align="left">200</th>
<th align="left">250</th>
<th align="left">300</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Peak signal to noise ratio</td>
<td align="left">34.47</td>
<td align="left">34.47</td>
<td align="left">36.55</td>
<td align="left">41.89</td>
<td align="left">42.38</td>
<td align="left">45.54</td>
</tr>
<tr>
<td align="left">Structural similarity</td>
<td align="left">0.9118</td>
<td align="left">0.9357</td>
<td align="left">0.9472</td>
<td align="left">0.9551</td>
<td align="left">0.9678</td>
<td align="left">0.9967</td>
</tr>
<tr>
<td align="left">
<italic>p</italic>-value</td>
<td align="left">0.256</td>
<td align="left">0.478</td>
<td align="left">0.351</td>
<td align="left">0.571</td>
<td align="left">0.610</td>
<td align="left">0.617</td>
</tr>
<tr>
<td align="left">X2</td>
<td align="left">0.897</td>
<td align="left">0.880</td>
<td align="left">0.857</td>
<td align="left">0.872</td>
<td align="left">0.810</td>
<td align="left">0.835</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>
<xref ref-type="table" rid="T7">Table 7</xref> shows that when the sample size grows, the quantitative evaluation value of the deep dictionary-reconstructed image rises, showing a greater effect. The structural similarity also shows that the deep dictionary-reconstructed image is superior to the images produced by the prior two algorithms and more faithful to real MR scans. The structural similarity value reaches 0.9967 when the sample size is 300, which is significantly higher than the values obtained by the first two techniques. The sample training results are compatible with the test results (X2 &#x3e; 0.8) and there is no statistically significant difference between the sample intervals, as can be observed from the <italic>p</italic>-value and Chi-square value (<italic>p</italic> &#x3e; 0.05). As can be observed, the MRI picture generated by the deep dictionary approach based on the convolutional neural network has the best quality and the fastest imaging speed of the three dictionary learning methods.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>Colorectal cancer is a malignant tumor disease that threatens human health and quality of life, especially among middle-aged and elderly people. Because its early symptoms are hidden and difficult to detect, its death and disability rates are high. Preoperative T staging of colorectal cancer is important for creating a precise surgical treatment plan, which is important for enhancing patient quality of life and increasing long-term survival rates.</p>
<p>MR is a cutting-edge imaging technique used in clinical and auxiliary medicine. In individuals with colorectal cancer, it is crucial to examine the tumor&#x2019;s location, its depth, and its connections to other tissues. Compared to CT and conventional X-rays, it offers additional benefits. Rectal cancer preoperative T staging offers significant diagnostic significance.</p>
<p>This study confirmed through experiments that MR has higher sensitivity, specificity and consistency in the diagnosis of each stage than CT. At the same time, it makes an algorithm based on convolutional neural network and compares the performance tests of analytic, synthetic, and deep dictionaries. A deeper lexicon is better for optimizing MR and is better for enhancing MR scanning and imaging speed. However, this article is not very proficient in the clinical diagnosis and treatment of colorectal cancer and needs further improvement. This is very helpful to increase the early diagnosis rate and improve the quality of life of patients. The inadequacy of the article is that it does not explain the situation of patients in each T stage, and how to care for patients in each T stage. The results of each stage are still not enough. I hope that we can strengthen, guide and formulate treatment plans and evaluate the prognosis in the future.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by Ethics Committee of the General Hospital of the Western Theater of War, managed by the Health Service Department of the General Hospital of the Western Theater of War. Written informed consent for participation was not required for this study in accordance with the national legislation and the institutional requirements.</p>
</sec>
<sec id="s8">
<title>Author contributions</title>
<p>All authors listed have made a substantial, direct, and intellectual contribution to the work and approved it for publication.</p>
</sec>
<sec id="s9">
<title>Funding</title>
<p>This study was supported by the Research and Development Program of The General Hospital of Western Theater Command (Grant No. 2021-XZYG-C04 and 2021-XZYG-C05).</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Acuna</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Elmi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Shah</surname>
<given-names>P. S.</given-names>
</name>
<name>
<surname>Coburn</surname>
<given-names>N. G.</given-names>
</name>
<name>
<surname>Quereshy</surname>
<given-names>F. A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Preoperative localization of colorectal cancer: A systematic review and meta-analysis</article-title>. <source>Surg. Endosc.</source> <volume>31</volume> (<issue>6</issue>), <fpage>2366</fpage>&#x2013;<lpage>2379</lpage>. <pub-id pub-id-type="doi">10.1007/s00464-016-5236-8</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ali</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sarwar</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sharma</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Suri</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Artificial neural network based screening of cervical cancer using a hierarchical modular neural network architecture (HMNNA) and novel benchmark uterine cervix cancer database</article-title>. <source>Neural Comput. Applic</source> <volume>31</volume>, <fpage>2979</fpage>&#x2013;<lpage>2993</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-017-3246-7</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fan</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The clinical application of endoscopic ultrasonography on preoperative staging of esophageal carcinoma</article-title>. <source>J. Xinjiang Med. Univ.</source> <volume>040</volume> (<issue>005</issue>), <fpage>596</fpage>&#x2013;<lpage>598</lpage>.</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Frank</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Nina</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gerhard</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Markl</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Messmann</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Anthuber</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Impact of primary tumor localization on the efficacy of bevacizumab in metastatic colorectal cancer</article-title>. <source>Anticancer Res.</source> <volume>38</volume> (<issue>9</issue>), <fpage>5539</fpage>&#x2013;<lpage>5546</lpage>. <pub-id pub-id-type="doi">10.21873/anticanres.12889</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guo</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Cui</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>The influence of different CEA level at colorectal cancer preoperative on prognosis and metastasis</article-title>. <source>Pract. J. Cancer</source> <volume>034</volume> (<issue>004</issue>), <fpage>561</fpage>&#x2013;<lpage>563</lpage>.</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guo</surname>
<given-names>Yu</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
</person-group>, (<year>2018</year>). <article-title>The diagnostic value of CT-based radiomics in liver metastasis of colorectal cancer</article-title>. <source>Chin. J. Clin. Med. Imaging</source> <volume>029</volume> (<issue>011</issue>), <fpage>798</fpage>&#x2013;<lpage>802</lpage>.</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hoshino</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Sakamoto</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hida</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sakai</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Diagnostic accuracy of computed tomography colonography for tumor depth in colorectal cancer: A systematic review and meta-analysis</article-title>. <source>Surg. Oncol.</source> <volume>30</volume>, <fpage>126</fpage>&#x2013;<lpage>130</lpage>. <pub-id pub-id-type="doi">10.1016/j.suronc.2019.08.003</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Automatic segmentation of intracerebral hemorrhage in CT images using encoder-decoder convolutional neural network</article-title>. <source>Inf. Process. Manag.</source> <volume>57</volume> (<issue>6</issue>), <fpage>102352</fpage>. <pub-id pub-id-type="doi">10.1016/j.ipm.2020.102352</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jaramillo</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Upegui Jim&#xe9;nez</surname>
<given-names>Daniel</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>MRI staging of colorectal cancer</article-title>. <source>Rev. Colomb. De. Gastroenterol.</source> <volume>31</volume> (<issue>3</issue>), <fpage>273</fpage>&#x2013;<lpage>282</lpage>.</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Joany</surname>
<given-names>R. M.</given-names>
</name>
<name>
<surname>Logashanmugam</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Design and analysis of single pixel imaging on magnetic resonance imaging</article-title>. <source>J. Med. IMAGING HEALTH Inf.</source> <volume>8</volume> (<issue>3</issue>), <fpage>596</fpage>&#x2013;<lpage>601</lpage>. <pub-id pub-id-type="doi">10.1166/jmihi.2018.2338</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Prognostic value of preoperative fibrinogen and D-dimer levels in patients with colorectal cancer</article-title>. <source>J. China Med. Univ.</source> <volume>047</volume> (<issue>006</issue>), <fpage>513</fpage>&#x2013;<lpage>518</lpage>.</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Cheung</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>She</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Chok</surname>
<given-names>K. S. H.</given-names>
</name>
<name>
<surname>Chan</surname>
<given-names>A. C. Y.</given-names>
</name>
<name>
<surname>Dai</surname>
<given-names>W. C.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Diagnostic and prognostic role of 18-FDG PET/CT in the management of resectable biliary tract cancer</article-title>. <source>World J. Surg.</source> <volume>42</volume> (<issue>3</issue>), <fpage>823</fpage>&#x2013;<lpage>834</lpage>. <pub-id pub-id-type="doi">10.1007/s00268-017-4192-3</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Monroe</surname>
<given-names>W. S.</given-names>
</name>
<name>
<surname>Skidmore</surname>
<given-names>F. M.</given-names>
</name>
<name>
<surname>Odaibo</surname>
<given-names>D. G.</given-names>
</name>
<name>
<surname>Tanik</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>HihO: Accelerating artificial intelligence interpretability for medical imaging in IoT applications using hierarchical occlusion</article-title>. <source>Neural Comput. Applic</source> <volume>33</volume>, <fpage>6027</fpage>&#x2013;<lpage>6038</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-020-05379-4</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>E. K.</given-names>
</name>
<name>
<surname>Choi</surname>
<given-names>D. K.</given-names>
</name>
<name>
<surname>Chung</surname>
<given-names>Y. E.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>M. J.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Aberrant expression of OATP1B3 in colorectal cancer liver metastases and its clinical implication on gadoxetic acid-enhanced MRI</article-title>. <source>Oncotarget</source> <volume>8</volume> (<issue>41</issue>), <fpage>71012</fpage>&#x2013;<lpage>71023</lpage>. <pub-id pub-id-type="doi">10.18632/oncotarget.20295</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Impact of kras mutation &#x26; ercc1 over-expression on oxaliplatin-based chemotherapy in metastatic colorectal cancer patients</article-title>. <source>Dis. Colon Rectum</source> <volume>60</volume> (<issue>6</issue>), <fpage>E342</fpage>.</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pradhan</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Dhaka</surname>
<given-names>V. S.</given-names>
</name>
<name>
<surname>Rani</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Chaudhary</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Transforming view of medical images using deep learning</article-title>. <source>Neural Comput. Applic</source> <volume>32</volume>, <fpage>15043</fpage>&#x2013;<lpage>15054</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-020-04857-z</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Que</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Value of CT and MRI in clinical diagnosis of preoperative local staging for rectal cancer</article-title>. <source>Chin. Foreign Med.</source> <volume>037</volume> (<issue>017</issue>), <fpage>180</fpage>&#x2013;<lpage>181</lpage>.</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shakeel</surname>
<given-names>P. M.</given-names>
</name>
<name>
<surname>Tolba</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Al-Makhadmeh</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Jaber</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Automatic detection of lung cancer from biomedical data set using discrete AdaBoost optimized ensemble learning generalized neural networks</article-title>. <source>Neural Comput. Applic</source> <volume>32</volume>, <fpage>777</fpage>&#x2013;<lpage>790</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-018-03972-2</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sibertinblanc</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Fabre</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Aparicio</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Le Malicot</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bennouna</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ghiringhelli</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Impact of genetic polymorphisms of VEGF pathway on the response to bevacizumab in metastatic colorectal cancer (mCRC): Ancillary study of PRODIGE 9 trial</article-title>. <source>J. Clin. Oncol.</source> <volume>34</volume> (<issue>15</issue>), <fpage>3534</fpage>. <pub-id pub-id-type="doi">10.1200/jco.2016.34.15_suppl.3534</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>So</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Cheong</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Oh</surname>
<given-names>S. Y.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>Y. B.</given-names>
</name>
<name>
<surname>Suh</surname>
<given-names>K. W.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Accuracy of preoperative local staging of primary colorectal cancer by using computed tomography: Reappraisal based on data collected at a highly organized cancer center</article-title>. <source>Ann. Coloproctology</source> <volume>33</volume> (<issue>5</issue>), <fpage>192</fpage>&#x2013;<lpage>196</lpage>. <pub-id pub-id-type="doi">10.3393/ac.2017.33.5.192</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Song</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pei</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Z.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Significance of inflammation-based indices in the prognosis of patients with non-metastatic colorectal cancer</article-title>. <source>Oncotarget</source> <volume>8</volume> (<issue>28</issue>), <fpage>45178</fpage>&#x2013;<lpage>45189</lpage>. <pub-id pub-id-type="doi">10.18632/oncotarget.16774</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jing</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>S. X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The diagnostic and prognostic value of CHFR hypermethylation in colorectal cancer, a meta-analysis and literature review</article-title>. <source>Oncotarget</source> <volume>8</volume> (<issue>51</issue>), <fpage>89142</fpage>&#x2013;<lpage>89148</lpage>. <pub-id pub-id-type="doi">10.18632/oncotarget.19408</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiao</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Divergence measure of pythagorean fuzzy sets and its application in medical diagnosis</article-title>. <source>Appl. Soft Comput.</source> <volume>79</volume>, <fpage>254</fpage>&#x2013;<lpage>267</lpage>.</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The diagnostic value of proactive of serum CRP, CA19-9 and CEA in patients with colorectal cancer</article-title>. <source>J. Ningxia Med. Univ.</source> <volume>039</volume> (<issue>006</issue>), <fpage>641</fpage>&#x2013;<lpage>644</lpage>.</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Kamruzzaman</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Method of generating face image based on text description of generating adversarial network</article-title>. <source>J. Electron. Imaging</source> <volume>31</volume> (<issue>5</issue>), <fpage>051411</fpage>. <pub-id pub-id-type="doi">10.1117/1.jei.31.5.051411</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Yi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Correction to: Colorectal cancer diagnostic algorithm based on sub-patch weight color histogram in combination of improved least squares support vector machine for pathological image</article-title>. <source>J. Med. Syst.</source> <volume>43</volume> (<issue>12</issue>), <fpage>333</fpage>. <pub-id pub-id-type="doi">10.1007/s10916-019-1449-4</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Value of magnetic resonance imaging(MRI) in local staging of colorectal cancer before surgery</article-title>. <source>China Mod. Dr.</source> <volume>054</volume> (<issue>020</issue>), <fpage>92</fpage>&#x2013;<lpage>94</lpage>.</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Tumor-specific nascent nine-peptide-epitopes prediction and bioinformatics characterization in human colorectal cancer</article-title>. <source>J. Med. IMAGING HEALTH Inf.</source> <volume>10</volume> (<issue>6</issue>), <fpage>1338</fpage>&#x2013;<lpage>1345</lpage>.</citation>
</ref>
</ref-list>
</back>
</article>