<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2022.980950</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Medicine</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Multi-modal data combination strategy based on chest HRCT images and PFT parameters for intelligent dyspnea identification in COPD</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Yang</surname> <given-names>Yingjian</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x02020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1446610/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Ziran</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x02020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1896279/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Wei</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Zeng</surname> <given-names>Nanrong</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Guo</surname> <given-names>Yingwei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1989487/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wang</surname> <given-names>Shicong</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1738008/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Duan</surname> <given-names>Wenxin</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1737949/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Yang</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Huai</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Xian</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chen</surname> <given-names>Rongchang</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Kang</surname> <given-names>Yan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>College of Medicine and Biological Information Engineering, Northeastern University</institution>, <addr-line>Shenyang</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>College of Health Science and Environmental Engineering, Shenzhen Technology University</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Applied Technology, Shenzhen University</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Radiology, The First Affiliated Hospital of Guangzhou Medical University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff5"><sup>5</sup><institution>Shenzhen Institute of Respiratory Diseases, Shenzhen People&#x00027;s Hospital</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff6"><sup>6</sup><institution>The Second Clinical Medical College, Jinan University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff7"><sup>7</sup><institution>The First Affiliated Hospital, Southern University of Science and Technology</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff8"><sup>8</sup><institution>Engineering Research Centre of Medical Imaging and Intelligent Analysis, Ministry of Education</institution>, <addr-line>Shenyang</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Hyunjin Park, Sungkyunkwan University, South Korea</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Christophe Delclaux, H&#x000F4;pital Robert Debr&#x000E9;, France; Yukun Dong, China University of Petroleum, Huadong, China; Hui Zhou, Nanjing University of Science and Technology, China</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Yan Kang &#x02709;<email>kangyan&#x00040;sztu.edu.cn</email></corresp>
<corresp id="c002">Rongchang Chen &#x02709;<email>chenrc&#x00040;vip.163.com</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Precision Medicine, a section of the journal Frontiers in Medicine</p></fn>
<fn fn-type="equal" id="fn002"><p>&#x02020;These authors have contributed equally to this work and share first authorship</p></fn></author-notes>
<pub-date pub-type="epub">
<day>21</day>
<month>12</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>9</volume>
<elocation-id>980950</elocation-id>
<history>
<date date-type="received">
<day>30</day>
<month>06</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>12</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2022 Yang, Chen, Li, Zeng, Guo, Wang, Duan, Liu, Chen, Li, Chen and Kang.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Yang, Chen, Li, Zeng, Guo, Wang, Duan, Liu, Chen, Li, Chen and Kang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Because of persistent airflow limitation in chronic obstructive pulmonary disease (COPD), patients with COPD often have complications of dyspnea. However, as a leading symptom of COPD, dyspnea in COPD deserves special consideration regarding treatment in this fragile population for pre-clinical health management in COPD. Based on the above, this paper proposes a multi-modal data combination strategy by combining the local and global features for dyspnea identification in COPD based on the multi-layer perceptron (MLP) classifier.</p></sec>
<sec>
<title>Methods</title>
<p>First, lung region images are automatically segmented from chest HRCT images for extracting the original 1,316 lung radiomics (OLR, 1,316) and 13,824 3D CNN features (O3C, 13,824). Second, the local features, including five selected pulmonary function test (PFT) parameters (SLF, 5), 28 selected lung radiomics (SLR, 28), and 22 selected 3D CNN features (S3C, 22), are respectively selected from the original 11 PFT parameters (OLF, 11), 1,316 OLR, and 13,824 O3C by the least absolute shrinkage and selection operator (Lasso) algorithm. Meantime, the global features, including two fused PFT parameters (FLF, 2), six fused lung radiomics (FLR, 6), and 34 fused 3D CNN features (F3C, 34), are respectively fused by 11 OLF, 1,316 OLR, and 13,824 O3C using the principal component analysis (PCA) algorithm. Finally, we combine all the local and global features (SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C, 5&#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34) for dyspnea identification in COPD based on the MLP classifier.</p></sec>
<sec>
<title>Results</title>
<p>Our proposed method comprehensively improves classification performance. The MLP classifier with all the local and global features achieves the best classification performance at 87.7% of accuracy, 87.7% of precision, 87.7% of recall, 87.7% of F1-score, and 89.3% of AUC, respectively.</p></sec>
<sec>
<title>Discussion</title>
<p>Compared with single-modal data, the proposed strategy effectively improves the classification performance for dyspnea identification in COPD, providing an objective and effective tool for COPD management.</p></sec></abstract>
<kwd-group>
<kwd>dyspnea identification</kwd>
<kwd>COPD</kwd>
<kwd>multi-modal data</kwd>
<kwd>combination strategy</kwd>
<kwd>PFT parameters</kwd>
<kwd>lung radiomics features</kwd>
<kwd>3D CNN features</kwd>
<kwd>machine learning</kwd>
</kwd-group>
<counts>
<fig-count count="8"/>
<table-count count="7"/>
<equation-count count="7"/>
<ref-count count="55"/>
<page-count count="21"/>
<word-count count="10825"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1. Introduction</title>
<p>Chronic obstructive pulmonary disease (COPD) is a common lung disease characterized by persistent airflow limitation (<xref ref-type="bibr" rid="B1">1</xref>&#x02013;<xref ref-type="bibr" rid="B3">3</xref>). Because of this characterization, patients with COPD often have complications of dyspnea (<xref ref-type="bibr" rid="B4">4</xref>). However, as a leading symptom of COPD (<xref ref-type="bibr" rid="B5">5</xref>), dyspnea in COPD deserves special consideration regarding treatment in this fragile population for pre-clinical health management in COPD. Furthermore, multi-modal biomedical data combination has been a hot research topic for facilitating precision and/or personalized medicine (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>). Therefore, multi-modal biomedical data combination is also crucial for pre-clinical health management in dyspnea caused by COPD.</p>
<p>Pulmonary function test (PFT) and computed tomography (CT) have become indispensable for COPD assessment and diagnosis. The PFT and CT have their own advantages in diagnosing and evaluating COPD and are complementary. Compared with CT, PFT is a non-invasive way to diagnose COPD from stage 0 to IV, according to Global Initiative for Chronic Obstructive Lung Disease (GOLD) criteria accepted by the American Thoracic Society and the European Respiratory Society (<xref ref-type="bibr" rid="B3">3</xref>). Specifically, the forced expiratory volume in 1 s/forced vital capacity (FEV<sub>1</sub>/FVC) and FEV<sub>1</sub>% predicted in PFT are the gold standards for diagnosing the COPD stage. Meanwhile, in patients with COPD, forced inspiration, particularly the assessment of FIV<sub>1</sub>, yields objective information that correlates closely with subjective dyspnea ratings after bronchodilator inhalation (<xref ref-type="bibr" rid="B8">8</xref>). In addition, compared with PFT, CT images can reflect the change in the lung tissue of COPD patients. Thus, CT has been regarded as the most effective modality for characterizing and quantifying COPD (<xref ref-type="bibr" rid="B9">9</xref>). Specifically, chest CT images can indicate that the patients have suffered from mild lobular central emphysema and reveal decreased exercise tolerance in smokers without airflow limitations in their PFT results (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B10">10</xref>). In addition, chest CT images also can quantitatively analyze the bronchial, airway disease, emphysema, and vascular problems in COPD patients by measuring the parameters of the bronchi and vasculature (<xref ref-type="bibr" rid="B3">3</xref>). Based on the above, chest CT images should provide more imaging information for dyspnea identification in COPD.</p>
<p>Radiomics was proposed in 2007 to mine more information from medical images using advanced feature analysis (<xref ref-type="bibr" rid="B11">11</xref>). However, because the lesions as the region of interest (ROI) are diffusely distributed in the lungs, radiomics in COPD develops more slowly than other lung diseases, such as lung cancer or pulmonary nodules (<xref ref-type="bibr" rid="B12">12</xref>). With the significant progress of CT imaging technology, high-resolution CT (HRCT) imaging has become an effective method for the quantitative analysis of COPD (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B12">12</xref>). However, quantitative analysis of bronchial and vascular blood flow is still limited by HRCT imaging resolution. Furthermore, it is challenging to automatically, semi-automatically, or manually segment small trachea (such as small airways) and blood vessels from chest HRCT images (<xref ref-type="bibr" rid="B13">13</xref>&#x02013;<xref ref-type="bibr" rid="B15">15</xref>). In essence, COPD results from the characteristic pathological changes of the lung region, including the peripheral airway, parenchyma, and vessels. Therefore, lung imaging features extracted based on the lung region have been used for COPD analysis (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B12">12</xref>). Thus, it is reasonable to identify dyspnea based on lung region HRCT images, which effectively avoids the limitations of challenging segmentation tasks of small airways and vessels and is conducive to clinical application. Besides, the value of lung radiomics features extracted from lung region HRCT images in COPD assessment has also been confirmed (<xref ref-type="bibr" rid="B16">16</xref>).</p>
<p>There are potential applications of radiomics features in COPD, particularly for the diagnosis, treatment, and follow-up of COPD and future directions of radiomics features in COPD (<xref ref-type="bibr" rid="B17">17</xref>). Currently, lung radiomics features have been widely used for COPD stage classification (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B12">12</xref>), COPD survival prediction (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>), COPD presence prediction (<xref ref-type="bibr" rid="B20">20</xref>), COPD exacerbations (<xref ref-type="bibr" rid="B21">21</xref>), COPD early decision (<xref ref-type="bibr" rid="B22">22</xref>), and analysis of COPD and resting heart rate (<xref ref-type="bibr" rid="B3">3</xref>). However, radiomics features are extracted from medical images by specific calculation equations, preset types of images, and preset classes, limiting the forms of radiomics features. Convolutional neural networks (CNN) based on images for classification also rapidly developed (<xref ref-type="bibr" rid="B23">23</xref>). Features extracted from medical images based on the CNN model will compensate for the limitations of radiomics features. Therefore, deep CNN features extracted from lung region HRCT images should be paid attention to improve the classification performance for facilitating precision and/or personalized medicine.</p>
<p>Dyspnea, one of COPD&#x00027;s main symptoms, is currently assessed with the Modified British medical research council (mMRC) questionnaire (<xref ref-type="bibr" rid="B24">24</xref>). The mMRC scale, the most common validated scale to assess dyspnea for COPD patients in daily living, is used to assess the dyspnea scale (<xref ref-type="bibr" rid="B25">25</xref>). However, the mMRC lacks objectivity in identifying dyspnea. The accuracy of the mMRC depends on the understanding and cooperation attitude of the evaluator. Based on the above, previous works identified dyspnea based on physiological signals. For example, mild dyspnea is detected from pairs of speech recordings, achieving an accuracy of about 74% (<xref ref-type="bibr" rid="B26">26</xref>). Besides, respiratory symptoms are automatically detected using a low-power multi-input CNN processor, achieving an accuracy of 87.3% on dyspnea identification (<xref ref-type="bibr" rid="B27">27</xref>). However, dyspnea identification in COPD remains under-researched, especially regarding its clinical application for pre-clinical health management in COPD using multi-modal data.</p>
<p>Above, we summarize the advantages and disadvantages of PFT and HRCT. However, integrating the advantages of the PFT parameters, the lung radiomics features, and CNN features is crucial for dyspnea identification. Therefore, this paper proposes a multi-modal data combination strategy for dyspnea identification in COPD based on the multi-layer perceptron (MLP) classifier, providing an objective and effective model of dyspnea identification. Our contributions in this paper are briefly described as follows:</p>
<list list-type="order">
<list-item><p>We settle the problem that minor PFT parameters are easily overwhelmed by a large number of lung radiomics features and CNN features;</p></list-item>
<list-item><p>Further, inspired by CNN, we propose a combination strategy by combining the local and global features of the PFT parameters, the lung radiomics, and CNN features for improving the classification performance;</p></list-item>
<list-item><p>Last, our proposed combination strategy based on the MLP classifier achieves the best classification performance at 87.7% of accuracy, 87.7% of precision, 87.7% of recall, 87.7% of F1-score, and 89.3% of AUC, which may become an objective and effective tool for pre-clinical health management in COPD.</p></list-item>
</list>
</sec>
<sec id="s2">
<title>2. Materials and methods</title>
<p>This section details our study cohort and methodology, including the selection flow of 404 subjects, the dyspnea distribution of the subjects at different GOLD in our study cohort, and the framework of the proposed method.</p>
<sec>
<title>2.1. Materials</title>
<p>This study was approved by the ethics committee in the national clinical research center of China&#x00027;s respiratory diseases. In addition, all subjects provided written informed consent to the First Affiliated Hospital of Guangzhou Medical University before chest high-resolution computed tomography (HRCT) scans, PFT, and mMRC scale inquisition.</p>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> shows the selection flow of 404 subjects aged 40&#x02013;79 in our study cohort and the dyspnea distribution of the subjects at different GOLD in our study cohort. <xref ref-type="fig" rid="F1">Figure 1A</xref> shows the selection flow of 404 subjects in our study cohort. Specifically, Chinese participants are enrolled by the national clinical research center of respiratory diseases, China, from May 25, 2009, to January 11, 2011. Four hundred sixty-eight Chinese subjects participated in the study after being strictly selected by the inclusion and exclusion criteria I (<xref ref-type="bibr" rid="B28">28</xref>). More detailed inclusion and exclusion criteria I can also be found in our previous study (<xref ref-type="bibr" rid="B22">22</xref>). First, the 468 subjects were asked to undergo PFT and chest HRCT scans (TOSHIBA, kVp:120 kV, X-ray tube current:40 mA, slice thickness:1.0 mm) at the full inspiration state. Then, 404 subjects are strictly selected from the 468 subjects by the inclusion and exclusion criteria II. The inclusion and exclusion criteria II require that every subject meets the following two requirements simultaneously: (1) the subject has both chest HRCT images and PFT parameters; (2) the chest HRCT images, PFT parameters, and mMRC scale were obtained on the same day. Normal ordinary people always have shortness of breath during strenuous exercise. Therefore, if shortness of breath occurs only during strenuous exercise (mMRC score of 0), it is considered that there is no dyspnea. Otherwise, it is considered that the subjects suffered from dyspnea (mMRC score of 1&#x02013;4).</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>The subjects&#x00027; selection flow diagram and dyspnea distribution in this study. <bold>(A)</bold> The subjects&#x00027; selection flow diagram, including the enrollment and the inclusion and exclusion criteria I and II; <bold>(B)</bold> Dyspnea distribution of the subjects at GOLD 0-III&#x00026;IV.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0001.tif"/>
</fig>
<p>Besides, <xref ref-type="fig" rid="F1">Figure 1B</xref> shows the dyspnea distribution of the subjects at different GOLD. Our study cohort includes 254 subjects who suffered from dyspnea and 150 subjects without dyspnea. Eleven PFT parameters (OLF, 11) include diffusing capacity for carbon monoxide (DL<sub>CO</sub>, mmol/kPa x min), FEV<sub>1</sub> (L), FEV<sub>1</sub> after (FEV<sub>1</sub>_AFT, L), FVC (L), FVC after (FVC_AFT, L), functional residual capacity (FRC, L), inspiratory capacity (IC, L), FEV<sub>1</sub>/FVC (%), Carbon Monoxide Corr for Alveolar (KCO_BP), residual volume (RV, L), and total lung capacity (TLC, L), referring to the ATS/ERS standard (American Thoracic Society 2005) (<xref ref-type="bibr" rid="B29">29</xref>). Statistical information on the PFT parameters is available in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S1</xref> of <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>.</p></sec>
<sec>
<title>2.2. Methods</title>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> shows the proposed method in this study. The main idea of the proposed method in this paper is to combine PFT parameters and chest HRCT images for intelligent dyspnea identification in COPD based on machine learning (ML) classifiers.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>The framework of the proposed method. <bold>(A)</bold> The flow chart of the proposed method: (a) Lung region (ROI) segmentation; (b) Features extraction; (c) Multi-modal data combination strategy; (d) Dyspnea identification in COPD based on MLP classifier; <bold>(B)</bold> Detailed process of features extraction, including lung radiomics features and 3D CNN features.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0002.tif"/>
</fig>
<sec>
<title>2.2.1. Lung region segmentation</title>
<p><xref ref-type="fig" rid="F2">Figure 2A(a</xref>) shows that lung region mask images with red color are automatically segmented from the 404 sets of chest HRCT images using a state-of-the-art ResU-Net (<xref ref-type="bibr" rid="B30">30</xref>). The ResU-Net trained by human chest CT images with different lung diseases is a robust and standard segmentation model of pathological lungs (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B30">30</xref>). The architecture of the ResU-Net has been described in detail in our previous paper (<xref ref-type="bibr" rid="B31">31</xref>). In addition, 404 sets of lung region mask images have been checked and modified by experienced radiologists.</p></sec>
<sec>
<title>2.2.2. Feature extraction</title>
<p><xref ref-type="fig" rid="F2">Figure 2A(b</xref>) shows that the two standard models PyRadiomics (<xref ref-type="bibr" rid="B32">32</xref>) and pre-trained Med3D (<xref ref-type="bibr" rid="B33">33</xref>) are selected to effectively and comprehensively extract the imaging features of lung region HRCT images. First, the 404 sets of the lung region HRCT images with the Hounsfield unit (HU) are obtained based on the lung region mask images and their chest HRCT images (<xref ref-type="bibr" rid="B34">34</xref>). Then, lung radiomics and 3D CNN features are separately extracted from the lung region HRCT images based on PyRadiomics and pre-trained Med3D. Finally, 1,316 original lung radiomics (OLR, 1,316) and 13,824 3D CNN features (O3C, 13,824) are obtained per subject.</p>
<p><xref ref-type="fig" rid="F2">Figure 2B</xref> details the feature extraction process of 1,316 OLR. Specifically, the lung region HRCT images with HU separately are filtered by wavelet filter and Laplacian of Gaussian filter (LoG) filter, generating the derived images. Then, the lung region HRCT images and their derived images are used to extract 1,316 OLR based on PyRadiomics. Please refer to our previous study (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B22">22</xref>) for a more detailed description of lung radiomics extraction. In addition, the PyRadiomics is available on the website (<ext-link ext-link-type="uri" xlink:href="https://pyradiomics.readthedocs.io/en/latest/index.html">https://pyradiomics.readthedocs.io/en/latest/index.html</ext-link>), and the website also has given detailed explanations of radiomics (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>Besides, <xref ref-type="fig" rid="F2">Figure 2B</xref> also details the feature extraction process of 13,824 O3C. Med3D, a heterogeneous 3D network, is designed to extract general medical 3D features by building a 3DSeg-8 dataset with diverse modalities, target organs, and pathologies (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B33">33</xref>). A truncated transfer learning strategy is adopted to extract the 3D CNN features based on the pre-trained Med3D. Thus, only the encoder backbone (3D ResNet10) of pre-trained Med3D needs to be transferred to generate 13,824 O3C. First, we use the same method in Med3D to crop and pre-process the lung region HRCT images (280 &#x000D7; 400 &#x000D7; N&#x00027;). Second, the cropped and pre-processed lung images generate the CNN feature maps (512 &#x000D7; 35 &#x000D7; 50 &#x000D7; 75). Third, higher-order CNN feature maps (512 &#x000D7; 3 &#x000D7; 3 &#x000D7; 3) are obtained based on the CNN feature maps (512 &#x000D7; 35 &#x000D7; 50 &#x000D7; 75) by 3D average pooling. Finally, the higher-order CNN feature maps (512 &#x000D7; 3 &#x000D7; 3 &#x000D7; 3) per subject are flattened into 13,824 O3C (512 &#x000D7; 3 &#x000D7; 3 &#x000D7; 3 = 13,824).</p>
<p>Before generating the CNN feature maps (512 &#x000D7; 35 &#x000D7; 50 &#x000D7; 75), the lung region of the cropped and pre-processed lung images needs to be normalized, and random values in accord with a Gaussian distribution are generated outside the lung region. Specifically, the mathematical expression of normalization is given by Eq. (1).</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mrow><mml:msup><mml:mi>x</mml:mi><mml:mo>&#x02032;</mml:mo></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mover accent='true'><mml:mi>x</mml:mi><mml:mo>&#x000AF;</mml:mo></mml:mover></mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mfrac></mml:mrow></mml:math></disp-formula>
<p>where <italic>x</italic>, <inline-formula><mml:math id="M2"><mml:mover accent="false" class="mml-overline"><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo accent="true">&#x000AF;</mml:mo></mml:mover></mml:math></inline-formula>, and &#x003C3; are the HU value, mean, and standard deviation of cropped and pre-processed lung images, respectively; <italic>x</italic>&#x02032; is the normalized value of cropped and pre-processed lung images.</p></sec>
<sec>
<title>2.2.3. Multi-modal data combination strategy</title>
<p><xref ref-type="fig" rid="F2">Figure 2A(c</xref>) details the process of the proposed multi-modal data combination strategy in this paper. Inspired by CNN, a combination strategy by combining the local and global features of the PFT parameters, lung radiomics, and 3D CNN features is proposed for improving the classification performance.</p>
<p>The local and global features of 11 OLF, 1,316 OLR, and 13,824 O3C are available in <xref ref-type="supplementary-material" rid="SM1">Supplementary Tables S2</xref>&#x02013;<xref ref-type="supplementary-material" rid="SM1">7</xref> of <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>.</p>
<p>First, the local features are respectively selected from 11 OLF, 1,316 OLR, and 13,824 O3C by the least absolute shrinkage and selection operator (Lasso) algorithm (<xref ref-type="bibr" rid="B35">35</xref>), which has been proved to improve classification performance (<xref ref-type="bibr" rid="B3">3</xref>). A standard python package LassoCV (definition in Python 3.6), with 10 fold cross-validation, is performed in this paper. Subsequently, the local features are selected, including five selected PFT parameters (SLF, 5), 28 selected lung radiomics features (SLR, 28), and 22 selected 3D CNN features (S3C, 22). Second, global features of 11 OLF, 1,316 OLR, and 13,824 O3C are respectively fused by the principal component analysis (PCA) algorithm (a classic algorithm for reducing the number of dimensions) with a general 95% contribution (<xref ref-type="bibr" rid="B36">36</xref>). A standard python package sklearn.decomposition.PCA(svd_solver=&#x00027;auto&#x00027;) (definition in Python 3.6) is performed in this paper. Subsequently, the global features are fused, including two fused PFT parameters (FLF, 2), six fused lung radiomics features (FLR, 6), and 34 fused 3D CNN features (F3C, 34). Finally, all the local and global features (SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C, 5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34) are combined as the variables for dyspnea identification in COPD.</p>
<p>The mathematical expression of the Lasso algorithm (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B35">35</xref>) is given by Equation (2),</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M3"><mml:mrow><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>g</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mtext>i</mml:mtext></mml:msub><mml:mo>&#x02212;</mml:mo><mml:msub><mml:mi>&#x003B2;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>&#x02212;</mml:mo><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>p</mml:mi></mml:munderover><mml:mrow><mml:msub><mml:mi>&#x003B2;</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo>+</mml:mo><mml:mi>&#x003BB;</mml:mi><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mi>p</mml:mi></mml:munderover><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:msub><mml:mi>&#x003B2;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>where <italic>x</italic><sub><italic>ij</italic></sub> is the value of the independent variable (OLF: 404 &#x000D7; 11 subjects, OLR: 404 &#x000D7; 1,316 subjects, or O3C: 404 &#x000D7; 13,824 subjects) after a normalization operation (Min-max normalization) (<xref ref-type="bibr" rid="B3">3</xref>). <italic>y</italic><sub><italic>i</italic></sub> is the value of the dependent variable (subjects with dyspnea or without dyspnea), &#x003BB; is the penalty parameter (&#x003BB; &#x02265;0), &#x003B2;<sub><italic>j</italic></sub> is the regression coefficient, <italic>i</italic>&#x02208;[1, n], and <italic>j</italic>&#x02208;[0, <italic>p</italic>].</p>
<p>The detailed fusion process of the PCA with the singular value decomposition (SVD) algorithm (<xref ref-type="bibr" rid="B36">36</xref>) is introduced in this paper. First, a feature matrix <italic>A</italic><sub><italic>m</italic>&#x000D7;<italic>n</italic></sub> &#x0003D; (<italic>a</italic><sub>1</sub>,<italic>a</italic><sub>2</sub>,<italic>a</italic><sub>3</sub>,&#x02026;,<italic>a</italic><sub><italic>n</italic></sub>) is constructed by 404 subjects with their features (OLF: 404 &#x000D7; 11 subjects, OLR: 404 &#x000D7; 1,316 subjects, or O3C: 404 &#x000D7; 13,824 subjects). Second, the eigenvalues of the feature matrix <italic>A</italic><sub><italic>m</italic>&#x000D7;<italic>n</italic></sub> are obtained by the SVD algorithm (Equations 3, 4). Third, normalize the eigenvalues, rank the normalized eigenvalues in descending order, and determine the corresponding eigenvalues (&#x003BB;<sub>1</sub>,&#x003BB;<sub>2</sub>,&#x003BB;<sub>3</sub>,&#x02026;,&#x003BB;<sub><italic>k</italic></sub>) with their 95% accumulation. Then, the eigenvectors are calculated based on the corresponding eigenvalues (&#x003BB;<sub>1</sub>&#x02192;&#x003BE;<sub>1</sub>,&#x003BB;<sub>2</sub>&#x02192;&#x003BE;<sub>2</sub>,&#x003BB;<sub>3</sub>&#x02192;&#x003BE;<sub>3</sub>,&#x02026;,&#x003BB;<sub><italic>k</italic></sub>&#x02192;&#x003BE;<sub><italic>k</italic></sub>) and used to construct the transformation matrix <italic>P</italic><sub><italic>k</italic>&#x000D7;<italic>n</italic></sub> &#x0003D; (&#x003BE;<sub>1</sub>, &#x003BE;<sub>2</sub>, &#x003BE;<sub>3</sub>,&#x02026;, &#x003BE;<sub><italic>k</italic></sub>)<sub><italic>k</italic>&#x000D7;<italic>n</italic></sub>.
Last, the fused features <italic>B</italic><sub><italic>m</italic>&#x000D7;<italic>k</italic></sub> are obtained based on the feature matrix <italic>A</italic><sub><italic>m</italic>&#x000D7;<italic>n</italic></sub> and the transformation matrix <italic>P</italic><sub><italic>k</italic>&#x000D7;<italic>n</italic></sub> using Equation (5).</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M4"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:msup><mml:mi>A</mml:mi><mml:mi>T</mml:mi></mml:msup><mml:mi>A</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>U</mml:mi><mml:mtext>&#x003A3;</mml:mtext><mml:msup><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:msup></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup><mml:mi>U</mml:mi><mml:mtext>&#x003A3;</mml:mtext><mml:msup><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:msup><mml:mo>=</mml:mo><mml:mi>V</mml:mi><mml:msup><mml:mtext>&#x003A3;</mml:mtext><mml:mi>T</mml:mi></mml:msup><mml:msup><mml:mi>U</mml:mi><mml:mi>T</mml:mi></mml:msup><mml:mi>U</mml:mi><mml:mtext>&#x003A3;</mml:mtext><mml:msup><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:msup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mo>=</mml:mo><mml:mi>V</mml:mi><mml:msup><mml:mtext>&#x003A3;</mml:mtext><mml:mi>T</mml:mi></mml:msup><mml:mtext>&#x003A3;</mml:mtext><mml:msup><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:msup><mml:mo>=</mml:mo><mml:mi>V</mml:mi><mml:msup><mml:mtext>&#x003A3;</mml:mtext><mml:mn>2</mml:mn></mml:msup><mml:msup><mml:mi>V</mml:mi><mml:mi>T</mml:mi></mml:msup></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E5"><label>(4)</label><mml:math id="M5"><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>&#x003BB;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BB;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BB;</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BB;</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msqrt><mml:mrow><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msqrt><mml:mo>,</mml:mo><mml:msqrt><mml:mrow><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:msqrt><mml:mo>,</mml:mo><mml:msqrt><mml:mrow><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mn>3</mml:mn></mml:msub></mml:mrow></mml:msqrt><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msqrt><mml:mrow><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:msqrt></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula>
<disp-formula id="E7"><label>(5)</label><mml:math id="M6"><mml:mrow><mml:mtable columnalign='left'><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:msub><mml:mi>B</mml:mi><mml:mi>m</mml:mi></mml:msub><mml:mo>&#x000D7;</mml:mo><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mn>......</mml:mn><mml:msub><mml:mi>b</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msubsup><mml:mi>P</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mi>T</mml:mi></mml:msubsup></mml:mrow></mml:mtd></mml:mtr><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>a</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>a</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mn>....</mml:mn><mml:msub><mml:mi>a</mml:mi><mml:mi>n</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>m</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr columnalign='left'><mml:mtd 
columnalign='left'><mml:mrow><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:mo>&#x000A0;</mml:mo><mml:msubsup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>&#x003BE;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BE;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BE;</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mn>.....</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>&#x003BE;</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mi>T</mml:mi></mml:msubsup></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>
<p>where <italic>U</italic> <sub>m&#x000D7;m</sub> and <italic>V</italic> <sub>n&#x000D7;n</sub> are the orthogonal matrices, &#x02211;<sub><italic>m</italic>&#x000D7;<italic>n</italic></sub> = (&#x003C3;<sub>1</sub>, &#x003C3;<sub>2</sub>, &#x003C3;<sub>3</sub>,&#x02026;, &#x003C3;<sub><italic>k</italic></sub>) is the diagonal matrix, and &#x003C3;<sub><italic>i</italic></sub> is the i<sup>th</sup> eigenvalue of the matrix <italic>A</italic><sup><italic>T</italic></sup><italic>A</italic>.</p></sec>
<sec>
<title>2.2.4. Dyspnea identification in COPD</title>
<p>The early MLP classifier was a linear model that could only handle simple binary classification and had difficulty analyzing complex non-linear problems (<xref ref-type="bibr" rid="B37">37</xref>). However, its non-linear expression ability has been effectively improved by introducing hidden layers and activation functions. Currently, the MLP classifier is widely used in machine learning, pattern recognition, and other fields (<xref ref-type="bibr" rid="B38">38</xref>&#x02013;<xref ref-type="bibr" rid="B41">41</xref>).</p>
<p><xref ref-type="fig" rid="F2">Figure 2A(c</xref>) shows that the MLP classifier based on all the local and global features is used to identify dyspnea in COPD. A standard Python package sklearn.neural_network.MLPClassifier (definition in Python 3.6) is performed to identify dyspnea. The parameters of the MLPClassifier package are set as follows: hidden_layer_sizes=(256,128,64), activation=&#x00027;tanh&#x00027;, solver=&#x00027;adam&#x00027;, alpha=0.0001, tol=0.0005, and max_iter=1000, respectively.</p></sec>
<sec>
<title>2.2.5. Experiments</title>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> shows the experimental design in this paper. Our experiment includes four experiments (Experiments 1&#x02013;4) to verify the effectiveness of our proposed method. Previous studies used six classical machine learning (ML) classifiers to complete the COPD classification task (<xref ref-type="bibr" rid="B3">3</xref>). The six classical ML classifiers include MLP, support vector machine (SVM) (<xref ref-type="bibr" rid="B42">42</xref>), random forest (RF) (<xref ref-type="bibr" rid="B43">43</xref>), decision tree (DT) (<xref ref-type="bibr" rid="B44">44</xref>), gradient boosting (GB) (<xref ref-type="bibr" rid="B45">45</xref>), and linear discriminant analysis (LDA) (<xref ref-type="bibr" rid="B46">46</xref>). Based on the six above ML classifiers, K-nearest neighbor (KNN) (<xref ref-type="bibr" rid="B47">47</xref>) and logistic regression (LR) (<xref ref-type="bibr" rid="B48">48</xref>) are further considered to compare the performance of dyspnea recognition models. Therefore, eight classical ML classifiers, including MLP, SVM, RF, KNN, DT, GB, LDA, and LR, are adopted to identify dyspnea in COPD based on different features. The definitions and parameters of the eight classifiers are available in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S8</xref> of <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Experimental design in this paper.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0003.tif"/>
</fig>
<p>First, the 404 subjects in our study cohort are divided into the training set (<italic>n</italic> = 323) and the test set (<italic>n</italic> = 81). Specifically, 113 subjects suffered from dyspnea, and 210 subjects were without dyspnea in the training set. The 27 subjects suffered from dyspnea, and 54 subjects were without dyspnea in the test set. Then, the standard Python packages of eight ML classifiers (definition in Python 3.6) are trained based on the training set, respectively. Last, the trained models are separately used to identify dyspnea based on the test set, giving the evaluation metrics of the classification performance. Specifically, the evaluation metrics of the classification performance include accuracy, precision, recall, F1-score, and the area under the ROC curve (AUC).</p>
<p>In Experiment 1, the classification performances are obtained based on the eight classical ML classifiers with OLF (11), OLR (1,316), O3C (13,824), and their arbitrary combination, respectively. In Experiment 2, the classification performances are obtained based on the above classifiers with SLF (5), SLR (28), S3C (22), and their arbitrary combination, respectively. Similarly, in Experiment 3, the classification performances are obtained based on the above ML classifiers with FLF (2), FLR (6), F3C (34), and their arbitrary combination, respectively. Finally, in Experiment 4, the classification performances are obtained based on the above classifiers with SLF &#x0002B; FLF (5 &#x0002B; 2), SLR &#x0002B; FLR (28 &#x0002B; 6), S3C &#x0002B; F3C (22 &#x0002B; 34), SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR (5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6), SLF &#x0002B; FLF &#x0002B; S3C &#x0002B; F3C (5 &#x0002B; 2 &#x0002B; 22 &#x0002B; 34), SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34), and SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (selected ALL &#x0002B; fused ALL, 5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34), respectively.</p></sec></sec></sec>
<sec id="s3">
<title>3. Results</title>
<p>This section reports the experimental results of the eight classical ML classifiers with different features. Specifically, <xref ref-type="table" rid="T1">Tables 1</xref>&#x02013;<bold>7</bold> report the experimental results of evaluation metrics. <xref ref-type="fig" rid="F4">Figures 4</xref>&#x02013;<bold>7</bold> visually show these evaluation metrics, the mean value of evaluation metrics, and the receiver operating characteristic curve (ROC). In addition, the evaluation metric AUC in <xref ref-type="table" rid="T1">Tables 1</xref>&#x02013;<bold>7</bold> is calculated from their ROCs.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Evaluation metrics of the different classifiers with three original features (Experiment 1) on the test set.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold>Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">80.2<sup>i</sup>/77.8<sup>ii</sup>/74.1<sup>iii</sup></td>
<td valign="top" align="center">79.8/79.2/73.2</td>
<td valign="top" align="center">80.2/77.8/74.1</td>
<td valign="top" align="center">79.8/75.2/72.0</td>
<td valign="top" align="center">83.7/82.0/78.6</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">76.5/66.7/65.4</td>
<td valign="top" align="center">63.4/67.6/66.1</td>
<td valign="top" align="center">66.7/66.7/65.4</td>
<td valign="top" align="center">63.2/66.7/65.7</td>
<td valign="top" align="center">76.5/76.8/75.3</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">71.6/72.8/66.7</td>
<td valign="top" align="center">70.9/80.7/62.0</td>
<td valign="top" align="center">71.6/72.8/66.7</td>
<td valign="top" align="center">71.2/65.3/58.8</td>
<td valign="top" align="center">77.5/76.6/74.9</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">65.4/63.0/54.3</td>
<td valign="top" align="center">70.2/65.2/63.7</td>
<td valign="top" align="center">65.4/63.0/54.3</td>
<td valign="top" align="center">66.4/63.7/55.2</td>
<td valign="top" align="center">69.5/68.7/64.6</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">66.7/67.9/72.8</td>
<td valign="top" align="center">67.6/67.9/72.4</td>
<td valign="top" align="center">66.7/67.9/72.8</td>
<td valign="top" align="center">66.7/67.9/72.6</td>
<td valign="top" align="center">66.7/67.9/72.8</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">70.4/72.8/71.6</td>
<td valign="top" align="center">70.4/72.8/71.2</td>
<td valign="top" align="center">70.4/72.8/71.6</td>
<td valign="top" align="center">70.4/68.9/67.0</td>
<td valign="top" align="center">79.0/78.1/75.5</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">76.5/58.0/61.7</td>
<td valign="top" align="center">75.8/59.6/60.0</td>
<td valign="top" align="center">76.5/58.0/61.7</td>
<td valign="top" align="center">75.9/58.7/60.6</td>
<td valign="top" align="center">83.7/61.2/71.0</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">77.8/66.7/67.9</td>
<td valign="top" align="center">77.1/65.3/64.8</td>
<td valign="top" align="center">77.8/66.7/67.9</td>
<td valign="top" align="center">77.0/65.7/63.2</td>
<td valign="top" align="center">84.0/71.5/76.5</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">71.9/68.2/66.8</td>
<td valign="top" align="center">71.9/69.8/66.7</td>
<td valign="top" align="center">71.9/68.2/66.8</td>
<td valign="top" align="center">71.3/66.6/64.4</td>
<td valign="top" align="center">77.6/72.9/73.7</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii: OLF (11)/ OLR (1,316) / O3C (13,824).</p>
</table-wrap-foot>
</table-wrap>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>The evaluation metrics pictures and ROCs of eight classifiers with seven original classification features (three original features and four original combination features in Experiment 1). <bold>(A)</bold> The evaluation metrics pictures include <bold>(a)</bold> Accuracy, <bold>(b)</bold> Precision, <bold>(c)</bold> Recall, <bold>(d)</bold> F1-score, <bold>(e)</bold> AUC, and <bold>(f)</bold> Mean. <bold>(B)</bold> ROCs of the ML classifiers include <bold>(a)</bold> OLF (11), <bold>(b)</bold> OLR (1,316), <bold>(c)</bold> O3C (13,824), <bold>(d)</bold> OLF &#x0002B; OLR (11 &#x0002B; 1,316), <bold>(e)</bold> OLF &#x0002B; O3C (11 &#x0002B; 13,824), <bold>(f)</bold> OLR &#x0002B; O3C (1,316 &#x0002B; 13,824), <bold>(g)</bold> OLF &#x0002B; OLR &#x0002B; O3C (11 &#x0002B; 1,316 &#x0002B; 13,824).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0004.tif"/>
</fig>
<sec>
<title>3.1. The classification performance of original features and their combination features</title>
<p><xref ref-type="table" rid="T1">Tables 1</xref>, <xref ref-type="table" rid="T2">2</xref> report the experimental results of three original features and their combination features based on the eight classical ML classifiers in Experiment 1. Specifically, three original features include OLF (11), OLR (1,316), and O3C (13,824), and their combination features include OLF &#x0002B; OLR (11 &#x0002B; 1,316), OLF &#x0002B; O3C (11 &#x0002B; 13,824), OLR &#x0002B; O3C (1,316 &#x0002B; 13,824), and OLF &#x0002B; OLR &#x0002B; O3C (original ALL, 11 &#x0002B; 1,316 &#x0002B; 13,824).</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Evaluation metrics of the different classifiers with four original combination features (Experiment 1) on the test set.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">79.0<sup>i</sup>/76.5<sup>ii</sup>/77.8<sup>iii</sup>/80.2<sup>iv</sup></td>
<td valign="top" align="center">78.8/75.9/77.1/80.1</td>
<td valign="top" align="center">79.0/76.5/77.8/80.2</td>
<td valign="top" align="center">77.7/75.1/77.0/79.2</td>
<td valign="top" align="center">85.0/79.3/80.9/83.4</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">72.8/65.4/65.4/65.4</td>
<td valign="top" align="center">76.3/66.1/55.6/55.6</td>
<td valign="top" align="center">72.8/65.4/65.4/65.4</td>
<td valign="top" align="center">67.0/65.7/54.7/54.7</td>
<td valign="top" align="center">77.9/77.5/73.4/73.2</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">74.1/67.9/66.7/67.9</td>
<td valign="top" align="center">77.6/65.6/62.3/67.5</td>
<td valign="top" align="center">74.1/67.9/66.7/67.9</td>
<td valign="top" align="center">69.0/59.6/60.1/58.0</td>
<td valign="top" align="center">80.2/73.8/76.9/76.6</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">63.0/65.4/63.0/60.5</td>
<td valign="top" align="center">64.4/68.4/64.4/66.3</td>
<td valign="top" align="center">63.0/65.4/63.0/60.5</td>
<td valign="top" align="center">63.5/66.3/63.5/61.6</td>
<td valign="top" align="center">75.2/71.3/69.1/70.4</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">65.4/74.1/67.9/74.1</td>
<td valign="top" align="center">66.1/73.8/66.3/74.3</td>
<td valign="top" align="center">65.4/74.1/67.9/74.1</td>
<td valign="top" align="center">65.7/73.9/66.7/74.2</td>
<td valign="top" align="center">65.4/74.1/67.9/74.1</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">75.3/69.1/67.9/74.1</td>
<td valign="top" align="center">78.7/62.6/64.8/73.6</td>
<td valign="top" align="center">75.3/64.2/67.9/74.1</td>
<td valign="top" align="center">70.9/63.2/63.2/71.4</td>
<td valign="top" align="center">82.0/79.3/77.3/79.0</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">55.6/64.2/67.9/65.4</td>
<td valign="top" align="center">58.8/62.6/65.6/61.5</td>
<td valign="top" align="center">55.6/64.2/67.9/65.4</td>
<td valign="top" align="center">56.6/63.2/65.6/61.4</td>
<td valign="top" align="center">57.9/71.4/73.8/70.8</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">70.4/67.9/67.9/63.0</td>
<td valign="top" align="center">69.0/65.6/66.3/67.8</td>
<td valign="top" align="center">70.4/67.9/67.9/63.0</td>
<td valign="top" align="center">69.3/65.6/66.7/64.0</td>
<td valign="top" align="center">74.4/79.7/74.1/66.7</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">69.5/68.8/68.1/68.8</td>
<td valign="top" align="center">71.2/67.6/65.3/68.3</td>
<td valign="top" align="center">69.4/68.2/68.1/68.8</td>
<td valign="top" align="center">67.5/66.6/64.7/65.6</td>
<td valign="top" align="center">74.8/75.8/74.2/74.3</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii/iv: OLF &#x0002B; OLR (11 &#x0002B; 1,316)/ OLF &#x0002B; O3C (11 &#x0002B; 13,824)/ OLR &#x0002B; O3C (1,316 &#x0002B; 13,824)/ OLF &#x0002B; OLR &#x0002B; O3C (11 &#x0002B; 1,316 &#x0002B; 13,824).</p>
</table-wrap-foot>
</table-wrap>
<p><xref ref-type="table" rid="T1">Tables 1</xref>, <xref ref-type="table" rid="T2">2</xref> and <xref ref-type="fig" rid="F4">Figures 4A(a&#x02013;e</xref>) show that the MLP classifier performs better than other classifiers. <xref ref-type="fig" rid="F4">Figure 4B</xref> shows the ROCs of the three single original features and their combination features based on the eight classical ML classifiers. Furthermore, the classification performance of the MLP classifier with OLF (11) is the best of the three original features, achieving 80.2% of accuracy, 79.8% of precision, 80.2% of recall, 79.8% of F1-score, and 83.7% of AUC. The classification performance of the MLP classifier with OLR is better than that of O3C, achieving 77.8% of accuracy, 79.2% of precision, 77.8% of recall, 75.2% of F1-score, and 82.0% of AUC. However, all original combination features have not improved the classification performance compared with single OLF (11). Specifically, the classification performance of the MLP classifier with OLF &#x0002B; OLR (11 &#x0002B; 1,316) performs best at AUC, achieving 85.0%. Other evaluation metrics of OLF &#x0002B; OLR (11 &#x0002B; 1,316) based on the MLP classifier are 79.0% of accuracy, 78.8% of precision, 79.0% of recall, and 77.7% of F1-score. Except for OLF &#x0002B; OLR (11 &#x0002B; 1,316), the MLP classifier with the original ALL (11 &#x0002B; 1,316 &#x0002B; 13,824) performs better than other original combination features at AUC, achieving 83.4%. Other evaluation metrics of original ALL (11 &#x0002B; 1,316 &#x0002B; 13,824) based on the MLP classifier are 80.2% of accuracy, 80.1% of precision, 80.2% of recall, and 79.2% of F1-score. <xref ref-type="fig" rid="F4">Figure 4A(f</xref>) shows the mean evaluation metrics of all classifiers in Experiment 1, and the mean evaluation metrics of single original features OLF (11) are best.</p></sec>
<sec>
<title>3.2. The classification performance of selected features and their combination features</title>
<p><xref ref-type="table" rid="T3">Tables 3</xref>, <xref ref-type="table" rid="T4">4</xref> report the experimental results of three selected features and their combination features based on the eight classical ML classifiers in Experiment 2. Specifically, three selected features include SLF (5), SLR (28), and S3C (22), and their combination features include SLF &#x0002B; SLR (5 &#x0002B; 28), SLF &#x0002B; S3C (5 &#x0002B; 22), SLR &#x0002B; S3C (28 &#x0002B; 22), and SLF &#x0002B; SLR &#x0002B; S3C (selected ALL, 5 &#x0002B; 28 &#x0002B; 22).</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Evaluation metrics of the different classifiers with three selected features (Experiment 2) on the test set.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">80.2<sup>i</sup>/80.2<sup>ii</sup>/77.8<sup>iii</sup></td>
<td valign="top" align="center">79.8/80.1/77.1</td>
<td valign="top" align="center">80.2/80.2/77.8</td>
<td valign="top" align="center">79.8/79.2/77.0</td>
<td valign="top" align="center">83.7/84.0/80.5</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">75.3/77.8/67.9</td>
<td valign="top" align="center">74.6/80.8/64.8</td>
<td valign="top" align="center">75.3/77.8/67.9</td>
<td valign="top" align="center">73.5/74.5/62.2</td>
<td valign="top" align="center">81.7/82.5/73.9</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">74.1/79.0/70.4</td>
<td valign="top" align="center">73.5/81.8/68.8</td>
<td valign="top" align="center">74.1/79.0/70.4</td>
<td valign="top" align="center">73.7/76.3/66.0</td>
<td valign="top" align="center">81.8/81.4/73.2</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">69.1/67.9/65.4</td>
<td valign="top" align="center">68.9/70.8/70.2</td>
<td valign="top" align="center">69.1/67.9/65.4</td>
<td valign="top" align="center">69.0/68.7/66.4</td>
<td valign="top" align="center">79.9/75.3/77.9</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">72.8/67.9/63.0</td>
<td valign="top" align="center">72.8/65.9/57.9</td>
<td valign="top" align="center">72.8/67.9/63.0</td>
<td valign="top" align="center">72.8/66.2/58.6</td>
<td valign="top" align="center">79.4/67.9/66.1</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">74.1/79.0/71.6</td>
<td valign="top" align="center">74.9/80.3/70.1</td>
<td valign="top" align="center">74.1/79.0/71.6</td>
<td valign="top" align="center">74.4/76.8/69.3</td>
<td valign="top" align="center">82.9/84.0/74.1</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">76.5/72.8/67.9</td>
<td valign="top" align="center">75.8/71.7/66.8</td>
<td valign="top" align="center">76.5/72.8/67.9</td>
<td valign="top" align="center">75.9/71.9/67.2</td>
<td valign="top" align="center">82.4/83.4/80.2</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">77.8/75.3/69.1</td>
<td valign="top" align="center">77.8/74.4/67.9</td>
<td valign="top" align="center">77.8/75.3/69.1</td>
<td valign="top" align="center">76.6/74.4/68.2</td>
<td valign="top" align="center">82.7/83.5/80.7</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">75.0/75.0/69.1</td>
<td valign="top" align="center">74.8/75.7/68.0</td>
<td valign="top" align="center">75.0/75.0/69.1</td>
<td valign="top" align="center">74.5/73.5/66.9</td>
<td valign="top" align="center">81.8/80.3/75.8</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii: SLF (5)/ SLR (28)/ S3C (22).</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Evaluation metrics of the different classifiers with four selected combination features (Experiment 2) on the test set.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">82.7<sup>i</sup>/80.2<sup>ii</sup>/85.2<sup>iii</sup>/85.2<sup>iv</sup></td>
<td valign="top" align="center">82.5/80.2/85.2/85.0</td>
<td valign="top" align="center">82.7/80.2/85.2/85.2</td>
<td valign="top" align="center">82.1/80.2/84.6/85.0</td>
<td valign="top" align="center">87.7/85.2/85.6/87.9</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">77.8/69.1/71.6/72.8</td>
<td valign="top" align="center">79.2/66.9/70.5/72.1</td>
<td valign="top" align="center">77.8/69.1/71.6/72.8</td>
<td valign="top" align="center">75.2/64.1/67.9/69.6</td>
<td valign="top" align="center">86.6/77.9/82.0/85.5</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">80.2/77.8/79.0/81.5</td>
<td valign="top" align="center">84.8/77.2/80.3/83.7</td>
<td valign="top" align="center">80.2/77.8/79.0/81.5</td>
<td valign="top" align="center">77.4/76.6/76.8/79.5</td>
<td valign="top" align="center">85.4/82.0/82.2/84.7</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">75.3/66.7/67.9/76.5</td>
<td valign="top" align="center">78.0/72.0/69.2/75.9</td>
<td valign="top" align="center">75.3/66.7/67.9/76.5</td>
<td valign="top" align="center">75.9/67.6/68.4/75.1</td>
<td valign="top" align="center">83.0/80.3/76.6/79.1</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">72.8/70.4/67.9/70.4</td>
<td valign="top" align="center">71.7/69.0/65.6/68.8</td>
<td valign="top" align="center">72.8/70.4/67.9/70.4</td>
<td valign="top" align="center">71.9/69.3/65.6/68.8</td>
<td valign="top" align="center">72.8/70.4/67.9/70.4</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">84.0/79.0/77.8/81.5</td>
<td valign="top" align="center">84.1/78.8/77.6/81.3</td>
<td valign="top" align="center">84.0/79.0/77.8/81.5</td>
<td valign="top" align="center">83.2/77.7/76.2/80.7</td>
<td valign="top" align="center">86.2/80.5/82.0/83.5</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">74.1/70.4/72.8/74.1</td>
<td valign="top" align="center">73.0/69.9/71.7/73.2</td>
<td valign="top" align="center">74.1/70.4/72.8/74.1</td>
<td valign="top" align="center">72.9/70.1/71.9/73.3</td>
<td valign="top" align="center">85.0/83.2/84.1/85.0</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">72.8/69.1/80.2/80.2</td>
<td valign="top" align="center">71.7/68.3/79.8/79.8</td>
<td valign="top" align="center">72.8/69.1/80.2/80.2</td>
<td valign="top" align="center">71.9/68.6/79.8/79.8</td>
<td valign="top" align="center">85.4/82.7/84.5/86.4</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">77.5/72.8/75.3/77.8</td>
<td valign="top" align="center">78.1/72.8/75.0/77.5</td>
<td valign="top" align="center">77.5/72.8/75.3/77.8</td>
<td valign="top" align="center">76.3/72.2/73.9/76.5</td>
<td valign="top" align="center">84.0/80.2/80.6/82.8</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii/iv: SLF &#x0002B; SLR (5 &#x0002B; 28)/ SLF &#x0002B; S3C (5 &#x0002B; 22)/ SLR &#x0002B; S3C (28 &#x0002B; 22)/ SLF &#x0002B; SLR &#x0002B; S3C (5 &#x0002B; 28 &#x0002B; 22).</p>
</table-wrap-foot>
</table-wrap>
<p><xref ref-type="table" rid="T3">Table 3</xref> and <xref ref-type="fig" rid="F5">Figures 5A(a&#x02013;e</xref>) show that the MLP classifier with three single-selected features performs better than other classifiers in Experiment 2. <xref ref-type="fig" rid="F5">Figure 5B</xref> shows the ROCs of the three single-selected features and their combination features based on the eight classical ML classifiers. Furthermore, the MLP classifier with SLR (28) performs best, achieving 80.2% of accuracy, 80.1% of precision, 80.2% of recall, 79.2% of F1-score, and 84.0% of AUC. Compared with the classification performance of OLF (11) based on the MLP classifier, that of SLF (5) remains unchanged. However, the MLP classifier with SLR (28) and S3C (22) separately performs better than that with OLR (1,316) and O3C (13,824). Specifically, the classification performance of SLR (28) has improved by 2.4% of accuracy, 0.9% of precision, 2.4% of recall, 4.0% of F1-score, and 2.0% of AUC. On the other hand, the classification performance with S3C (22) has improved by 3.7% of accuracy, 3.9% of precision, 3.7% of recall, 5.0% of F1-score, and 1.9% of AUC.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>The evaluation metrics pictures and ROCs of eight classifiers with seven selected features (three selected features and four selected combination features in Experiment 2). <bold>(A)</bold> The evaluation metrics pictures include <bold>(a)</bold> Accuracy, <bold>(b)</bold> Precision, <bold>(c)</bold> Recall, <bold>(d)</bold> F1-score, <bold>(e)</bold> AUC, and <bold>(f)</bold> Mean. <bold>(B)</bold> ROCs of the ML classifiers include <bold>(a)</bold> SLF (5), <bold>(b)</bold> SLR (28), <bold>(c)</bold> S3C (22), <bold>(d)</bold> SLF &#x0002B; SLR (5 &#x0002B; 28), <bold>(e)</bold> SLF &#x0002B; S3C (5 &#x0002B; 22), <bold>(f)</bold> SLR &#x0002B; S3C (28 &#x0002B; 22), and <bold>(g)</bold> SLF &#x0002B; SLR &#x0002B; S3C (5 &#x0002B; 28 &#x0002B; 22).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0005.tif"/>
</fig>
<p><xref ref-type="table" rid="T4">Table 4</xref> and <xref ref-type="fig" rid="F5">Figures 5A(a&#x02013;e</xref>) show that the MLP classifier with selected combination features SLF &#x0002B; SLR &#x0002B; S3C (5 &#x0002B; 28 &#x0002B; 22) performs best. Specifically, the SLF &#x0002B; SLR &#x0002B; S3C (selected ALL, 5 &#x0002B; 28 &#x0002B; 22) based on the MLP classifier performs best, achieving 85.2% of accuracy, 85.0% of precision, 85.2% of recall, 85.0% of F1-score, and 87.9% of AUC. Compared with the classification performance of the single-selected features based on the MLP classifier, the selected combination features based on the MLP classifier performs better. Specifically, compared with the best classification performance of the single-selected features SLR (28) based on the MLP classifier, that of the selected combination features SLF &#x0002B; SLR &#x0002B; S3C (selected ALL, 5&#x0002B;28&#x0002B;22) has improved by 5.0% of accuracy, 4.9% of precision, 5.0% of recall, 5.8% of F1-score, and 3.9% of AUC. Compared with the classification performance of the original combination features based on the MLP classifier shown in <xref ref-type="table" rid="T2">Table 2</xref>, that of the selected combination features based on the MLP classifier has been improved, as shown in <xref ref-type="table" rid="T4">Table 4</xref>. Specifically, compared with the classification performance of OLF &#x0002B; OLR (11&#x0002B;1,316) based on the MLP classifier, that of SLF &#x0002B; SLR (5&#x0002B;28) based on the MLP classifier has improved by 3.7% of accuracy, 3.7% of precision, 3.7% of recall, 4.4% of F1-score, and 2.7% of AUC. Compared with the classification performance of OLF &#x0002B; O3C (11 &#x0002B; 13,824) based on the MLP classifier, that of SLF &#x0002B; S3C (5 &#x0002B; 22) based on the MLP classifier has improved by 3.7% of accuracy, 4.3% of precision, 3.7% of recall, 5.1% of F1-score, and 5.9% of AUC. 
Compared with the classification performance of OLR &#x0002B; O3C (1,316 &#x0002B; 13,824) based on the MLP classifier, that of SLR &#x0002B; S3C (28 &#x0002B; 22) based on the MLP classifier has improved by 7.4% of accuracy, 8.1% of precision, 7.4% of recall, 7.6% of F1-score, and 4.7% of AUC. Compared with the classification performance of OLF &#x0002B; OLR &#x0002B; O3C (11 &#x0002B; 1,316 &#x0002B; 13,824) based on the MLP classifier, that of SLF &#x0002B; SLR &#x0002B; S3C (5 &#x0002B; 28 &#x0002B; 22) based on the MLP classifier has improved by 5.0% of accuracy, 4.9% of precision, 5.0% of recall, 5.8% of F1-score, and 4.5% of AUC.</p>
<p><xref ref-type="fig" rid="F5">Figure 5A(f</xref>) shows the mean evaluation metrics of all classifiers in Experiment 2. The mean evaluation metrics of ML classifiers based on selected combination features SLF &#x0002B; SLR (5 &#x0002B; 28) and selected ALL (5 &#x0002B; 28 &#x0002B; 22) perform better than the single-selected features. In addition, compared with the mean evaluation metrics of ML classifiers based on the original features and their combination features, that of ML classifiers based on the selected features and their combination features has been improved. Specifically, compared with the best mean evaluation metrics of OLF (11) (71.9% of mean accuracy, 71.9% of mean precision, 71.9% of mean recall, 71.3% of mean F1-score, and 77.6% of mean AUC) in <xref ref-type="fig" rid="F4">Figure 4A(f</xref>), that of SLF &#x0002B; SLR (5 &#x0002B; 28) has improved by 5.6% of accuracy, 6.2% of precision, 5.6% of recall, 5.0% of F1-score, and 6.4% of AUC. Compared with the best mean evaluation metrics of OLF (11), that of selected ALL (5&#x0002B;28&#x0002B;22) has improved by 5.9% of accuracy, 5.6% of precision, 5.9% of recall, 5.0% of F1-score, and 5.2% of AUC.</p></sec>
<sec>
<title>3.3. The classification performance of fused features and their combination features based on different classifiers</title>
<p><xref ref-type="table" rid="T5">Tables 5</xref>, <xref ref-type="table" rid="T6">6</xref> report the experimental results of three fused features and their combination features based on the eight classical ML classifiers in Experiment 3. Specifically, three fused features include FLF (2), FLR (6), and F3C (34), and their combination features include FLF &#x0002B; FLR (2 &#x0002B; 6), FLF &#x0002B; F3C (2 &#x0002B; 34), FLR &#x0002B; F3C (6 &#x0002B; 34), and FLF &#x0002B; FLR &#x0002B; F3C (fused ALL, 2 &#x0002B; 6 &#x0002B; 34).</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Evaluation metrics of the different classifiers with three fused features (Experiment 3) on the test set.</p></caption>
<table frame="box" rules="all">
<thead><tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">74.1<sup>i</sup>/70.4<sup>ii</sup>/74.1<sup>iii</sup></td>
<td valign="top" align="center">73.6/79.5/73.2</td>
<td valign="top" align="center">74.1/70.4/74.1</td>
<td valign="top" align="center">71.4/61.2/72.0</td>
<td valign="top" align="center">80.0/72.2/76.5</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">64.2/61.7/66.7</td>
<td valign="top" align="center">62.6/62.8/62.7</td>
<td valign="top" align="center">64.2/61.7/66.7</td>
<td valign="top" align="center">63.2/62.2/61.3</td>
<td valign="top" align="center">73.7/62.2/76.6</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">69.1/71.6/71.6</td>
<td valign="top" align="center">68.3/72.5/72.5</td>
<td valign="top" align="center">69.1/71.6/71.6</td>
<td valign="top" align="center">68.3/66.0/66.0</td>
<td valign="top" align="center">70.8/73.2/74.5</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">65.4/61.7/59.3</td>
<td valign="top" align="center">70.2/54.1/56.5</td>
<td valign="top" align="center">65.4/61.7/59.3</td>
<td valign="top" align="center">66.4/55.5/57.5</td>
<td valign="top" align="center">68.3/61.9/62.6</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">66.7/60.5/70.4</td>
<td valign="top" align="center">67.6/59.0/71.0</td>
<td valign="top" align="center">66.7/60.5/70.4</td>
<td valign="top" align="center">67.1/59.6/70.6</td>
<td valign="top" align="center">66.7/60.5/70.4</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">63.0/70.4/61.7</td>
<td valign="top" align="center">61.6/68.8/55.6</td>
<td valign="top" align="center">63.0/70.4/61.7</td>
<td valign="top" align="center">62.1/66.0/56.7</td>
<td valign="top" align="center">70.4/73.1/64.7</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">67.9/53.1/69.1</td>
<td valign="top" align="center">65.0/57.2/72.4</td>
<td valign="top" align="center">67.9/53.1/69.1</td>
<td valign="top" align="center">61.0/54.4/69.9</td>
<td valign="top" align="center">78.8/56.0/71.3</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">66.7/55.6/66.7</td>
<td valign="top" align="center">62.3/60.4/62.7</td>
<td valign="top" align="center">66.7/55.6/66.7</td>
<td valign="top" align="center">60.1/56.8/61.3</td>
<td valign="top" align="center">78.6/55.8/75.5</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">67.1/63.1/67.5</td>
<td valign="top" align="center">66.4/64.3/65.8</td>
<td valign="top" align="center">67.1/63.1/67.5</td>
<td valign="top" align="center">65.0/60.2/64.4</td>
<td valign="top" align="center">73.4/64.4/71.5</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii: FLF (2)/ FLR (6)/F3C (34).</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Evaluation metrics of the different classifiers with four fused combination features (Experiment 3) on the test set.</p></caption>
<table frame="box" rules="all">
<thead><tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">77.8<sup>i</sup>/80.2<sup>ii</sup>/75.3<sup>iii</sup>/80.2<sup>iv</sup></td>
<td valign="top" align="center">78.2/80.1/74.4/80.1</td>
<td valign="top" align="center">77.8/80.2/75.3/80.2</td>
<td valign="top" align="center">75.7/79.2/74.0/79.2</td>
<td valign="top" align="center">79.7/81.5/78.9/82.3</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">58.0/70.4/66.7/67.9</td>
<td valign="top" align="center">59.6/69.4/61.4/78.3</td>
<td valign="top" align="center">58.0/70.4/66.7/67.9</td>
<td valign="top" align="center">58.7/65.1/55.4/56.1</td>
<td valign="top" align="center">71.0/75.4/67.7/71.9</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">80.2/74.1/70.4/72.8</td>
<td valign="top" align="center">80.6/77.6/79.5/76.3</td>
<td valign="top" align="center">80.2/74.1/70.4/72.8</td>
<td valign="top" align="center">78.8/69.0/61.2/67.0</td>
<td valign="top" align="center">81.9/78.4/76.1/79.7</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">71.6/60.5/61.7/72.8</td>
<td valign="top" align="center">70.5/57.4/54.1/72.8</td>
<td valign="top" align="center">71.6/60.5/61.7/72.8</td>
<td valign="top" align="center">67.9/58.4/55.5/68.9</td>
<td valign="top" align="center">77.7/69.6/63.0/77.8</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">66.7/69.1/67.9/70.4</td>
<td valign="top" align="center">66.4/76.9/67.9/68.8</td>
<td valign="top" align="center">66.7/69.1/67.9/70.4</td>
<td valign="top" align="center">66.5/69.9/67.9/68.8</td>
<td valign="top" align="center">66.7/69.1/67.9/70.4</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">75.3/72.8/66.7/70.4</td>
<td valign="top" align="center">74.6/72.1/63.4/68.6</td>
<td valign="top" align="center">75.3/72.8/66.7/70.4</td>
<td valign="top" align="center">73.5/69.9/58.0/66.9</td>
<td valign="top" align="center">79.4/74.1/70.4/75.4</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">70.4/69.1/66.7/75.3</td>
<td valign="top" align="center">75.1/76.9/63.4/74.4</td>
<td valign="top" align="center">70.4/69.1/66.7/75.3</td>
<td valign="top" align="center">71.2/69.9/63.2/74.0</td>
<td valign="top" align="center">76.0/76.9/77.0/80.8</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">63.0/70.4/67.9/76.5</td>
<td valign="top" align="center">72.5/69.4/65.0/76.3</td>
<td valign="top" align="center">63.0/70.4/67.9/76.5</td>
<td valign="top" align="center">63.8/65.1/64.1/74.6</td>
<td valign="top" align="center">73.3/80.0/77.0/81.5</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">70.4/70.8/67.9/73.3</td>
<td valign="top" align="center">72.2/72.5/66.1/74.5</td>
<td valign="top" align="center">70.4/70.8/67.9/73.3</td>
<td valign="top" align="center">69.5/68.3/62.4/69.4</td>
<td valign="top" align="center">75.7/75.6/72.3/77.5</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii/iv: FLF &#x0002B; FLR (2 &#x0002B; 6)/ FLF &#x0002B; F3C (2 &#x0002B; 34)/ FLR &#x0002B; F3C (6 &#x0002B; 34)/ FLF &#x0002B; FLR &#x0002B; F3C (2 &#x0002B; 6 &#x0002B; 34).</p>
</table-wrap-foot>
</table-wrap>
<p><xref ref-type="table" rid="T5">Table 5</xref> and <xref ref-type="fig" rid="F6">Figures 6A(a&#x02013;e</xref>) show that the MLP classifier with three single-fused features performs better than other classifiers in Experiment 3. <xref ref-type="fig" rid="F6">Figure 6B</xref> shows the ROCs of the three single-fused features and their combination features based on the eight classical ML classifiers. Furthermore, the MLP classifier with FLF (2) performs best at AUC, achieving 80.0%. Other evaluation metrics of FLF on the MLP classifier are 74.1% of accuracy, 73.6% of precision, 74.1% of recall, and 71.4% of F1-score. However, compared with the classification performance of single original features and single-selected features based on the MLP classifier in <xref ref-type="table" rid="T1">Tables 1</xref>, <xref ref-type="table" rid="T3">3</xref>, the MLP classifier with single-fused features fails to improve the classification performance.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>The evaluation metrics pictures and ROCs of eight classifiers with seven fused features (three fused features and four fused combination features in Experiment 3). <bold>(A)</bold> The evaluation metrics pictures include <bold>(a)</bold> Accuracy, <bold>(b)</bold> Precision, <bold>(c)</bold> Recall, <bold>(d)</bold> F1-score, <bold>(e)</bold> AUC, and <bold>(f)</bold> Mean. <bold>(B)</bold> ROCs of the ML classifiers include <bold>(a)</bold> FLF (2), <bold>(b)</bold> FLR (6), <bold>(c)</bold> F3C (34), <bold>(d)</bold> FLF &#x0002B; FLR (2 &#x0002B; 6), <bold>(e)</bold> FLF &#x0002B; F3C (2 &#x0002B; 34), <bold>(f)</bold> FLR &#x0002B; F3C (6 &#x0002B; 34), and <bold>(g)</bold> FLF &#x0002B; FLR &#x0002B; F3C (2 &#x0002B; 6 &#x0002B; 34).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0006.tif"/>
</fig>
<p><xref ref-type="table" rid="T6">Table 6</xref> and <xref ref-type="fig" rid="F6">Figures 6A(a&#x02013;e</xref>) show that FLF &#x0002B; FLR &#x0002B; F3C (fused ALL, 2 &#x0002B; 6 &#x0002B; 34) based on the MLP classifier also performs best at AUC, achieving 82.3%. Other evaluation metrics of fused ALL based on the MLP classifier are 80.2% of accuracy, 80.1% of precision, 80.2% of recall, and 79.2% of F1-score. Compared with the best classification performance of the single-fused features FLF based on the MLP classifier, that of fused ALL based on the MLP classifier has improved by 6.1% of accuracy, 6.5% of precision, 6.1% of recall, 7.8% of F1-score, and 2.3% of AUC. However, compared with the classification performance of original combination features and selected combination features based on the MLP classifier in <xref ref-type="table" rid="T2">Tables 2</xref>, <xref ref-type="table" rid="T4">4</xref>, the fused combination features based on the MLP classifier fail to improve the classification performance.</p>
<p><xref ref-type="fig" rid="F6">Figure 6A(f</xref>) shows the mean evaluation metrics of all classifiers in Experiment 3. The mean evaluation metrics of fused combination features FLF &#x0002B; FLR &#x0002B; F3C (2 &#x0002B; 6 &#x0002B; 34) perform better than the single-fused features. However, compared with the best mean evaluation metrics of OLF (11) in <xref ref-type="fig" rid="F4">Figure 4A(f</xref>) and SLF &#x0002B; SLR (5 &#x0002B; 28) / selected ALL (5 &#x0002B; 28 &#x0002B; 22) in <xref ref-type="fig" rid="F5">Figure 5A(f</xref>), the fused combination features FLF &#x0002B; FLR &#x0002B; F3C (2 &#x0002B; 6 &#x0002B; 34) fail to improve the mean evaluation metrics.</p></sec>
<sec>
<title>3.4. The classification performance of the selected and fused combination features</title>
<p><xref ref-type="table" rid="T7">Table 7</xref> reports the experimental results of three selected and fused combination features based on the eight classical ML classifiers in Experiment 4. Specifically, seven selected and fused combination features include SLF &#x0002B; FLF (5 &#x0002B; 2), SLR &#x0002B; FLR (28 &#x0002B; 6), S3C &#x0002B; F3C (22 &#x0002B; 34), SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR (5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6), SLF &#x0002B; FLF &#x0002B; S3C &#x0002B; F3C (5 &#x0002B; 2 &#x0002B; 22 &#x0002B; 34), SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34), SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (selected ALL &#x0002B; fused ALL, 5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34).</p>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Evaluation metrics of the different classifiers with seven selected and fused combination features (Experiment 4) on the test set.</p></caption>
<table frame="box" rules="all">
<thead><tr>
<th valign="top" align="left" style="background-color:#919497; color:#ffffff"><bold> Classifier</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Accuracy (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Precision (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>Recall (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>F1-score (%)</bold></th>
<th valign="top" align="center" style="background-color:#919497; color:#ffffff"><bold>AUC (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">81.5<sup>i</sup>/84.0<sup>ii</sup>/79.0<sup>iii</sup>/84.0<sup>iv</sup>/81.5<sup>v</sup>/86.4<sup>vi</sup>/87.7<sup>vii</sup></td>
<td valign="top" align="center">81.3/84.1/78.5/83.7/81.1/86.3/87.7</td>
<td valign="top" align="center">81.5/84.0/79.0/84.0/81.5/86.4/87.7</td>
<td valign="top" align="center">81.4/83.2/78.4/83.7/81.2/86.2/87.7</td>
<td valign="top" align="center">84.3/85.1/81.1/86.7/84.0/86.9/89.3</td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">75.3/76.5/67.9/77.8/69.1/77.8/79.0</td>
<td valign="top" align="center">74.4/79.8/65.0/78.2/67.4/77.1/78.5</td>
<td valign="top" align="center">75.3/76.5/67.9/77.8/69.1/77.8/79.0</td>
<td valign="top" align="center">74.0/72.7/61.0/75.7/63.1/77.0/78.4</td>
<td valign="top" align="center">80.2/82.5/72.1/85.6/79.4/81.4/83.3</td>
</tr> <tr>
<td valign="top" align="left">RF</td>
<td valign="top" align="center">74.1/81.5/69.1/84.0/77.8/77.8/79.0</td>
<td valign="top" align="center">73.5/85.5/68.4/84.1/77.6/80.8/80.3</td>
<td valign="top" align="center">74.1/81.5/69.1/84.0/77.8/77.8/79.0</td>
<td valign="top" align="center">73.7/79.0/61.8/83.2/76.2/74.5/76.8</td>
<td valign="top" align="center">77.5/82.6/76.5/88.7/80.5/81.1/84.1</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">66.7/75.3/63.0/75.3/69.1/72.8/77.8</td>
<td valign="top" align="center">67.0/74.4/66.0/74.9/67.9/72.1/77.2</td>
<td valign="top" align="center">66.7/75.3/63.0/75.3/69.1/72.8/77.8</td>
<td valign="top" align="center">66.8/74.0/63.9/73.0/68.2/69.6/76.6</td>
<td valign="top" align="center">74.9/79.1/67.2/84.1/72.2/75.8/78.5</td>
</tr> <tr>
<td valign="top" align="left">DT</td>
<td valign="top" align="center">70.4/71.6/65.4/71.6/75.3/74.1/77.8</td>
<td valign="top" align="center">69.9/70.3/64.2/70.3/75.3/73.0/77.2</td>
<td valign="top" align="center">70.4/71.6/65.4/71.6/75.3/74.1/77.8</td>
<td valign="top" align="center">70.1/70.4/64.7/70.4/75.3/72.5/77.3</td>
<td valign="top" align="center">70.4/71.6/64.7/71.6/75.3/74.1/74.2</td>
</tr> <tr>
<td valign="top" align="left">GB</td>
<td valign="top" align="center">66.7/80.2/70.4/81.5/71.6/79.0/80.2</td>
<td valign="top" align="center">67.0/82.7/68.8/81.7/70.2/80.3/82.7</td>
<td valign="top" align="center">66.7/80.2/70.4/81.5/71.6/79.0/80.2</td>
<td valign="top" align="center">66.8/77.9/66.0/80.3/68.6/76.8/77.9</td>
<td valign="top" align="center">75.8/83.2/72.8/85.6/76.8/79.1/81.0</td>
</tr> <tr>
<td valign="top" align="left">LDA</td>
<td valign="top" align="center">72.8/74.1/80.2/72.8/80.2/76.5/79.0</td>
<td valign="top" align="center">71.6/73.2/79.8/71.7/79.2/75.8/78.8</td>
<td valign="top" align="center">72.8/74.1/80.2/72.8/80.2/76.5/79.0</td>
<td valign="top" align="center">71.4/73.3/79.8/71.9/79.8/75.9/78.9</td>
<td valign="top" align="center">82.7/82.5/84.5/83.1/86.0/84.3/86.8</td>
</tr> <tr>
<td valign="top" align="left">LR</td>
<td valign="top" align="center">72.8/74.1/72.8/74.1/80.2/82.7/82.7</td>
<td valign="top" align="center">71.7/73.0/71.7/73.2/80.0/82.5/82.5</td>
<td valign="top" align="center">72.8/74.1/72.8/74.1/80.2/82.7/82.7</td>
<td valign="top" align="center">71.9/72.9/71.9/73.3/80.0/82.5/82.5</td>
<td valign="top" align="center">82.7/83.6/83.9/85.1/86.1/86.2/87.9</td>
</tr> <tr>
<td valign="top" align="left">(Mean)</td>
<td valign="top" align="center">72.5/77.2/71.0/77.6/75.6/78.4/80.4</td>
<td valign="top" align="center">72.1/77.9/70.3/77.2/74.8/78.5/80.6</td>
<td valign="top" align="center">72.5/77.2/71.0/77.6/75.6/78.4/80.4</td>
<td valign="top" align="center">72.0/75.4/68.4/76.4/74.1/76.9/79.5</td>
<td valign="top" align="center">78.6/81.3/75.4/83.8/80.0/81.1/83.1</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>i/ii/iii/iv/v/vi/vii: SLF &#x0002B; FLF (5 &#x0002B; 2)/ SLR &#x0002B; FLR (28 &#x0002B; 6)/ S3C &#x0002B; F3C (22 &#x0002B; 34)/ SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR (5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6)/ SLF &#x0002B; FLF &#x0002B; S3C &#x0002B; F3C (5 &#x0002B; 2 &#x0002B; 22 &#x0002B; 34)/ SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34)/ SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (selected ALL &#x0002B; fused ALL &#x02192; our proposed strategy, 5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34).</p>
</table-wrap-foot>
</table-wrap>
<p><xref ref-type="table" rid="T7">Table 7</xref> and <xref ref-type="fig" rid="F7">Figures 7A(a&#x02013;e</xref>) show that the MLP classifier with selected ALL&#x0002B;fused ALL (our proposed strategy, 5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34) performs best in Experiments 1&#x02013;4. <xref ref-type="fig" rid="F7">Figure 7B</xref> shows the ROCs of the seven selected and fused combination features based on the eight classical ML classifiers. Specifically, the MLP classifier with selected ALL &#x0002B; fused ALL (5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6 &#x0002B; 22 &#x0002B; 34) achieves 87.7% of accuracy, 87.7% of precision, 87.7% of recall, 87.7% of F1-score, and 89.3% of AUC. Compared with the best classification performance of the single original feature OLF (11) based on the MLP classifier in Experiment 1, the classification performance of the MLP classifier with our proposed strategy has improved by 7.5% of accuracy, 7.9% of precision, 7.5% of recall, 7.9% of F1-score, and 5.6% of AUC. Compared with the best classification performance of the selected combination feature selected ALL (5 &#x0002B; 28 &#x0002B; 22) based on the MLP classifier in Experiment 2, the classification performance of the MLP classifier with our proposed strategy has improved by 2.5% of accuracy, 2.9% of precision, 2.5% of recall, 2.7% of F1-score, and 1.4% of AUC. Compared with the best classification performance of the fused combination feature fused ALL (2&#x0002B;6&#x0002B;34) based on the MLP classifier in Experiment 3, the classification performance of the MLP classifier with our proposed strategy has improved by 7.5% of accuracy, 7.6% of precision, 7.5% of recall, 8.5% of F1-score, and 7.0% of AUC.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>The evaluation metrics pictures and ROCs of eight classifiers with seven selected and fused combination features (Experiment 4). <bold>(A)</bold> The evaluation metrics pictures include <bold>(a)</bold> Accuracy, <bold>(b)</bold> Precision, <bold>(c)</bold> Recall, <bold>(d)</bold> F1-score, <bold>(e)</bold> AUC, and <bold>(f)</bold> Mean. <bold>(B)</bold> ROCs of the ML classifiers include <bold>(a)</bold> SLF &#x0002B; FLF (5&#x0002B;2), <bold>(b)</bold> SLR &#x0002B; FLR (28&#x0002B;6), <bold>(c)</bold> S3C &#x0002B; F3C (22&#x0002B;34), <bold>(d)</bold> SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR (5&#x0002B;2&#x0002B;28&#x0002B;6), <bold>(e)</bold> SLF &#x0002B; FLF &#x0002B; S3C &#x0002B; F3C (5&#x0002B;2&#x0002B;22&#x0002B;34), <bold>(f)</bold> SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (28&#x0002B;6&#x0002B;22&#x0002B;34), and <bold>(g)</bold> SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR &#x0002B; S3C &#x0002B; F3C (selected ALL &#x0002B; fused ALL, 5&#x0002B;2&#x0002B;28&#x0002B;6&#x0002B;22&#x0002B;34).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0007.tif"/>
</fig>
<p><xref ref-type="fig" rid="F7">Figure 7A(f</xref>) shows the mean evaluation metrics of all classifiers in Experiment 4. The mean evaluation metrics of selected and fused combination features SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR (5 &#x0002B; 2 &#x0002B; 28 &#x0002B; 6) perform best at mean AUC, achieving 83.8%. The mean AUC of selected ALL&#x0002B;fused ALL is marginally lower than that of SLF &#x0002B; FLF &#x0002B; SLR &#x0002B; FLR, achieving 83.1%. However, other mean evaluation metrics of selected ALL&#x0002B;fused ALL perform best, achieving 80.4% of accuracy, 80.6% of precision, 80.4% of recall, and 79.5% of F1-score. The mean evaluation metrics of selected ALL&#x0002B;fused ALL are far superior to the best mean evaluation metrics in Experiments 1&#x02013;4.</p>
<p>Because of the MLP classifier&#x00027;s excellent performance in dyspnea identification, <xref ref-type="fig" rid="F8">Figure 8</xref> shows the evaluation metrics pictures and ROCs of MLP classifiers with different features in Experiments 1&#x02013;4 (<xref ref-type="fig" rid="F2">Figure 2</xref>). <xref ref-type="fig" rid="F8">Figure 8A(a</xref>) shows that although OLF (11), OLR (1,316), and O3C (13,824) are directly combined, the classification performance of the MLP classifier is basically not improved. However, <xref ref-type="fig" rid="F8">Figure 8A(b</xref>) shows that the classification performance of selected combination features based on the MLP classifier has improved. In addition, <xref ref-type="fig" rid="F8">Figure 8A(c</xref>) shows that compared to the single-fused features, the classification performance of the fused combination features based on the MLP classifier has improved. However, compared with the classification performance of the original features or their combination features, the classification performance of the fused combination features based on the MLP classifier is also not improved. Finally, <xref ref-type="fig" rid="F8">Figure 8A(d</xref>) shows that our proposed strategy by combining the local and global features of OLF, OLR, and O3C based on the MLP classifier achieves the best classification performance, with 87.7% of accuracy, 87.7% of precision, 87.7% of recall, 87.7% of F1-score, and 89.3% of AUC.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>The evaluation metrics pictures and ROCs of MLP classifiers with different features. <bold>(A)</bold> The evaluation metrics pictures of MLP classifiers in <bold>(a)</bold> Experiment 1, <bold>(b)</bold> Experiment 2, <bold>(c)</bold> Experiment 3, and <bold>(d)</bold> Experiment 4; <bold>(B)</bold> ROCs of MLP classifiers in <bold>(a)</bold> Experiment 1, <bold>(b)</bold> Experiment 2, <bold>(c)</bold> Experiment 3, and <bold>(d)</bold> Experiment 4.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-09-980950-g0008.tif"/>
</fig></sec></sec>
<sec id="s4">
<title>4. Discussion</title>
<p>This paper proposes a multi-modal data combination strategy by concatenating selected and fused PFT parameters, lung radiomics features, and 3D CNN features for dyspnea identification based on the MLP classifier. This section discusses four aspects: the single original modal data, Lasso and PCA, the proposed multi-modal data combination strategy, and the MLP classifier for dyspnea identification. Last, we also point out the limitations in this study and the future direction.</p>
<sec>
<title>4.1. The single original modal data for dyspnea identification</title>
<p>The single original modal data makes it difficult to achieve satisfactory performance of dyspnea identification in COPD for clinical application. Compared with OLR (1,316) or O3C (13,824) extracted from chest HRCT images, PFT parameters OLF (11) performs best for dyspnea identification in the mean evaluation metrics. The reason for PFT parameters achieving the best identification performance also can be explained. Compared with chest HRCT images, the PFT parameters can directly reflect the respiratory status of the lungs. Therefore, the PFT parameters perform better in dyspnea identification than OLR (1,316) and O3C (13,824). Specifically, as pulmonary function indices in PFT, FEV<sub>1</sub> and FEV<sub>1</sub>/FVC are the criteria for determining COPD classification (<xref ref-type="bibr" rid="B1">1</xref>). Therefore, they may be major factors in dyspnea in COPD. Unfortunately, because of the heterogeneity of COPD patients, some patients are without dyspnea even if they are in a higher COPD stage, such as GOLDIII&#x00026;IV (<xref ref-type="fig" rid="F1">Figure 1B</xref>). In addition, the alveolar wall structure is damaged in severe COPD patients, leading to alveolar fusion, which further reduces the area of the pulmonary vascular bed so that the gas exchange area is reduced. The proportion of ventilation/blood flow is imbalanced, which may lead to the decline of diffusion function. The mechanisms of exertional dyspnea in patients with mild COPD and low resting DL<sub>CO</sub> have been revealed (<xref ref-type="bibr" rid="B49">49</xref>). The TLC, FVC, and RV increase, vital capacity decreases, and the flow rate in the respiratory process decreases in COPD patients, which may result in dyspnea (<xref ref-type="bibr" rid="B50">50</xref>).</p></sec>
<sec>
<title>4.2. The Lasso and PCA algorithm for dyspnea identification</title>
<p>The Lasso algorithm is respectively performed to select the SLF (5), SLR (28), and S3C (22) from OLF (11), OLR (1,316), and O3C (13,824). Meanwhile, the PCA algorithm is respectively performed to fuse the FLF (2), FLR (6), and F3C (34) from OLF (11), OLR (1,316), and O3C (13,824). All ML Models based on the SLF (5), SLR (28), and S3C (22) respectively perform better than the OLF (11), OLR (1,316), and O3C (13,824) in the mean evaluation metrics. However, the FLF (2) and FLR (6) respectively perform poorer than the OLF (11) and OLR (1,316) in the mean evaluation metrics. The mean evaluation metrics of F3C (34) and O3C (13,824) basically remain unchanged. Lasso algorithm selects the identification features by establishing the relationship between the independent and dependent variables (OLF (11)/ OLR (1,316)/ O3C (13,824) and dyspnea identification), reducing the complexity of the ML classifiers and avoiding overfitting (<xref ref-type="bibr" rid="B3">3</xref>). While reducing the complexity of the ML classifiers, the ML classifiers respectively focus on the SLF (5), SLR (28), and S3C (22), improving the classifiers&#x00027; performance for dyspnea identification. PCA algorithm fuses the identification features by reducing the dimension of the high-dimensional original features within a certain range of information loss (<xref ref-type="bibr" rid="B36">36</xref>). The PCA algorithm performs better at O3C (13,824) than the OLF (11) and OLR (1,316). Specifically, the OLF (11) and OLR (1,316) are not high-dimensional features. Therefore, certain identification information is lost when dimensionality reduction is performed on the original features of OLF and OLR. The mean evaluation metrics of F3C (34) and O3C (13,824) remain unchanged, confirming the discussion about the PCA algorithm above.</p></sec>
<sec>
<title>4.3. The proposed multi-modal data combination strategy for dyspnea identification</title>
<p>The main problem of the multi-modal data combination is that a smaller number of features OLF (11) are overwhelmed by a larger number of features OLR (1,316) and O3C (13,824). Therefore, the mean evaluation metrics of the ML Models based on original combination features have not been improved. Inspired by YOLOv3-SPP (<xref ref-type="bibr" rid="B51">51</xref>) (a CNN for the target detection), a multi-modal data combination strategy is proposed by combining the local and global features for dyspnea identification in COPD. The Lasso algorithm, with excellent performance for COPD dyspnea identification and its discussion above, is used to obtain the local features. Meanwhile, the global features are obtained by the PCA algorithm. The PCA algorithm fails to improve the identification performance, but the mean evaluation metrics of the local and global features have been improved. One important reason is that we select and fuse the original features separately and combine them for dyspnea identification. The local features are relevant for identifying dyspnea, but in any case, other possible features have been ignored by the Lasso algorithm. However, global features are obtained by the PCA algorithm, fusing all original features, which makes up for the defects of local features. Further, the advantages of PFT and CT are fully exploited. Except for PFT parameters and lung radiomics features, deep 3D CNN features are extracted from chest HRCT images. The local and global features of the PFT parameters, lung radiomics features, and 3D CNN features are re-integrated, finally obtaining a good dyspnea identification effect.</p></sec>
<sec>
<title>4.4. The MLP classifier for dyspnea identification</title>
<p>Eight classical ML classifiers are respectively used for dyspnea identification in COPD. The MLP classifier performs better than the other classifiers in this paper, implying that there may be a non-linear relationship between identification features and dyspnea. In addition, due to the multi-modal data combination, there are essential differences between the multi-modal features. In particular, the OLF (11) is obtained by PFT, and OLR (1,316) and O3C (13,824) are extracted from chest HRCT images imaged by CT. The MLP classifier with strong adaptive and self-learning ability can handle the multi-modal data combination well. Meanwhile, 13,824 3D CNN features are the non-linear classification features. The MLP classifier is good at handling complex non-linear features by itself, which fits the essence of the MLP classifier and is interpretable (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B37">37</xref>).</p></sec>
<sec>
<title>4.5. The limitations in this study and future direction</title>
<p>This study also has some limitations, and we point out the future direction. First, the number of our study cohort limits the multi-classification of dyspnea in COPD, which may be more meaningful in clinical COPD management. Second, dyspnea in COPD is identified only by engineering means. However, professional clinicians should further analyze the deeper relationship between dyspnea and identification features from a pathophysiological point of view. Third, the existing classic ML classifiers are not improved. Last, the measurement of PFT parameters is very complex and limited by the cooperation of the examiner (<xref ref-type="bibr" rid="B52">52</xref>). In our future work, the improved graph neural network, an auto-metric Graph Neural Network based on a meta-learning strategy (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B53">53</xref>), will be further attempted and modified for dyspnea identification. Meanwhile, this paper only uses chest HRCT images and PFT parameters for dyspnea identification. Other clinical information should be collected to further improve dyspnea&#x00027;s classification performance, such as the heterogeneity parameter ventilation/perfusion <inline-formula><mml:math id="M7"><mml:mrow><mml:msup><mml:mi>V</mml:mi><mml:mo>&#x02032;</mml:mo></mml:msup></mml:mrow></mml:math></inline-formula>/Q&#x00027;, which is a major contributor to dyspnea in COPD patients (<xref ref-type="bibr" rid="B54">54</xref>, <xref ref-type="bibr" rid="B55">55</xref>). Besides, the mMRC score of 1 is a rather low dyspnea level and can even be physiological breathlessness in older subjects. However, identifying severe and extremely severe dyspnea in COPD may be more valuable for clinical application. Therefore, in subsequent studies, we will further expand our research to reveal the rule of dyspnea in COPD with aging using a survival analysis model.</p></sec></sec>
<sec id="s5">
<title>5. Conclusions</title>
<p>This paper proposes a multi-modal data combination strategy by combining the local and global features for dyspnea identification in COPD based on the MLP classifier. Specifically, the Lasso algorithm is separately performed to select the local features from original multi-modal data (11 original PFT parameters, 1,316 original lung radiomics features, and 13,824 original 3D CNN features). Meanwhile, the PCA algorithm is separately performed to fuse original multi-modal data, generating the global features. All the local and global features of original multi-modal data are combined for dyspnea identification in COPD based on the MLP classifier, achieving the best classification performance at 87.7% of accuracy, 87.7% of precision, 87.7% of recall, 87.7% of F1-score, and 89.3% of AUC, respectively. Compared with single-modal data, our proposed multi-modal data combination strategy effectively improves the classification performance for dyspnea identification in COPD, providing an objective and effective tool for COPD pre-clinical health management.</p></sec>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>, further inquiries can be directed to the corresponding authors.</p></sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by National Clinical Research Center of China&#x00027;s respiratory diseases. The patients/participants provided their written informed consent to participate in this study.</p></sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>Conceptualization and supervision: YK and RC. Methodology: YY, ZC, WL, and YG. Software: YY, ZC, NZ, SW, and WD. Validation: WL, YY, ZC, XL, and HC. Formal analysis: YY, ZC, YL, WL, and YG. Investigation: HC and XL. Resources: HC, RC, and XL. Data curation: RC. Writing&#x02014;original draft preparation: YY and ZC. Writing&#x02014;review and editing: WL, YL, and YK. Visualization: ZC, YY, YL, NZ, SW, ZC, and WD. Project administration: YK and HC. Funding acquisition: YK, WL, and HC. All authors have read and agreed to the published version of the manuscript.</p></sec>
</body>
<back>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>This research was funded by the National Natural Science Foundation of China, Grant Number 62071311; The Stable Support Plan for Colleges and Universities in Shenzhen of China, Grant Number SZWD2021010; The Scientific Research Fund of Liaoning Province of China, Grant Number JL201919; The Natural Science Foundation of Guangdong Province of China, Grant Number 2019A1515011382; the special program for key fields of colleges and universities in Guangdong Province (biomedicine and health) of China, Grant Number 2021ZDZX2008.</p>
</sec>
<ack><p>Thanks to the Department of Radiology, The First Affiliated Hospital of Guangzhou Medical University, for providing the dataset.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fmed.2022.980950/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fmed.2022.980950/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.ZIP" id="SM1" mimetype="application/zip" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>D</given-names></name> <name><surname>Agusti</surname> <given-names>A</given-names></name> <name><surname>Anzueto</surname> <given-names>A</given-names></name> <name><surname>Barnes</surname> <given-names>PJ</given-names></name> <name><surname>Bourbeau</surname> <given-names>J</given-names></name> <name><surname>Celli</surname> <given-names>BR</given-names></name> <etal/></person-group>. <article-title>Global strategy for the diagnosis, management, and prevention of chronic obstructive lung disease: the GOLD science committee report 2019</article-title>. <source>Eur Respir J.</source> (<year>2019</year>) <volume>53</volume>:<fpage>1900164</fpage>. <pub-id pub-id-type="doi">10.1183/13993003.00164-2019</pub-id><pub-id pub-id-type="pmid">30846476</pub-id></citation></ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Matheson</surname> <given-names>MC</given-names></name> <name><surname>Bowatte</surname> <given-names>G</given-names></name> <name><surname>Perret</surname> <given-names>JL</given-names></name> <name><surname>Lowe</surname> <given-names>AJ</given-names></name> <name><surname>Senaratna</surname> <given-names>CV</given-names></name> <name><surname>Hall</surname> <given-names>GL</given-names></name> <etal/></person-group>. <article-title>Prediction models for the development of COPD: a systematic review</article-title>. <source>Int J Chronic Obstr Pulm Dis.</source> (<year>2018</year>) <volume>13</volume>:<fpage>1927</fpage>&#x02013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.2147/COPD.S155675</pub-id><pub-id pub-id-type="pmid">29942125</pub-id></citation></ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>W</given-names></name> <name><surname>Kang</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>Y</given-names></name> <name><surname>Yang</surname> <given-names>K</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <etal/></person-group>. <article-title>A novel lung radiomics feature for characterizing resting heart rate and COPD stage evolution based on radiomics feature combination strategy</article-title>. <source>Math Biosci Eng.</source> (<year>2022</year>) <volume>19</volume>:<fpage>4145</fpage>&#x02013;<lpage>65</lpage>. <pub-id pub-id-type="doi">10.3934/mbe.2022366</pub-id><pub-id pub-id-type="pmid">35341291</pub-id></citation></ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Redelmeier</surname> <given-names>DA</given-names></name> <name><surname>Goldstein</surname> <given-names>RS</given-names></name> <name><surname>Min</surname> <given-names>ST</given-names></name> <name><surname>Hyland</surname> <given-names>R</given-names></name></person-group>. <article-title>Spirometry and dyspnea in patients with COPD-when small differences mean little</article-title>. <source>Chest.</source> (<year>1996</year>) <volume>109</volume>:<fpage>1163</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1378/chest.109.5.1163</pub-id><pub-id pub-id-type="pmid">8625661</pub-id></citation></ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ficker</surname> <given-names>JH</given-names></name> <name><surname>Br&#x000FC;ckl</surname> <given-names>WM</given-names></name></person-group>. <article-title>Refractory dyspnea in advanced COPD: palliative treatment with opioids</article-title>. <source>Pneumologie.</source> (<year>2019</year>) <volume>73</volume>:<fpage>430</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1055/s-0043-103033</pub-id><pub-id pub-id-type="pmid">31291670</pub-id></citation></ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gardiner</surname> <given-names>L</given-names></name> <name><surname>Carrieri</surname> <given-names>AP</given-names></name> <name><surname>Bingham</surname> <given-names>K</given-names></name> <name><surname>Macluskie</surname> <given-names>G</given-names></name> <name><surname>Bunton</surname> <given-names>D</given-names></name> <name><surname>McNeil</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Combining explainable machine learning, demographic and multi-omic data to identify precision medicine strategies for inflammatory bowel disease</article-title>. <source>Cold Spring Harbor Laboratory Press.</source> (<year>2021</year>). <pub-id pub-id-type="doi">10.1101/2021.03.03.21252821</pub-id></citation>
</ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Madabhushi</surname> <given-names>A</given-names></name> <name><surname>Agner</surname> <given-names>S</given-names></name> <name><surname>Basavanhally</surname> <given-names>A</given-names></name> <name><surname>Doyle</surname> <given-names>S</given-names></name> <name><surname>Lee</surname> <given-names>G</given-names></name></person-group>. <article-title>Computer-aided prognosis: Predicting patient and disease outcome via quantitative fusion of multi-scale, multi-modal data</article-title>. <source>Comput Med Imaging Graph.</source> (<year>2011</year>) <volume>35</volume>:<fpage>506</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1016/j.compmedimag.2011.01.008</pub-id><pub-id pub-id-type="pmid">21333490</pub-id></citation></ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Taube</surname> <given-names>C</given-names></name> <name><surname>Lehnigk</surname> <given-names>B</given-names></name> <name><surname>Paasch</surname> <given-names>K</given-names></name> <name><surname>Kirsten</surname> <given-names>DK</given-names></name> <name><surname>J&#x000F6;rres</surname> <given-names>RA</given-names></name> <name><surname>Magnussen</surname> <given-names>H</given-names></name></person-group>. <article-title>Factor analysis of changes in dyspnea and lung function parameters after bronchodilation in chronic obstructive pulmonary disease</article-title>. <source>Am J Respir Crit Care Med.</source> (<year>2000</year>). <pub-id pub-id-type="doi">10.1164/ajrccm.162.1.9909054</pub-id><pub-id pub-id-type="pmid">10903244</pub-id></citation></ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lynch</surname> <given-names>DA</given-names></name></person-group>. <article-title>Progress in imaging COPD, 2004-2014</article-title>. <source>Chronic Obstr Pulm Dis.</source> (<year>2014</year>) <volume>1</volume>:<fpage>73</fpage>. <pub-id pub-id-type="doi">10.15326/jcopdf.1.1.2014.0125</pub-id><pub-id pub-id-type="pmid">28848813</pub-id></citation></ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Castaldi</surname> <given-names>PJ</given-names></name> <name><surname>Est&#x000E9;par</surname> <given-names>RSJ</given-names></name> <name><surname>Mendoza</surname> <given-names>CS</given-names></name> <name><surname>Hersh</surname> <given-names>CP</given-names></name> <name><surname>Laird</surname> <given-names>N</given-names></name> <name><surname>Crapo</surname> <given-names>JD</given-names></name> <etal/></person-group>. <article-title>Distinct quantitative computed tomography emphysema patterns are associated with physiology and function in smokers</article-title>. <source>Am J Respir Crit Care Med.</source> (<year>2013</year>) <volume>188</volume>:<fpage>1083</fpage>&#x02013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1164/rccm.201305-0873OC</pub-id><pub-id pub-id-type="pmid">23980521</pub-id></citation></ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lambin</surname> <given-names>P</given-names></name> <name><surname>Rios-Velazquez</surname> <given-names>E</given-names></name> <name><surname>Leijenaar</surname> <given-names>R</given-names></name> <name><surname>Carvalho</surname> <given-names>S</given-names></name> <name><surname>Van Stiphout</surname> <given-names>RG</given-names></name> <name><surname>Granton</surname> <given-names>P</given-names></name> <etal/></person-group>. <article-title>Radiomics: extracting more information from medical images using advanced feature analysis</article-title>. <source>Eur J Cancer.</source> (<year>2012</year>) <volume>48</volume>:<fpage>441</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.ejca.2011.11.036</pub-id><pub-id pub-id-type="pmid">22257792</pub-id></citation></ref>
<ref id="B12">
<label>12.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Wang</surname> <given-names>S</given-names></name> <name><surname>Zeng</surname> <given-names>N</given-names></name> <name><surname>Duan</surname> <given-names>W</given-names></name> <name><surname>Chen</surname> <given-names>Z</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Lung radiomics features selection for COPD stage classification based on auto-metric graph neural network</article-title>. <source>Diagnostics.</source> (<year>2022</year>) <volume>12</volume>:<fpage>2274</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics12102274</pub-id><pub-id pub-id-type="pmid">36291964</pub-id></citation></ref>
<ref id="B13">
<label>13.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>W</given-names></name> <name><surname>Zhou</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>X</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name></person-group>. <article-title>Automated vessel segmentation in lung CT and CTA images via deep neural networks</article-title>. <source>J X-Ray Sci Technol</source>. <volume>2021</volume>:<fpage>1</fpage>&#x02013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.21203/rs.3.rs-551102/v1</pub-id><pub-id pub-id-type="pmid">34421004</pub-id></citation></ref>
<ref id="B14">
<label>14.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>W</given-names></name> <name><surname>Liu</surname> <given-names>P</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Xu</surname> <given-names>S</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name></person-group>. <article-title>Segmentation of lung airways based on deep learning methods</article-title>. <source>IET Image Process.</source> (<year>2022</year>) <volume>16</volume>:<fpage>1444</fpage>&#x02013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1049/ipr2.12423</pub-id><pub-id pub-id-type="pmid">32490437</pub-id></citation></ref>
<ref id="B15">
<label>15.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>G&#x000F3;reczny</surname> <given-names>S</given-names></name> <name><surname>Haak</surname> <given-names>A</given-names></name> <name><surname>Morgan</surname> <given-names>GJ</given-names></name> <name><surname>Zablah</surname> <given-names>J</given-names></name></person-group>. <article-title>Feasibility of airway segmentation from three-dimensional rotational angiography</article-title>. <source>Cardiol J.</source> (<year>2020</year>) <volume>27</volume>:<fpage>875</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.5603/CJ.a2020.0136</pub-id><pub-id pub-id-type="pmid">33140395</pub-id></citation></ref>
<ref id="B16">
<label>16.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>K</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Kang</surname> <given-names>Y</given-names></name> <name><surname>Liang</surname> <given-names>Z</given-names></name> <name><surname>Wang</surname> <given-names>F</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <etal/></person-group>. <article-title>The value of radiomic features in chronic obstructive pulmonary disease assessment: a prospective study</article-title>. <source>Clin Radiol.</source> (<year>2022</year>) <volume>77</volume>:<fpage>e466</fpage>&#x02013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1016/j.crad.2022.02.015</pub-id><pub-id pub-id-type="pmid">35346461</pub-id></citation></ref>
<ref id="B17">
<label>17.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>G</given-names></name> <name><surname>Ibrahim</surname> <given-names>A</given-names></name> <name><surname>Halilaj</surname> <given-names>I</given-names></name> <name><surname>Leijenaar</surname> <given-names>RT</given-names></name> <name><surname>Rogers</surname> <given-names>W</given-names></name> <name><surname>Gietema</surname> <given-names>HA</given-names></name> <etal/></person-group>. <article-title>The emerging role of radiomics in COPD and lung cancer</article-title>. <source>Respiration.</source> (<year>2020</year>) <volume>99</volume>:<fpage>99</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1159/000505429</pub-id><pub-id pub-id-type="pmid">31991420</pub-id></citation></ref>
<ref id="B18">
<label>18.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>L</given-names></name> <name><surname>Lin</surname> <given-names>W</given-names></name> <name><surname>Xie</surname> <given-names>D</given-names></name> <name><surname>Yu</surname> <given-names>Y</given-names></name> <name><surname>Cao</surname> <given-names>H</given-names></name> <name><surname>Liao</surname> <given-names>G</given-names></name> <etal/></person-group>. <article-title>Development and validation of a preoperative CT-based radiomic nomogram to predict pathology invasiveness in patients with a solitary pulmonary nodule: a machine learning approach, multicenter, diagnostic study</article-title>. <source>European Radiology.</source> <volume>2022</volume>:<fpage>1983</fpage>&#x02013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1007/s00330-021-08268-z</pub-id><pub-id pub-id-type="pmid">34654966</pub-id></citation></ref>
<ref id="B19">
<label>19.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Au</surname> <given-names>RC</given-names></name> <name><surname>Tan</surname> <given-names>WC</given-names></name> <name><surname>Bourbeau</surname> <given-names>J</given-names></name> <name><surname>Hogg</surname> <given-names>JC</given-names></name> <name><surname>Kirby</surname> <given-names>M</given-names></name></person-group>. <article-title>Impact of image pre-processing methods on computed tomography radiomics features in chronic obstructive pulmonary disease</article-title>. <source>Phys Med Biol.</source> (<year>2021</year>) <volume>66</volume>:<fpage>245015</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6560/ac3eac</pub-id><pub-id pub-id-type="pmid">34847536</pub-id></citation></ref>
<ref id="B20">
<label>20.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yun</surname> <given-names>J</given-names></name> <name><surname>Cho</surname> <given-names>YH</given-names></name> <name><surname>Lee</surname> <given-names>SM</given-names></name> <name><surname>Hwang</surname> <given-names>J</given-names></name> <name><surname>Lee</surname> <given-names>JS</given-names></name> <name><surname>Oh</surname> <given-names>Y-M</given-names></name> <etal/></person-group>. <article-title>Deep radiomics-based survival prediction in patients with chronic obstructive pulmonary disease</article-title>. <source>Sci Rep.</source> (<year>2021</year>) <volume>11</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-94535-4</pub-id><pub-id pub-id-type="pmid">34312450</pub-id></citation></ref>
<ref id="B21">
<label>21.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Au</surname> <given-names>RC</given-names></name> <name><surname>Tan</surname> <given-names>WC</given-names></name> <name><surname>Bourbeau</surname> <given-names>J</given-names></name> <name><surname>Hogg</surname> <given-names>JC</given-names></name> <name><surname>Kirby</surname></name> <name><surname>M</surname></name></person-group>. <article-title>Radiomics analysis to predict presence of chronic obstructive pulmonary disease and symptoms using machine learning[M]//TP121. TP121 COPD: FROM CELLS TO THE CLINIC</article-title>. <source>American Thoracic Society.</source> 2021:A4568. <pub-id pub-id-type="doi">10.1164/ajrccm-conference.2021.203.1_MeetingAbstracts.A4568</pub-id></citation>
</ref>
<ref id="B22">
<label>22.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>W</given-names></name> <name><surname>Guo</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <name><surname>Yang</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Early COPD risk decision for adults aged from 40 to 79 years based on lung radiomics features</article-title>. <source>Front Med</source>. (<year>2022</year>) <volume>9</volume>:<fpage>845286</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2022.845286</pub-id><pub-id pub-id-type="pmid">35530043</pub-id></citation></ref>
<ref id="B23">
<label>23.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Q</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>W</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Performance evaluation of deep learning classification network for image features</article-title>. <source>IEEE Access.</source> (<year>2021</year>) <volume>9</volume>:<fpage>9318</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3048956</pub-id></citation>
</ref>
<ref id="B24">
<label>24.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>S</given-names></name> <name><surname>Oh</surname> <given-names>J</given-names></name> <name><surname>Kim</surname> <given-names>Y-I</given-names></name> <name><surname>Ban</surname> <given-names>H-J</given-names></name> <name><surname>Kwon</surname> <given-names>Y-S</given-names></name> <name><surname>Oh</surname> <given-names>I-J</given-names></name> <etal/></person-group>. <article-title>Differences in classification of COPD group using COPD assessment test (CAT) or modified Medical Research Council (mMRC) dyspnea scores: a cross-sectional analyses</article-title>. <source>BMC Pulm Med.</source> (<year>2013</year>) <volume>13</volume>:<fpage>1</fpage>&#x02013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1186/1471-2466-13-35</pub-id><pub-id pub-id-type="pmid">23731868</pub-id></citation></ref>
<ref id="B25">
<label>25.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Launois</surname> <given-names>C</given-names></name> <name><surname>Barbe</surname> <given-names>C</given-names></name> <name><surname>Bertin</surname> <given-names>E</given-names></name> <name><surname>Nardi</surname> <given-names>J</given-names></name> <name><surname>Perotin</surname> <given-names>JM</given-names></name> <name><surname>Dury</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>The modified Medical Research Council scale for the assessment of dyspnea in daily living in obesity: a pilot study</article-title>. <source>BMC Pulm Med</source>. (<year>2012</year>) <volume>12</volume>:<fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1186/1471-2466-12-61</pub-id><pub-id pub-id-type="pmid">23025326</pub-id></citation></ref>
<ref id="B26">
<label>26.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boelders</surname> <given-names>S</given-names></name> <name><surname>Nallanthighal</surname> <given-names>VS</given-names></name> <name><surname>Menkovski</surname> <given-names>V</given-names></name> <name><surname>H&#x000E4;rm&#x000E4;</surname> <given-names>A</given-names></name></person-group>. <article-title>Detection of mild dyspnea from pairs of speech recordings[C]// ICASSP 2020-2020 IEEE international conference on acoustics, speech and signal processing (ICASSP)</article-title>. <source>IEEE.</source> (<year>2020</year>) <fpage>4102</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/ICASSP40776.2020.9054751</pub-id></citation></ref>
<ref id="B27">
<label>27.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mazumder</surname> <given-names>AN</given-names></name> <name><surname>Ren</surname> <given-names>H</given-names></name> <name><surname>Rashid</surname> <given-names>HA</given-names></name> <name><surname>Hosseini</surname> <given-names>M</given-names></name> <name><surname>Chandrareddy</surname> <given-names>V</given-names></name> <name><surname>Homayoun</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Automatic detection of respiratory symptoms using a low-power multi-input CNN processor</article-title>. <source>IEEE Design &#x00026; Test.</source> (<year>2021</year>) <volume>39</volume>:<fpage>82</fpage>&#x02013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1109/MDAT.2021.3079318</pub-id></citation>
</ref>
<ref id="B28">
<label>28.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Y</given-names></name> <name><surname>Bruijnzeel</surname> <given-names>P</given-names></name> <name><surname>Mccrae</surname> <given-names>C</given-names></name> <name><surname>Zheng</surname> <given-names>J</given-names></name> <name><surname>Nihlen</surname> <given-names>U</given-names></name> <name><surname>Zhou</surname> <given-names>R</given-names></name> <etal/></person-group>. <article-title>Study on risk factors and phenotypes of acute exacerbations of chronic obstructive pulmonary disease in Guangzhou, China-design and baseline characteristics</article-title>. <source>J Thorac Dis.</source> (<year>2015</year>) <volume>7</volume>:<fpage>720</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.3978/j.issn.2072-1439.2015.04.14</pub-id><pub-id pub-id-type="pmid">26380754</pub-id></citation></ref>
<ref id="B29">
<label>29.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brusasco</surname> <given-names>V</given-names></name> <name><surname>Crapo</surname> <given-names>R</given-names></name> <name><surname>Viegi</surname> <given-names>G</given-names></name></person-group>. <article-title>Coming together: the ATS/ERS consensus on clinical pulmonary function testing</article-title>. <source>Eur Respir J.</source> (<year>2007</year>) <volume>24</volume>:<fpage>11</fpage>&#x02013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1183/09031936.05.00034205</pub-id><pub-id pub-id-type="pmid">17389836</pub-id></citation></ref>
<ref id="B30">
<label>30.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hofmanninger</surname> <given-names>J</given-names></name> <name><surname>Prayer</surname> <given-names>F</given-names></name> <name><surname>Pan</surname> <given-names>J</given-names></name> <name><surname>Rohrich</surname> <given-names>S</given-names></name> <name><surname>Prosch</surname> <given-names>H</given-names></name> <name><surname>Langs</surname> <given-names>G</given-names></name></person-group>. <article-title>Automatic lung segmentation in routine imaging is a data diversity problem, not a methodology problem</article-title>. <source>Eur Radiol Exp.</source> (<year>2020</year>) <volume>4</volume>:<fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1186/s41747-020-00173-2</pub-id><pub-id pub-id-type="pmid">32814998</pub-id></citation></ref>
<ref id="B31">
<label>31.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <name><surname>Guo</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Guo</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>Lung parenchyma parameters measure of rats from pulmonary window computed tomography images based on ResU-Net model for medical respiratory researches</article-title>. <source>Math Biosci Eng.</source> (<year>2021</year>) <volume>18</volume>:<fpage>4193</fpage>&#x02013;<lpage>211</lpage>. <pub-id pub-id-type="doi">10.3934/mbe.2021210</pub-id><pub-id pub-id-type="pmid">34198432</pub-id></citation></ref>
<ref id="B32">
<label>32.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Griethuysen</surname> <given-names>JJM</given-names></name> <name><surname>Fedorov</surname> <given-names>A</given-names></name> <name><surname>Parmar</surname> <given-names>C</given-names></name> <name><surname>Hosny</surname> <given-names>A</given-names></name> <name><surname>Aucoin</surname> <given-names>N</given-names></name> <name><surname>Narayan</surname> <given-names>V</given-names></name> <etal/></person-group>. <article-title>Computational radiomics system to decode the radiographic phenotype</article-title>. <source>Cancer Res.</source> (<year>2017</year>) <volume>77</volume>:<fpage>e104</fpage>&#x02013;<lpage>e107</lpage>. <pub-id pub-id-type="doi">10.1158/0008-5472.CAN-17-0339</pub-id><pub-id pub-id-type="pmid">29092951</pub-id></citation></ref>
<ref id="B33">
<label>33.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>S</given-names></name> <name><surname>Ma</surname> <given-names>K</given-names></name> <name><surname>Zheng</surname> <given-names>Y</given-names></name></person-group>. <article-title>Med3d: Transfer learning for 3d medical image analysis</article-title>. <source>arXiv preprint arXiv:1904.00625.</source> (<year>2019</year>). <pub-id pub-id-type="doi">10.48550/arXiv.1904.00625</pub-id></citation>
</ref>
<ref id="B34">
<label>34.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>J</given-names></name> <name><surname>Gao</surname> <given-names>Y</given-names></name> <name><surname>Kang</surname> <given-names>Y</given-names></name></person-group>. <article-title>A method of abstracting single pulmonary lobe from computed tomography pulmonary images for locating COPD</article-title>. In: <source>Proceedings of the Fourth International Conference on Biological Information and Biomedical Engineering</source>. (<year>2020</year>). p. <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1145/3403782.3403805</pub-id></citation>
</ref>
<ref id="B35">
<label>35.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tibshirani</surname> <given-names>R</given-names></name></person-group>. <article-title>Regression shrinkage and selection via the Lasso</article-title>. <source>Journal of the Royal Statistical Society: Series B (Statistical Methodology).</source> (<year>1996</year>) <volume>58</volume>:<fpage>267</fpage>&#x02013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1111/j.2517-6161.1996.tb02080.x</pub-id></citation>
</ref>
<ref id="B36">
<label>36.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bro</surname> <given-names>R</given-names></name> <name><surname>Smilde</surname> <given-names>AK</given-names></name></person-group>. <article-title>Principal component analysis</article-title>. <source>Analytical Methods.</source> (<year>2014</year>) <volume>6</volume>:<fpage>2812</fpage>&#x02013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.1039/C3AY41907J</pub-id></citation>
</ref>
<ref id="B37">
<label>37.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Riedmiller</surname> <given-names>M</given-names></name> <name><surname>Lernen</surname> <given-names>A</given-names></name></person-group>. <article-title>Multi layer perceptron</article-title>. <source>Machine Learning Lab Special Lecture, University of Freiburg.</source> (<year>2014</year>) <fpage>7</fpage>&#x02013;<lpage>24</lpage>.</citation>
</ref>
<ref id="B38">
<label>38.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desai</surname> <given-names>M</given-names></name> <name><surname>Shah</surname> <given-names>M</given-names></name></person-group>. <article-title>An anatomization on breast cancer detection and diagnosis employing multi-layer perceptron neural network (MLP) and Convolutional neural network (CNN)</article-title>. <source>Clinical eHealth.</source> (<year>2021</year>) <volume>4</volume>:<fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.ceh.2020.11.002</pub-id></citation>
</ref>
<ref id="B39">
<label>39.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lorencin</surname> <given-names>I</given-names></name> <name><surname>Andeli&#x00107;</surname> <given-names>N</given-names></name> <name><surname>&#x00160;panjol</surname> <given-names>J</given-names></name> <name><surname>Car</surname> <given-names>Z</given-names></name></person-group>. <article-title>Using multi-layer perceptron with Laplacian edge detector for bladder cancer diagnosis</article-title>. <source>Artif Intell Med.</source> (<year>2020</year>) <volume>102</volume>:<fpage>101746</fpage>. <pub-id pub-id-type="doi">10.1016/j.artmed.2019.101746</pub-id><pub-id pub-id-type="pmid">31980088</pub-id></citation></ref>
<ref id="B40">
<label>40.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>F</given-names></name> <name><surname>Asgari</surname> <given-names>A</given-names></name></person-group>. <article-title>Prediction and optimization of heating and cooling loads in a residential building based on multi-layer perceptron neural network and different optimization algorithms</article-title>. <source>Energy.</source> (<year>2022</year>) <volume>240</volume>:<fpage>122692</fpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2021.122692</pub-id></citation>
</ref>
<ref id="B41">
<label>41.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wan</surname> <given-names>S</given-names></name> <name><surname>Liang</surname> <given-names>Y</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Guizani</surname> <given-names>M</given-names></name></person-group>. <article-title>Deep multi-layer perceptron classifier for behavior analysis to estimate Parkinson&#x00027;s disease severity using smartphones</article-title>. <source>IEEE Access.</source> (<year>2018</year>) <volume>6</volume>:<fpage>36825</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2851382</pub-id></citation>
</ref>
<ref id="B42">
<label>42.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jakkula</surname> <given-names>V</given-names></name></person-group>. <article-title>Tutorial on support vector machine (svm), School of EECS, Washington State University</article-title>. (<year>2006</year>) <volume>37</volume>:<fpage>3</fpage>.</citation>
</ref>
<ref id="B43">
<label>43.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Breiman</surname> <given-names>L</given-names></name></person-group>. <article-title>Random forest</article-title>. <source>Mach Learn.</source> (<year>2001</year>) <volume>45</volume>:<fpage>5</fpage>&#x02013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id></citation>
</ref>
<ref id="B44">
<label>44.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Safavian</surname> <given-names>SR</given-names></name> <name><surname>Landgrebe</surname> <given-names>D</given-names></name></person-group>. <article-title>A survey of decision tree classifier methodology</article-title>. <source>IEEE Trans Syst Man Cybern.</source> (<year>1991</year>) <volume>21</volume>:<fpage>660</fpage>&#x02013;<lpage>74</lpage>. <pub-id pub-id-type="doi">10.1109/21.97458</pub-id></citation>
</ref>
<ref id="B45">
<label>45.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friedman</surname> <given-names>JH</given-names></name></person-group>. <article-title>Greedy function approximation: a gradient boosting machine</article-title>. <source>Ann Stat.</source> (<year>2001</year>) <volume>29</volume>:<fpage>1189</fpage>&#x02013;<lpage>232</lpage>. <pub-id pub-id-type="doi">10.1214/aos/1013203451</pub-id></citation>
</ref>
<ref id="B46">
<label>46.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Knowles</surname> <given-names>CH</given-names></name> <name><surname>Eccersley</surname> <given-names>AJ</given-names></name> <name><surname>Scott</surname> <given-names>SM</given-names></name> <name><surname>Walker</surname> <given-names>SM</given-names></name> <name><surname>Reeves</surname> <given-names>B</given-names></name> <name><surname>Lunniss</surname> <given-names>PJ</given-names></name></person-group>. <article-title>Linear discriminant analysis of symptoms in patients with chronic constipation</article-title>. <source>Diseases of the Colon &#x00026; Rectum.</source> (<year>2000</year>). <pub-id pub-id-type="doi">10.1016/S0016-5085(00)80553-3</pub-id><pub-id pub-id-type="pmid">11052520</pub-id></citation></ref>
<ref id="B47">
<label>47.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ramteke</surname> <given-names>RJ</given-names></name> <name><surname>Khachane</surname> <given-names>MY</given-names></name></person-group>. <article-title>Automatic medical image classification and abnormality detection using K-Nearest neighbour</article-title>. <source>Int J Adv Comput Res.</source> (<year>2012</year>) <volume>2</volume>:<fpage>190</fpage>&#x02013;<lpage>6</lpage>.</citation>
</ref>
<ref id="B48">
<label>48.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>LaValley</surname> <given-names>MP</given-names></name></person-group>. <article-title>Logistic regression</article-title>. <source>Circulation.</source> (<year>2008</year>) <volume>117</volume>:<fpage>2395</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1161/CIRCULATIONAHA.106.682658</pub-id><pub-id pub-id-type="pmid">18458181</pub-id></citation></ref>
<ref id="B49">
<label>49.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>James</surname> <given-names>M</given-names></name> <name><surname>Milne</surname> <given-names>K</given-names></name> <name><surname>Neder</surname> <given-names>JA</given-names></name> <name><surname>O&#x00027;Donnell</surname> <given-names>D</given-names></name></person-group>. <article-title>Mechanisms of exertional dyspnea in patients with mild COPD and low resting lung diffusing capacity for carbon monoxide (DLCO)</article-title>. <source>Eur Respir J.</source> (<year>2020</year>) <volume>56</volume>:<fpage>922</fpage>. <pub-id pub-id-type="doi">10.1183/13993003.congress-2020.922</pub-id></citation>
</ref>
<ref id="B50">
<label>50.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parker</surname> <given-names>CM</given-names></name> <name><surname>Voduc</surname> <given-names>N</given-names></name> <name><surname>Aaron</surname> <given-names>SD</given-names></name> <name><surname>Webb</surname> <given-names>KA</given-names></name> <name><surname>O&#x00027;Donnell</surname> <given-names>DE</given-names></name></person-group>. <article-title>Physiological changes during symptom recovery from moderate exacerbations of COPD</article-title>. <source>European Respiratory Journal.</source> (<year>2005</year>) <volume>26</volume>:<fpage>420</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1183/09031936.05.00136304</pub-id><pub-id pub-id-type="pmid">16135722</pub-id></citation></ref>
<ref id="B51">
<label>51.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Shaotong</surname> <given-names>P</given-names></name> <name><surname>Dewang</surname> <given-names>L</given-names></name> <name><surname>Ziru</surname> <given-names>M</given-names></name> <name><surname>Yunpeng</surname> <given-names>L</given-names></name> <name><surname>Yonglin</surname> <given-names>L</given-names></name></person-group>. <article-title>Location and identification of insulator and bushing based on YOLOv3-spp algorithm</article-title>. In: <source>2021 IEEE International Conference on Electrical Engineering and Mechatronics Technology.</source> <publisher-loc>IEEE</publisher-loc> (<year>2021</year>). p. <fpage>791</fpage>&#x02013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1109/ICEEMT52412.2021.9602798</pub-id></citation></ref>
<ref id="B52">
<label>52.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bailey</surname> <given-names>KL</given-names></name></person-group>. <article-title>The importance of the assessment of pulmonary function in COPD</article-title>. <source>Medical Clinics.</source> (<year>2012</year>) <volume>96</volume>:<fpage>745</fpage>&#x02013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1016/j.mcna.2012.04.011</pub-id><pub-id pub-id-type="pmid">22793942</pub-id></citation></ref>
<ref id="B53">
<label>53.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>X</given-names></name> <name><surname>Mao</surname> <given-names>M</given-names></name> <name><surname>Qian</surname> <given-names>X</given-names></name></person-group>. <article-title>Auto-metric graph neural network based on a meta-learning strategy for the diagnosis of alzheimer&#x00027;s disease</article-title>. <source>IEEE J Biomed Health Inform.</source> (<year>2021</year>) <volume>25</volume>:<fpage>3141</fpage>&#x02013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2021.3053568</pub-id><pub-id pub-id-type="pmid">33493122</pub-id></citation></ref>
<ref id="B54">
<label>54.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Harutyunyan</surname> <given-names>G</given-names></name> <name><surname>Harutyunyan</surname> <given-names>V</given-names></name> <name><surname>Harutyunyan</surname> <given-names>G</given-names></name> <name><surname>S&#x000E1;nchez Gimeno</surname> <given-names>A</given-names></name> <name><surname>Cherkezyan</surname> <given-names>A</given-names></name> <name><surname>Petrosyan</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>Ventilation/perfusion mismatch is not the sole reason for hypoxaemia in early stage COVID-19 patients</article-title>. <source>Eur Respir Rev</source>. (<year>2022</year>) <volume>31</volume>:<fpage>210277</fpage>. <pub-id pub-id-type="doi">10.1183/16000617.0277-2021</pub-id><pub-id pub-id-type="pmid">35768132</pub-id></citation></ref>
<ref id="B55">
<label>55.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neder</surname> <given-names>JA</given-names></name> <name><surname>Kirby</surname> <given-names>M</given-names></name> <name><surname>Santyr</surname> <given-names>G</given-names></name> <name><surname>Pourafkari</surname> <given-names>M</given-names></name> <name><surname>Smyth</surname> <given-names>R</given-names></name> <name><surname>Phillips</surname> <given-names>DB</given-names></name> <etal/></person-group>. <article-title>Ventilation/perfusion mismatch: a novel target for COPD treatment</article-title>. <source>Chest.</source> (<year>2022</year>) <fpage>1030</fpage>&#x02013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.1016/j.chest.2022.03.033</pub-id><pub-id pub-id-type="pmid">35390329</pub-id></citation></ref>
</ref-list> 
</back>
</article> 