<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<?covid-19-tdm?>
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="systematic-review">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cardiovasc. Med.</journal-id>
<journal-title>Frontiers in Cardiovascular Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cardiovasc. Med.</abbrev-journal-title>
<issn pub-type="epub">2297-055X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcvm.2021.638011</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Cardiovascular Medicine</subject>
<subj-group>
<subject>Systematic Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Application of Machine Learning in Diagnosis of COVID-19 Through X-Ray and CT Images: A Scoping Review</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Mohammad-Rahimi</surname> <given-names>Hossein</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1236396/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Nadimi</surname> <given-names>Mohadeseh</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/877333/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ghalyanchi-Langeroudi</surname> <given-names>Azadeh</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/237153/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Taheri</surname> <given-names>Mohammad</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/712936/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ghafouri-Fard</surname> <given-names>Soudeh</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1244274/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Dental Research Center, Research Institute of Dental Sciences, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Medical Physics and Biomedical Engineering, Tehran University of Medical Sciences (TUMS)</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff3"><sup>3</sup><institution>Research Center for Biomedical Technologies and Robotics (RCBTR)</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff4"><sup>4</sup><institution>Urology and Nephrology Research Center, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Medical Genetics, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Salah D. Qanadli, University of Lausanne, Switzerland</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Beigelman Catherine, Centre Hospitalier Universitaire Vaudois (CHUV), Switzerland; Sara Hosseinzadeh Kassani, University of British Columbia, Canada</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Mohammad Taheri <email>mohammad_823&#x00040;yahoo.com</email></corresp>
<corresp id="c002">Soudeh Ghafouri-Fard <email>s.ghafourifard&#x00040;sbmu.ac.ir</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Cardiovascular Imaging, a section of the journal Frontiers in Cardiovascular Medicine</p></fn></author-notes>
<pub-date pub-type="epub">
<day>25</day>
<month>03</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>8</volume>
<elocation-id>638011</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>01</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>23</day>
<month>02</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2021 Mohammad-Rahimi, Nadimi, Ghalyanchi-Langeroudi, Taheri and Ghafouri-Fard.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Mohammad-Rahimi, Nadimi, Ghalyanchi-Langeroudi, Taheri and Ghafouri-Fard</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract><p>Coronavirus disease, first detected in late 2019 (COVID-19), has spread fast throughout the world, leading to high mortality. This condition can be diagnosed using RT-PCR technique on nasopharyngeal and throat swabs with sensitivity values ranging from 30 to 70%. However, chest CT scans and X-ray images have been reported to have sensitivity values of 98 and 69%, respectively. The application of machine learning methods on CT and X-ray images has facilitated the accurate diagnosis of COVID-19. In this study, we reviewed studies which used machine and deep learning methods on chest X-ray images and CT scans for COVID-19 diagnosis and compared their performance. The accuracy of these methods ranged from 76% to more than 99%, indicating the applicability of machine and deep learning methods in the clinical diagnosis of COVID-19.</p></abstract>
<kwd-group>
<kwd>COVID-19</kwd>
<kwd>machine learning</kwd>
<kwd>detection</kwd>
<kwd>biomarker</kwd>
<kwd>X-ray image</kwd>
</kwd-group>
<counts>
<fig-count count="1"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="116"/>
<page-count count="25"/>
<word-count count="11077"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>First identified in Wuhan, China, severe pneumonia caused by Severe Acute Respiratory Syndrome Coronavirus 2 (SARS-CoV-2) quickly spread all over the world. The resultant disorder was named coronavirus disease (COVID-19) (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). COVID-19 has various clinical symptoms, including fever, cough, dyspnea, fatigue, myalgia, headache, and gastrointestinal complications (<xref ref-type="bibr" rid="B3">3</xref>&#x02013;<xref ref-type="bibr" rid="B5">5</xref>). Diagnosis of COVID-19 infection through RT-PCR on nasopharyngeal and throat swab samples has been reported to yield positive results in 30&#x02013;70% of cases (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>). On the other hand, chest CT scans and X-ray images have been reported to have sensitivity values of 98 and 69%, respectively (<xref ref-type="bibr" rid="B7">7</xref>&#x02013;<xref ref-type="bibr" rid="B9">9</xref>). The most typical radiological signs in these patients include multifocal and bilateral ground-glass opacities and consolidations, particularly in the peripheral and basal sites (<xref ref-type="bibr" rid="B10">10</xref>). However, interpretation of the results of these imaging techniques by expert radiologists might encounter some problems leading to reduced sensitivity (<xref ref-type="bibr" rid="B11">11</xref>). Artificial intelligence has recently gained the attention of both clinicians and researchers for the appropriate management of the COVID-19 pandemic (<xref ref-type="bibr" rid="B12">12</xref>). As an accurate method, artificial intelligence is able to identify abnormal patterns of CT and X-ray images. Using this method, it is possible to assess certain segment regions and take precise structures in chest CT images facilitating diagnostic purposes. 
Artificial intelligence methods have been shown to detect COVID-19 and distinguish this condition from other pulmonary disorders and community-acquired pneumonia (<xref ref-type="bibr" rid="B13">13</xref>). Both deep learning and machine learning approaches have been used to predict different aspects of the COVID-19 outbreak. Support vector and random forest are among the most applied machine learning methods, while Convolutional Neural Network (CNN), Long Short-Term Memory (LSTM), Generative Adversarial Networks (GAN), and Residual Neural network are among the deep learning methods used in this regard (<xref ref-type="bibr" rid="B14">14</xref>). In this study, we reviewed studies which used machine and deep learning methods on chest X-ray images and CT scans for the purpose of COVID-19 diagnosis and compared their performance.</p></sec>
<sec sec-type="methods" id="s2">
<title>Methods</title>
<sec>
<title>Search Strategy</title>
<p>The research question was: &#x0201C;What are the applications of machine learning techniques and their performances in COVID-19 diagnosis using X-ray images?&#x0201D;. The search of the present review was based on the PICO elements, which were as follows:
<list list-type="bullet">
<list-item><p><bold>P (Problem/Patient/Population):</bold> Patients&#x00027; CT scans and Chest X-rays.</p></list-item>
<list-item><p><bold>I (Intervention/Indicator):</bold> Machine/deep learning models for diagnosis of COVID-19 patients</p></list-item>
<list-item><p><bold>C (Comparison):</bold> Ground truth or reference standards</p></list-item>
<list-item><p><bold>O (Outcome):</bold> Performance measurements including accuracy, AUC score, sensitivity, and specificity.</p></list-item>
</list></p>
<p>In other words, we were looking for publications that evaluated the performance of any machine learning or deep learning approaches based on inclusion and exclusion criteria. Studies that used other types of medical image modalities (e.g., ultrasound images) were excluded. An electronic search was conducted on PubMed, Google Scholar, Scopus, Embase, arXiv, and medRxiv for finding the relevant literature. Duplicate studies were removed. Studies that were cited within the retrieved papers were reviewed for finding missing studies. For identifying proper journal papers and conference proceedings, investigators screened the title and abstracts based on inclusion and exclusion criteria independently. Finally, considering the inclusion and exclusion criteria, investigators identified the eligible publications in this stage independently.</p></sec>
<sec>
<title>Inclusion Criteria</title>
<p>The following inclusion criteria were used in the selection of the articles: (1) Studies that applied machine learning or deep learning algorithms, (2) Studies that evaluated the measurement of model outcomes in comparison with ground truth or gold standards, and (3) Studies that used algorithms to analyze radiographic images (CT scan, Chest X-ray, etc.).</p></sec>
<sec>
<title>Exclusion Criteria</title>
<p>The following studies were excluded: (1) Studies that used any machine learning or deep learning approaches for problems not directly related to the COVID-19 imaging, (2) Studies that used other artificial intelligence techniques or classic computer vision approaches, (3) Studies that did not provide a clear explanation of the machine learning or deep learning model that was used to solve their problem, and (4) Review studies. The latter were excluded as we did not aim to review the data on an original level without any second-hand interpretations (summation, inferences, etc.).</p>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> shows the flowchart of the study design.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) chart showing the process of systematic identification, screening, and selection of articles.</p></caption>
<graphic xlink:href="fcvm-08-638011-g0001.tif"/>
</fig></sec></sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<p>We obtained 105 studies that used machine or deep learning methods to assess chest images of COVID-19 patients. These studies have used different analytical methods. For instance, Ardakani et al. (<xref ref-type="bibr" rid="B15">15</xref>) have assessed radiological features of CT images obtained from patients with COVID-19 and non-COVID-19 pneumonia. They used decision tree, K-nearest neighbor, na&#x000EF;ve Bayes, support vector machine, and ensemble classifiers to find the computer-aided diagnosis system with the best performance in distinguishing COVID-19 patients from non-COVID-19 pneumonia. They reported that site and distribution of pulmonary involvement, the quantity of the pulmonary lesions, ground-glass opacity, and crazy-paving as the most important characteristics for differentiation of these two sets of patients. Their computer-aided diagnosis method yielded the accuracy of 91.94%, using an ensemble (COVIDiag) classifier. Alazab et al. (<xref ref-type="bibr" rid="B16">16</xref>) have developed an artificial-intelligence method based on a deep CNN to evaluate chest X-ray images and detection of COVID-19 patients. Their method yielded an F-measure ranging from 95 to 99%. Notably, three predicting strategies could forecast the numbers of COVID-19 confirmations, recoveries, and mortalities over the upcoming week. The average accuracy of the prediction models were 94.80 and 88.43% in two different countries. Albahli has applied deep learning-based models on CT images of COVID-19 patients. He has demonstrated a high performance of a Deep Neural Network model in detecting COVID-19 patients and has offered an efficient assessment of chest-related disorders according to age and sex. His proposed model has yielded 89% accuracy in terms of GAN-based synthetic data (<xref ref-type="bibr" rid="B17">17</xref>). 
Automatic detection of COVID-19 based on X-ray images has been executed through the application of three deep learning models, including Inception ResNetV2, InceptionNetV3, and NASNetLarge. The best results have been obtained from InceptionNetV3, which yielded the accuracy levels of 98.63 and 99.02% with and without application of data augmentation in model training, respectively (<xref ref-type="bibr" rid="B18">18</xref>). Alsharman et al. (<xref ref-type="bibr" rid="B19">19</xref>) have used the CNN method to detect COVID-19 based on chest CT images in the early stages of disease course. Authors have reported high accuracy of GoogleNet CNN architecture for diagnosis of COVID-19. Altan et al. (<xref ref-type="bibr" rid="B20">20</xref>) have used a hybrid model comprising two-dimensional curvelet transformation, chaotic salp swarm algorithm, and deep learning methods for distinguishing COVID-19 from other pneumonia cases. Application of their proposed model on chest X-ray images has led to accurate diagnosis of COVID-19 patients (Accuracy = 99.69%, Sensitivity = 99.44% and Specificity = 99.81%). Apostolopoulos et al. (<xref ref-type="bibr" rid="B21">21</xref>) have used a certain CNN strategy, namely MobileNet on X-Ray images of COVID-19 patients. This method has yielded more than 99% accuracy in the diagnosis of COVID-19. In another study, Ardakani et al. (<xref ref-type="bibr" rid="B22">22</xref>) used 10 CNN strategies, namely AlexNet, VGG-16, VGG-19, SqueezeNet, GoogleNet, MobileNet-V2, ResNet-18, ResNet-50, ResNet-101, and Xception, to differentiate COVID-19 cases from non-COVID-19 patients. They have demonstrated the best diagnostic values for ResNet-101 and Xception, both of them having area under curve (AUC) values higher than 0.99 which is superior to the performance of the radiologist. Das et al. 
(<xref ref-type="bibr" rid="B23">23</xref>) have used the CNN model Truncated InceptionNet to diagnose COVID-19 from other non-COVID and/or healthy cases based on chest X-ray. Their suggested model yielded AUC of 1.0 in distinguishing COVID-19 patients from combined Pneumonia and healthy subjects. <xref ref-type="table" rid="T1">Tables 1</xref>, <xref ref-type="table" rid="T2">2</xref> summarize the features of studies which adopted machine learning methods in CT images and chest X-ray of COVID-19 patients, respectively.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Characteristics of papers that used CT images or a combination of X-ray and CT images.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>Author, year</bold></th>
<th valign="top" align="left"><bold>Data source</bold></th>
<th valign="top" align="left"><bold>Data structure and size</bold></th>
<th valign="top" align="left"><bold>Data preprocessing</bold></th>
<th valign="top" align="left"><bold>Best model structure(s)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>Performance measurements (on the best model)</bold></th>
<th valign="top" align="center"><bold>References</bold></th>
</tr>
<tr>
<th/>
<th/>
<th/>
<th/>
<th/>
<th valign="top" align="left"><bold>Accuracy</bold></th>
<th valign="top" align="left"><bold>AUC score</bold></th>
<th valign="top" align="left"><bold>Sensitivity</bold></th>
<th valign="top" align="left"><bold>Specificity</bold></th>
<th/>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Abbasian et al. (2020)</td>
<td valign="top" align="left">Iran University of Medical Sciences (IUMS)</td>
<td valign="top" align="left">306 COVID-19 patients; <break/> 306 non-COVID-19 pneumonia (CT images)</td>
<td valign="top" align="left">Extracting 20 features of CT images</td>
<td valign="top" align="left">Ensemble</td>
<td valign="top" align="left">91.94%</td>
<td valign="top" align="left">0.965</td>
<td valign="top" align="left">93.54%</td>
<td valign="top" align="left">90.32%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B15">15</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Alsharman et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-CT-dataset&#x0201D;</td>
<td valign="top" align="left">CT images</td>
<td valign="top" align="left">Binarization (the separation of the object and background is known as Binarization); <break/> Converting input image from 2D Grayscale to 3D Color</td>
<td valign="top" align="left">GoogleNet CNN</td>
<td valign="top" align="left">82.14%</td>
<td/>
<td/>
<td/>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B19">19</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ardakani et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left">108 COVID-19 patients; <break/> 86 viral pneumonia diseases (CT images)</td>
<td valign="top" align="left">Converted to the gray-scale Cropped and resized to 60 &#x0002A; 60 pixels</td>
<td valign="top" align="left">ResNet-101 <break/> Xception</td>
<td valign="top" align="left">Resnet: 99.51% <break/> Xception: 99.02% (compared to 86.7% in human)</td>
<td valign="top" align="left">Resnet: 0.994 <break/> Xception: 0.994 (compared to 0.873 in human)</td>
<td valign="top" align="left">Resnet: 100% <break/> Xception: 98.04% (compared to 89.21% in human)</td>
<td valign="top" align="left">Resnet: 99.02% <break/> Xception: 100% (compared to 83.33% in human)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B22">22</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Aswathy et al. (2020)</td>
<td valign="top" align="left">&#x0201C;National Cancer Institute and the Cancer Image Archive&#x0201D;</td>
<td valign="top" align="left">1,763 normal patients; <break/> 63 pneumonia patients</td>
<td valign="top" align="left">Thresholding; <break/> Texture-based feature extraction with a wrapper</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">99%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B24">24</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Bai et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left"><break/> 521 COVID-19 patients; <break/> 665 other pulmonary diseases (CT images)</td>
<td valign="top" align="left">Lung segmentation; <break/> Generate an 8-bit image for each axial slice by applying Lung windowing to the Hounsfield units</td>
<td valign="top" align="left">EfficientNet B4</td>
<td valign="top" align="left">96% (compared to 85% in human)</td>
<td valign="top" align="left">0.95</td>
<td valign="top" align="left">95% (compared to 79% in human)</td>
<td valign="top" align="left">96% (compared to 88% in human)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B11">11</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Bridge et al. (2020)</td>
<td valign="top" align="left"><break/> &#x0201C;Toy dataset;&#x0201D; <break/> &#x0201C;Italian Society of Radiology;&#x0201D; <break/> &#x0201C;Shenzhen Hospital X-Ray dataset;&#x0201D; <break/> &#x0201C;ChestX-Ray8;&#x0201D; <break/> &#x0201C;COVID-CT-Dataset&#x0201D;</td>
<td valign="top" align="left">129 COVID-19 patients; <break/> 62,267 normal patients; <break/> 5,689 pneumonia patients (X-ray images) <break/> 30 COVID-19 patients; <break/> 1,919 normal patients (CT images)</td>
<td valign="top" align="left">Using the GEV activation function for unbalanced data</td>
<td valign="top" align="left">Inception V3</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B25">25</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Butt et al. (2020)</td>
<td valign="top" align="left">Not mentioned</td>
<td valign="top" align="left"><break/> 219 images from 110 COVID-19 patients; <break/> 399 normal patients (CT images)</td>
<td valign="top" align="left">Image processing method based on HU values</td>
<td valign="top" align="left">3D CNN</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.996</td>
<td valign="top" align="left">98.2%</td>
<td valign="top" align="left">92.2%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B26">26</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Dey et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-19 CT segmentation dataset;&#x0201D; <break/> &#x0201C;Chest X-rays (Radiopaedia)&#x0201D;</td>
<td valign="top" align="left">200 COVID-19 patients; <break/> 200 normal patients (grayscale lung CTI images)</td>
<td valign="top" align="left">Segmenting lung area related to pneumonia infection; <break/> Extracting CWT, DWT, EWT features from original image and Haralick, Hu moments from binary segmented area <break/> Feature selection based on statistical tests</td>
<td valign="top" align="left">KNN</td>
<td valign="top" align="left">87.75%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">89.00%</td>
<td valign="top" align="left">86.50%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B27">27</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">El Asnaoui et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Kermany et al. (<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="top" align="left">2,780 Bacterial pneumonia patients; <break/> 1,493 Coronavirus patients; <break/> 231 COVID-19 patients; <break/> 1,583 normal patients (X-ray and CT images)</td>
<td valign="top" align="left">Intensity Normalization; <break/> Contrast Limited Adaptive Histogram Equalization</td>
<td valign="top" align="left">Inception ResNetV2; Densnet201</td>
<td valign="top" align="left">Inception-ResNetV2: 92.18% <break/> Densnet201: 88.09%</td>
<td valign="top" align="left">Inception-ResNetV2: 0.920 <break/> Densnet201: 0.879</td>
<td valign="top" align="left">Inception-ResNetV2: 92.11% <break/> Densnet201: 87.99%</td>
<td valign="top" align="left">Inception-ResNetV2: 96.6% <break/> Densnet201: 94.00%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B29">29</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Han et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-19 hospitals in Shandong Province&#x0201D;</td>
<td valign="top" align="left">79 COVID-19 patients; <break/> 100 pneumonia patients; <break/> 130 normal patients (CT images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">AD3D-MIL</td>
<td valign="top" align="left">97.9%</td>
<td valign="top" align="left">0.99</td>
<td valign="top" align="left">97.9%</td>
<td valign="top" align="left">97.9%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B30">30</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Harmon et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left"><break/> 386 COVID-19 patients; <break/> 1,011 negative COVID-19 patients (CT images)</td>
<td valign="top" align="left"><break/> Lung segmentation; clipping images to HU range (&#x02212;1,000, 500); <break/> Data augmentation (flipping, rotation, image intensity and contrast adjustment, adding random Gaussian noise);</td>
<td valign="top" align="left">Hybrid 3D based on Densnet-121</td>
<td valign="top" align="left">90.8%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">84%</td>
<td valign="top" align="left">93%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B31">31</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hasan et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Radiopaedia and the cancer imaging archive websites&#x0201D;</td>
<td valign="top" align="left">118 COVID-19 patients; 96 pneumonia patients; <break/> 107 normal patients (CT images)</td>
<td valign="top" align="left">Histogram <break/> Thresholding; <break/> Dilation; <break/> Hole Filling</td>
<td valign="top" align="left">LSTM</td>
<td valign="top" align="left">99.68%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B32">32</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hu et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Hospital of Wuhan Red Cross Society;&#x0201D; <break/> &#x0201C;Shenzhen Hospital;&#x0201D; <break/> &#x0201C;TCIA dataset;&#x0201D; <break/> &#x0201C;Cancer Centre Archive (TCIA) Public Access;&#x0201D; <break/> &#x0201C;MD Anderson Cancer Centre;&#x0201D; <break/> &#x0201C;Memorial Sloan-Kettering Cancer Center;&#x0201D; <break/> &#x0201C;MAASTRO clinic&#x0201D;</td>
<td valign="top" align="left">150 COVID-19 patients; <break/> 150 pneumonia patients; <break/> 150 normal patients (CT images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">96.2%</td>
<td valign="top" align="left">0.970</td>
<td valign="top" align="left">94.5%</td>
<td valign="top" align="left">95.3%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B33">33</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Jaiswal et al. (2020)</td>
<td valign="top" align="left">&#x0201C;The SARS-CoV-2 CT scan dataset&#x0201D;</td>
<td valign="top" align="left">1,262 COVID-19 patients; 1,230 non-COVID-19 patients (CT images)</td>
<td valign="top" align="left">Data augmentation (rotation up to 15, slant-angle of 0.2, horizontal flipping, filling new pixels as &#x0201C;nearest&#x0201D; for better robustness)</td>
<td valign="top" align="left">DenseNet201</td>
<td valign="top" align="left">96.25%</td>
<td valign="top" align="left">0.97</td>
<td valign="top" align="left">96.29%</td>
<td valign="top" align="left">96.21%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B34">34</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Kang et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Tongji Hospital of Huazhong University of Science and Technology;&#x0201D; <break/> &#x0201C;China-Japan Union Hospital of Jilin University;&#x0201D; <break/> &#x0201C;Ruijin Hospital ofShanghai Jiao Tong University&#x0201D;</td>
<td valign="top" align="left">1,495 COVID-19 patients; <break/> 1,027 community-acquired pneumonia (CAP) patients (CT images)</td>
<td valign="top" align="left">Normalization; <break/> Standardization</td>
<td valign="top" align="left">NN</td>
<td valign="top" align="left">93.90%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">94.60%</td>
<td valign="top" align="left">91.70%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B35">35</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Lessmann et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Emergency wards of an Academic center and teaching hospital in the Netherlands in March and April 2020&#x0201D;</td>
<td valign="top" align="left">237 COVID-19 patients; <break/> 606 normal patients (CT images)</td>
<td valign="top" align="left">Resampling; <break/> Normalization</td>
<td valign="top" align="left">CORADS-AI</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.95</td>
<td valign="top" align="left">85.7%</td>
<td valign="top" align="left">89.8%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B36">36</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Li et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">1,296 COVID-19 patients; <break/> 1,325 non-pneumonia patients; <break/> 1,735 community-acquired pneumonia patients (CT images)</td>
<td valign="top" align="left">Segmenting lung area with U-net</td>
<td valign="top" align="left">COVNet (ResNet-50)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.96</td>
<td valign="top" align="left">90%</td>
<td valign="top" align="left">96%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B13">13</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Li et al. (2020)</td>
<td valign="top" align="left">More than 10 medical centers between Nov. 11th, 2010 and Feb. 9th, 2020</td>
<td valign="top" align="left">305 images from 251 COVID-19 patients; <break/> 872 images from 869 pneumonia patients; <break/> 1,498 images from 1,475 non-pneumonia patients (CT images)</td>
<td valign="top" align="left">DL-based algorithm <break/> Image processing method based on HU values; <break/> Data augmentation</td>
<td valign="top" align="left">3D ResNet-18</td>
<td valign="top" align="left" colspan="4">Recall = 88% <break/> Precision = 89.6% <break/> F1 score = 87.8%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B37">37</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Liu et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">73 COVID-19 patients; <break/> 27 general pneumonia patients (CT images)</td>
<td valign="top" align="left">ROI delineation based on ground-glass opacities (GGOs); <break/> 13 gray level co-occurrence matrix (GLCM) features, 15 gray level-gradient co-occurrence matrix (GLGCM) features, and six histogram features were extracted; <break/> Feature selection by ReliefF;</td>
<td valign="top" align="left">An ensemble of bagged tree (EBT)</td>
<td valign="top" align="left">94.16%</td>
<td valign="top" align="left">0.99</td>
<td valign="top" align="left">88.62%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B38">38</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Mei et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">419 COVID-19 patients <break/> 486 non-COVID-19 patients (CT images)</td>
<td valign="top" align="left">Selecting pertinent slices by image segmentation to detect parenchymal tissue; <break/> Segmenting lung in CT images;</td>
<td valign="top" align="left">ResNet-18</td>
<td valign="top" align="left">79.6%</td>
<td valign="top" align="left">0.86</td>
<td valign="top" align="left">83.6%</td>
<td valign="top" align="left">75.9%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B39">39</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Panwar et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-chest X-ray;&#x0201D; <break/> &#x0201C;SARS-COV-2 CT-scan;&#x0201D; <break/> &#x0201C;Chest X-Ray Images (Pneumonia);&#x0201D;</td>
<td valign="top" align="left">206 COVID-19 patients; <break/> 364 Pneumonia patients (X-ray and CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">VGG-19</td>
<td valign="top" align="left">95.61% (COVID-19 vs. Pneumonia)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96.55% (COVID-19 vs. Pneumonia)</td>
<td valign="top" align="left">95.29% (COVID-19 vs. Pneumonia)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B40">40</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Pathak et al. (2020)</td>
<td valign="top" align="left">2 different COVID-19 datasets of chest-CT images</td>
<td valign="top" align="left">CT images</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Deep bidirectional long short-term memory network with mixture density network (DBM)</td>
<td valign="top" align="left">96.19% (multi-class)</td>
<td valign="top" align="left">0.96 (multi-class)</td>
<td valign="top" align="left">96.22% (multi-class)</td>
<td valign="top" align="left">96.16% (multi-class)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B41">41</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Pathak et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-19 open datasets of chest CT images&#x0201D;</td>
<td valign="top" align="left">413 COVID-19 patients; <break/> 439 normal or pneumonia infected patients (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">ResNet-50</td>
<td valign="top" align="left">93.01%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">91.45%</td>
<td valign="top" align="left">94.77%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B41">41</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Peng et al. (2020)</td>
<td valign="top" align="left">Collected from PMC</td>
<td valign="top" align="left">606 COVID-19 patients; <break/> 222 Influenza; <break/> 397 Normal or other disease patients (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">DenseNet121</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.87</td>
<td valign="top" align="left">72.3%</td>
<td valign="top" align="left">85.2%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B42">42</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Pu et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">498 COVID-19 patients; <break/> 497 community-acquired pneumonia (CAP) (CT images)</td>
<td valign="top" align="left">Data augmentation [rotation, translation, vertical/horizontal flips, Hounsfield Unit (HU) shift, smoothing (blurring) operation, Gaussian noise]</td>
<td valign="top" align="left">3D CNNs</td>
<td valign="top" align="left">99%</td>
<td valign="top" align="left">0.7</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B43">43</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Raajan et al. (2020)</td>
<td valign="top" align="left">X-ray images on public medical Github repositories; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">349 images from 216 COVID-19 patients; <break/> 1,341 Normal patients (CT images)</td>
<td valign="top" align="left">Normalization</td>
<td valign="top" align="left">ResNet-16</td>
<td valign="top" align="left">95.09%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">81.89%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B44">44</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Rajaraman et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Pediatric CXR dataset;&#x0201D; <break/> &#x0201C;RSNA CXR dataset;&#x0201D; <break/> &#x0201C;Twitter COVID-19 CXR dataset;&#x0201D; <break/> &#x0201C;Montreal COVID-19 CXR dataset&#x0201D;</td>
<td valign="top" align="left">313 COVID-19 patients; <break/> 7,595 pneumonia of unknown type patients; <break/> 2,780 bacterial pneumonia; <break/> 7,595 Normal patients (X-ray images)</td>
<td valign="top" align="left">Median filtering; <break/> Normalization; <break/> Standardization</td>
<td valign="top" align="left">Inception-V3</td>
<td valign="top" align="left">99.01%</td>
<td valign="top" align="left">0.997</td>
<td valign="top" align="left">98.4%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B45">45</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Sakagianni et al. (2020)</td>
<td valign="top" align="left">COVID-19 articles on medRxiv and bioRxiv</td>
<td valign="top" align="left">349 COVID-19 patients; <break/> 397 non-COVID-19 patients (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">AutoML Cloud Vision</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.94</td>
<td valign="top" align="left">88.31%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B46">46</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Sharma (2020)</td>
<td valign="top" align="left">Dataset from Italian Society of Medical and Interventional Radiology; <break/> COVID-CT available in GitHub; <break/> Dataset from hospitals in Moscow, Russia; <break/> Dataset from SAL Hospital, Ahmedabad, India;</td>
<td valign="top" align="left">800 COVID-19 patients; <break/> 600 Viral Pneumonia; <break/> 800 normal patients (CT images)</td>
<td valign="top" align="left">Ground-glass opacities (GGO), consolidation and pleural effusion are the features</td>
<td valign="top" align="left">ResNet</td>
<td valign="top" align="left">91%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">92.1%</td>
<td valign="top" align="left">90.29%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B47">47</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Singh et al. (2020)</td>
<td valign="top" align="left">Not mentioned</td>
<td valign="top" align="left">CT images</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Multi-objective differential evolution (MODE) based CNN</td>
<td valign="top" align="left">90.22%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">91.17%</td>
<td valign="top" align="left">89.23%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B48">48</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Song et al. (2020)</td>
<td valign="top" align="left">Private (two hospitals in China);</td>
<td valign="top" align="left">98 COVID-19 patients; <break/> 103 non-COVID-19 pneumonia (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">BigBiGAN</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.972</td>
<td valign="top" align="left">92%</td>
<td valign="top" align="left">91%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Wang et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">1,315 COVID-19 patients; <break/> 2,406 ILD patients; <break/> 936 Normal patients (CT images)</td>
<td valign="top" align="left">Lobe Segmentation by 3D-Unet; <break/> Converting CT numbers to grayscale</td>
<td valign="top" align="left">PA-66 model</td>
<td valign="top" align="left">93.3%</td>
<td valign="top" align="left">0.973</td>
<td valign="top" align="left">97.6%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B50">50</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Wang et al. (2020)</td>
<td valign="top" align="left">COVID-19 dataset (private); <break/> CT-epidermal growth factor receptor (CT-EGFR) dataset (private);</td>
<td valign="top" align="left">754 COVID-19 patients; <break/> 271 bacterial pneumonia <break/> 29 viral pneumonia; <break/> 42 Other pneumonia (CT images) <break/> &#x0002A;The CT-EGFR dataset was used for auxiliary training of the DL system</td>
<td valign="top" align="left">Lung segmentation; <break/> Using a fully automatic DL model (DenseNet121-FPN); <break/> suppress the intensities of non-lung areas inside the lung ROI;</td>
<td valign="top" align="left">COVID-19Net (DenseNet-like architecture)</td>
<td valign="top" align="left">Test-set1: 78.32% <break/> Test-set2: 80.12%</td>
<td valign="top" align="left">Test-set1: 0.87 <break/> Test-set2: 0.88</td>
<td valign="top" align="left">Test-set1: 80.39% <break/> Test-set2: 79.35%</td>
<td valign="top" align="left">Test-set1: 76.61% <break/> Test-set2: 81.16%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B51">51</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Warman et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Public sources&#x0201D;</td>
<td valign="top" align="left">606 COVID-19 patients; <break/> 224 viral pneumonias patients; <break/> 74 Normal patients (CT images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">YOLOv3 model</td>
<td valign="top" align="left">96.80%</td>
<td valign="top" align="left">0.966</td>
<td valign="top" align="left">98.33%</td>
<td valign="top" align="left">94.95%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B52">52</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Wu et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">368 COVID-19 patients; <break/> 127 other pneumonia (CT images)</td>
<td valign="top" align="left">Lung region in each axial, coronal and sagittal CT slices were segmented using threshold segmentation and morphological optimization algorithms; <break/> The slice with the most pixels in the segmented lung area from each of the axial, coronal and sagittal views was selected as the inputs of the deep learning network;</td>
<td valign="top" align="left">Multi-view fusion ResNet50 architecture</td>
<td valign="top" align="left">76%</td>
<td valign="top" align="left">0.819</td>
<td valign="top" align="left">81.1%</td>
<td valign="top" align="left">61.5%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B53">53</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Xu et al. (2020)</td>
<td valign="top" align="left">Private &#x0201C;Hospitals in Zhejiang Province, China.&#x0201D;</td>
<td valign="top" align="left">219 images from 110 COVID-19 patients; <break/> 224 Influenza-A viral pneumonia patients; <break/> 175 Normal patients (CT images)</td>
<td valign="top" align="left">Image processing method based on HU values</td>
<td valign="top" align="left">3D CNN segmentation model</td>
<td valign="top" align="left">86.7%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">86.7%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B54">54</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Xu et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">432 COVID-19 patients; <break/> 76 other viral pneumonia; <break/> 350 bacterial pneumonia; 418 normal patients (CT images)</td>
<td valign="top" align="left">Sampling 5 subsets of CT slices from all sequential images of one CT case to picture the infected lung regions.</td>
<td valign="top" align="left">3D-Densenet</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.98</td>
<td valign="top" align="left">97.5% (differentiating COVID-19 from three types of non-COVID-19 cases) (compared to 79% in human)</td>
<td valign="top" align="left">89.4% (differentiating COVID-19 from three types of non-COVID-19 cases) (compared to 90% in human)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B55">55</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Yan et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">416 images from 206 COVID-19 patients; <break/> 412 common pneumonia patients (CT images)</td>
<td valign="top" align="left">Transferring image slices to JPG; <break/> Normalization</td>
<td valign="top" align="left">MSCNN</td>
<td valign="top" align="left">97.7%</td>
<td valign="top" align="left">0.962</td>
<td valign="top" align="left">99.5%</td>
<td valign="top" align="left">95.6%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B56">56</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Yang et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">146 COVID-19 patients; <break/> 149 normal patients (CT images)</td>
<td valign="top" align="left">For patients, images containing ground-glass opacity (GGO), GGO with consolidation were selected; for healthy control, every 3 slices containing pulmonary parenchyma were selected; <break/> Lung windowing is performed over all image slices;</td>
<td valign="top" align="left">DenseNet</td>
<td valign="top" align="left">92% (compared to 95% in human)</td>
<td valign="top" align="left">0.98</td>
<td valign="top" align="left">97% (compared to 94% in human)</td>
<td valign="top" align="left">87% (compared to 96% in human)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B57">57</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Yu et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">202 COVID-19 patients (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">DenseNet-201 with the cubic SVM model</td>
<td valign="top" align="left">95.2%</td>
<td valign="top" align="left">0.99</td>
<td valign="top" align="left">91.87%</td>
<td valign="top" align="left">96.87%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B58">58</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Al-Karawi et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-CT-Dataset&#x0201D;</td>
<td valign="top" align="left">275 COVID-19 patients; <break/> 195 normal patients (CT images)</td>
<td valign="top" align="left">Adaptive winner filter followed by inversion; <break/> Feature extraction by the FFT-spectrum</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">95.37%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">95.99%</td>
<td valign="top" align="left">94.76%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B59">59</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Alom et al. (2020)</td>
<td valign="top" align="left">Publicly available datasets; <break/> &#x0201C;Kaggle repository&#x0201D;</td>
<td valign="top" align="left">3,875 pneumonia patients; <break/> 1,341 normal patients (X-Ray images) <break/> 178 COVID-19 patients; <break/> 247 normal patients (CT images)</td>
<td valign="top" align="left">Data augmentation; <break/> Adaptive Thresholding Approach</td>
<td valign="top" align="left">IRRCNN model; <break/> NABLA-3 network model</td>
<td valign="top" align="left">X-ray images: 84.67% <break/> CT images: 98.78%</td>
<td valign="top" align="left">0.93</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B60">60</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Barstugan et al. (2020)</td>
<td valign="top" align="left">From the Italian Society of Medical and Interventional Radiology</td>
<td valign="top" align="left">150 COVID-19 patients (CT images)</td>
<td valign="top" align="left">13 features were extracted by Gray Level Size Zone Matrix (GLSZM)</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">98.77%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.72%</td>
<td valign="top" align="left">99.67%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B61">61</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Chen et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left">25,989 images from 51 COVID-19 patients; <break/> 20,107 images from 55 normal patients (retrospective dataset); <break/> 13,911 images from 27 consecutive patients (prospective dataset) (CT images)</td>
<td valign="top" align="left">Filtering</td>
<td valign="top" align="left">Deep learning model</td>
<td valign="top" align="left">Retrospective dataset: 95.24%; <break/> Prospective dataset: 92.59% (per patient)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Retrospective dataset: 100%; <break/> Prospective dataset: 100% (per patient)</td>
<td valign="top" align="left">Retrospective dataset: 93.55%; <break/> Prospective dataset: 81.82% (per patient)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B62">62</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Farid et al. (2020)</td>
<td valign="top" align="left">Kaggle database</td>
<td valign="top" align="left">51 COVID-19 patients (CT images)</td>
<td valign="top" align="left">Feature extraction (MPEG7 Histogram Filter, Gabor Image Filter, Pyramid of Rotation-Invariant Local Binary Pattern, Fuzzy 64-bin Histogram Image Filter); <break/> Feature selection by composite hybrid feature selection</td>
<td valign="top" align="left">CHFS-Stacked (jrip, RF) with Na&#x000EF;ve Bayes classifier</td>
<td valign="top" align="left">96.07%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B63">63</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Gozes et al. (2020)</td>
<td valign="top" align="left">Dataset1: ChainZ; <break/> Dataset2: Private; <break/> Dataset3: ChainZ;</td>
<td valign="top" align="left">50 suspicious COVID-19 patients from dataset1 used for training; <break/> 56 COVID-19 patients; <break/> 51 normal patients (CT images) used for testing</td>
<td valign="top" align="left">Data augmentation (rotation, horizontal flips and cropping)</td>
<td valign="top" align="left">Resnet-50-2D</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.996</td>
<td valign="top" align="left">98.2%</td>
<td valign="top" align="left">92.2%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B64">64</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Jin et al. (2020)</td>
<td valign="top" align="left">Three centers in China; <break/> &#x0201C;LIDC-IDRI;&#x0201D; &#x0201C;Tianchi-Alibaba;&#x0201D; &#x0201C;CC-CCII&#x0201D;</td>
<td valign="top" align="left">2,529 images from 1,502 COVID-19 patients; <break/> 1,338 images from 1,334 CAP patients; <break/> 135 images from 83 influenza-A/B patients; <break/> 258 images from 258 normal patients (CT images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.977</td>
<td valign="top" align="left">90.19%</td>
<td valign="top" align="left">95.76%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B65">65</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Jin et al. (2020)</td>
<td valign="top" align="left">Data from three different centers in Wuhan; <break/> Data from three publicly available databases, LIDC-IDRI26, Tianchi-Alibaba27, and CC-CCII18;</td>
<td valign="top" align="left">1,502 COVID-19 patients; <break/> 83 influenza-A/B patients; 1,334 CAP patients except for influenza; <break/> 258 healthy subjects (CT images)</td>
<td valign="top" align="left">Segmenting lung area with U-net</td>
<td valign="top" align="left">ResNet152</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">0.971</td>
<td valign="top" align="left">90.19%</td>
<td valign="top" align="left">95.76%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B66">66</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hosseinzadeh Kassani et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;Kaggle chest X-ray database;&#x0201D; <break/> &#x0201C;Kaggle RSNA Pneumonia Detection dataset&#x0201D;</td>
<td valign="top" align="left">117 COVID-19 patients; <break/> 117 normal patients (X-Ray images); <break/> 20 COVID-19 patients; <break/> 20 normal patients (CT images)</td>
<td valign="top" align="left">Normalization</td>
<td valign="top" align="left">DenseNet121 with Bagging tree classifier</td>
<td valign="top" align="left">99%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B67">67</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ozkaya et al. (2020)</td>
<td valign="top" align="left">From the Italian Society of Medical and Interventional Radiology</td>
<td valign="top" align="left">53 COVID-19 patients (CT images)</td>
<td valign="top" align="left">Feature vectors obtained from Pre-trained VGG-16, GoogleNet and ResNet-50 networks and fusion method; <break/> Feature ranking by <italic>t</italic>-test method</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">98.27%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">98.93%</td>
<td valign="top" align="left">97.60%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B68">68</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Shi et al. (2020)</td>
<td valign="top" align="left">From Tongji Hospital, Shanghai Public Health Clinical Center, and China-Japan Union Hospital (all in China)</td>
<td valign="top" align="left">183 COVID-19 patients; 5,521 Pneumonia patients (CT images)</td>
<td valign="top" align="left">Segmentation by a deep learning network (VB-Net)</td>
<td valign="top" align="left">Infection size-aware random forest</td>
<td valign="top" align="left">87.9%</td>
<td valign="top" align="left">0.942</td>
<td valign="top" align="left">90.7%</td>
<td valign="top" align="left">83.3%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B69">69</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Song et al. (2020)</td>
<td valign="top" align="left">From the Renmin Hospital of Wuhan University</td>
<td valign="top" align="left">88 COVID-19 patients (CT images)</td>
<td valign="top" align="left">The main regions of the lungs were extracted, and the blank of lung segmentation was filled with the lung itself</td>
<td valign="top" align="left">Details Relation Extraction neural network</td>
<td valign="top" align="left">86%</td>
<td valign="top" align="left">0.96</td>
<td valign="top" align="left">96%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B3">3</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Wang et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left">44 COVID-19 patients; 55 Pneumonia patients (CT images)</td>
<td valign="top" align="left">Random selection of ROI; Feature extraction using Transfer Learning</td>
<td valign="top" align="left">Fully connected network and combination of Decision tree and Adaboost</td>
<td valign="top" align="left">82.9%</td>
<td valign="top" align="left">0.90</td>
<td valign="top" align="left">81%</td>
<td valign="top" align="left">84%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B6">6</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Zheng et al. (2020)</td>
<td valign="top" align="left">Private dataset</td>
<td valign="top" align="left">313 COVID-19 patients; 229 non-COVID-19 patients (CT images)</td>
<td valign="top" align="left">Data augmentation; Producing lung masks by a trained UNet</td>
<td valign="top" align="left">3D deep convolutional neural network</td>
<td valign="top" align="left">90.8%</td>
<td valign="top" align="left">0.959</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B70">70</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>Data Source: The source(s) that images were acquired from; Data Structure and Size: Number of images, image modalities, sample groups; Data Preprocessing: cleaning, instance selection, normalization, transformation, feature extraction, selection, etc. (the product of data preprocessing is the final training set); Best Model Structure(s): Best machine learning algorithm or deep learning model reported in the selected paper based on its performance; Performance Measurements (on the best model): The measurement of the model&#x00027;s output performance based on accuracy, sensitivity, specificity, and AUC score</italic>.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Characteristics of papers that used X-ray images.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>Author, year</bold></th>
<th valign="top" align="left"><bold>Data source</bold></th>
<th valign="top" align="left"><bold>Data structure and size</bold></th>
<th valign="top" align="left"><bold>Data preprocessing</bold></th>
<th valign="top" align="left"><bold>Best model structure(s)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>Performance measurements (on the best model)</bold></th>
<th valign="top" align="center"><bold>References</bold></th>
</tr>
<tr>
<th/>
<th/>
<th/>
<th/>
<th/>
<th valign="top" align="left"><bold>Accuracy</bold></th>
<th valign="top" align="left"><bold>AUC score</bold></th>
<th valign="top" align="left"><bold>Sensitivity</bold></th>
<th valign="top" align="left"><bold>Specificity</bold></th>
<th/>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Alazab et al. (2020)</td>
<td valign="top" align="left">Kaggle database</td>
<td valign="top" align="left">70 COVID-19 patients <break/> 28 normal patients (X-ray images)</td>
<td valign="top" align="left">Augmented to 1,000 images</td>
<td valign="top" align="left">VGG-16</td>
<td valign="top" align="left" colspan="4">F1 Score: 0.99</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B16">16</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Albahli et al. (2020)</td>
<td valign="top" align="left">&#x0201C;ChestX-ray8&#x0201D; combined with the few samples of rare classes from the Kaggle challenge</td>
<td valign="top" align="left">108,948 X-ray images of 32,717 unique patients. Including 15 kinds of chest disease</td>
<td valign="top" align="left">Data augmentation (rotation, height shift, zoom, horizontal flip)</td>
<td valign="top" align="left">ResNet</td>
<td valign="top" align="left">89%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B17">17</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Albahli et al. (2020)</td>
<td valign="top" align="left">Open source COVIDx dataset</td>
<td valign="top" align="left">850 COVID-19 patients; <break/> 500 non-COVID-19 pneumonia cases; <break/> 915 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">InceptionNetV3</td>
<td valign="top" align="left">99.02%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B18">18</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Altan et al. (2020)</td>
<td valign="top" align="left">Not mentioned</td>
<td valign="top" align="left">7,980 chest X-ray image (2,905 real raw 5,075 synthetic chests X-ray images)</td>
<td valign="top" align="left">Data augmentation; <break/> The feature matrix is formed by 2D Curvelet transformation Coefficients; <break/> Optimizing the coefficients in the feature matrix with the CSSA</td>
<td valign="top" align="left">Hybrid model</td>
<td valign="top" align="left">99.69%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">99.44%</td>
<td valign="top" align="left">99.81%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B20">20</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Apostolopoulos et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Common Bacterial and Viral Pneumonia X-ray Images by Kermany et al.; <break/> Public datasets (Radiological Society of North America, Radiopaedia, and the Italian Society of Medical and Interventional Radiology); <break/> &#x0201C;NIH Chest X-ray Dataset&#x0201D;</td>
<td valign="top" align="left">455 COVID-19 patients; <break/> 910 viral pneumonia; <break/> 2,540 other pulmonary diseases (X-ray images)</td>
<td valign="top" align="left">Data augmentation (randomly rotated by a maximum of 10&#x000B0; and randomly shifted horizontally or vertically by a maximum of 20 pixels toward any direction)</td>
<td valign="top" align="left">MobileNet v2</td>
<td valign="top" align="left">99.18%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.36%</td>
<td valign="top" align="left">99.42%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B21">21</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Apostolopoulos et al. (2020)</td>
<td valign="top" align="left">X-ray images on public medical Github repositories; <break/> &#x0201C;Radiological Society of North America;&#x0201D; <break/> &#x0201C;Radiopaedia, and Italian Society of Medical and Interventional Radiology&#x0201D;</td>
<td valign="top" align="left">Dataset 1: <break/> 224 COVID-19 patients; <break/> 700 bacterial pneumonia patients; <break/> 504 normal patients (X-ray images) <break/> Dataset 2: <break/> 224 COVID-19 patients; <break/> 714 bacterial and viral pneumonia patients; <break/> 504 normal patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">MobileNet v2</td>
<td valign="top" align="left">96.78%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">98.66%</td>
<td valign="top" align="left">96.46%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B71">71</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Brunese et al. (2020)</td>
<td valign="top" align="left">COVID-19 image data collection; <break/> COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;ChestX-ray8;&#x0201D; <break/> &#x0201C;NIH Chest X-ray Dataset&#x0201D;</td>
<td valign="top" align="left">250 COVID-19 patients; <break/> 2,753 other pulmonary diseases; <break/> 3,520 normal patients (X-Ray images)</td>
<td valign="top" align="left">Data augmentation (15 degrees rotation clockwise or counterclockwise)</td>
<td valign="top" align="left">VGG-16</td>
<td valign="top" align="left">96% (comparison between COVID-19 and other pulmonary diseases)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">87% <break/> 96%</td>
<td valign="top" align="left">94% <break/> 98%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B72">72</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Chowdhury et al. (2020)</td>
<td valign="top" align="left">Kaggle chest X-ray database; <break/> &#x0201C;Italian Society of Medical and Interventional Radiology COVID-19 database;&#x0201D; <break/> &#x0201C;Novel Corona Virus 2019 Dataset;&#x0201D; <break/> GitHub database; <break/> &#x0201C;COVID-19 Chest imaging at thread reader;&#x0201D; <break/> &#x0201C;RSNA-Pneumonia-Detection-Challenge&#x0201D;</td>
<td valign="top" align="left">423 COVID-19 patients; <break/> 1,485 viral pneumonia patients; <break/> 1,579 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">99.7%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">99.7%</td>
<td valign="top" align="left">99.55%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B73">73</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Civit-Masot et al. (2020)</td>
<td valign="top" align="left">COVID-19 and Pneumonia Scans Dataset</td>
<td valign="top" align="left">132 COVID-19 patients; <break/> 132 normal patients; <break/> 132 Pneumonia patients (X-ray images)</td>
<td valign="top" align="left">Histogram equalization</td>
<td valign="top" align="left">VGG16</td>
<td valign="top" align="left">85%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">85%</td>
<td valign="top" align="left">92%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B74">74</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Das et al. (2020)</td>
<td valign="top" align="left">COVID-19 collection; <break/> &#x0201C;Kaggle CXR collection;&#x0201D; <break/> &#x0201C;Tuberculosis collections;&#x0201D; <break/> &#x0201C;U.S. National Library of Medicine;&#x0201D; <break/> &#x0201C;National Institutes of Health;&#x0201D; Pneumonia collections</td>
<td valign="top" align="left">162 COVID-19 patients; <break/> 1,583 normal patients</td>
<td valign="top" align="left">Histogram matching</td>
<td valign="top" align="left">Truncated Inception Net</td>
<td valign="top" align="left">100% (Pneumonia collections)</td>
<td valign="top" align="left">1.0</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B23">23</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Elaziz et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;Chest X-Ray Images Pneumonia;&#x0201D; Italian Society of Medical and Interventional Radiology COVID-19 DATABASE;</td>
<td valign="top" align="left">219 COVID-19 patients; <break/> 1,341 negative COVID-19 patients (X-ray images)</td>
<td valign="top" align="left">Feature extraction by Fractional Multichannel Exponent Moments (FrMEMs); <break/> Feature selection by modified Manta-Ray <break/> Foraging Optimization based on differential evolution</td>
<td valign="top" align="left">KNN</td>
<td valign="top" align="left">98.09%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">98.91%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B75">75</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hassantabar et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-CT-Dataset&#x0201D;</td>
<td valign="top" align="left">315 COVID-19 patients; 367 non-COVID-19 patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">93.2%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96.1%</td>
<td valign="top" align="left">99.71%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B76">76</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Islam et al. (2020)</td>
<td valign="top" align="left">&#x0201C;GitHub;&#x0201D; <break/> &#x0201C;Radiopaedia;&#x0201D; <break/> &#x0201C;Cancer Imaging Archive;&#x0201D; <break/> &#x0201C;Italian Society of Radiology;&#x0201D; <break/> &#x0201C;Kaggle repository;&#x0201D; <break/> NIH dataset</td>
<td valign="top" align="left">1,525 COVID-19 patients; <break/> 1,525 pneumonia patients; <break/> 1,525 normal patients (X-ray images)</td>
<td valign="top" align="left">Normalization</td>
<td valign="top" align="left">CNN-LSTM</td>
<td valign="top" align="left">99.4%</td>
<td valign="top" align="left">0.999</td>
<td valign="top" align="left">99.3%</td>
<td valign="top" align="left">99.2%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B77">77</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Khan et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Covid-chestxray-dataset&#x0201D; <break/> &#x0201C;Chest X-Ray Images (Pneumonia)&#x0201D;</td>
<td valign="top" align="left">284 COVID-19 patients; <break/> 330 Pneumonia Bacterial <break/> 327 Pneumonia Viral; <break/> 310 normal patients (X-ray images)</td>
<td valign="top" align="left">Random under-sampling (to overcome the unbalanced data problem)</td>
<td valign="top" align="left">CoroNet (based on Xception)</td>
<td valign="top" align="left">89.6%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">89.92%</td>
<td valign="top" align="left">96.4%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B78">78</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Khuzani et al. (2020)</td>
<td valign="top" align="left">&#x0201C;GitHub&#x0201D;</td>
<td valign="top" align="left">140 COVID-19 patients; <break/> 140 non-COVID-19 pneumonia patients; <break/> 140 normal patients (X-ray images)</td>
<td valign="top" align="left">PCA method; <break/> Min-Max Normalization; <break/> Adaptive Histogram Equalization</td>
<td valign="top" align="left">ML</td>
<td valign="top" align="left">94%</td>
<td valign="top" align="left">0.91</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B79">79</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ko et al. (2020)</td>
<td valign="top" align="left">Private; <break/> Italian Society of Medical and Interventional Radiology COVID-19 DATABASE;</td>
<td valign="top" align="left">1,194 COVID-19 patients; <break/> 1,442 non-pneumonia patients; <break/> 1,357 Pneumonia patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation (rotation, zoom)</td>
<td valign="top" align="left">FCONet (ResNet-50)</td>
<td valign="top" align="left">99.58%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">99.58%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B80">80</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Loey et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP</td>
<td valign="top" align="left">69 COVID-19 patients; <break/> 79 pneumonia bacterial patients; <break/> 79 pneumonia viral patients; <break/> 79 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">Googlenet</td>
<td valign="top" align="left">80.56% (Four classes)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">80.56%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B81">81</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Mahmud et al. (2020)</td>
<td valign="top" align="left">Private</td>
<td valign="top" align="left">1,583 normal patients; <break/> 1,493 non-COVID viral pneumonia; <break/> 2,780 bacterial pneumonia; 305 COVID-19 patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">CovXNet (CNN based architecture)</td>
<td valign="top" align="left">90.2% (multi-class)</td>
<td valign="top" align="left">0.911 (multi-class)</td>
<td valign="top" align="left">89.9% (multi-class)</td>
<td valign="top" align="left">89.1% (multi-class)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B82">82</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Mart&#x000ED;nez et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP</td>
<td valign="top" align="left">120 COVID-19 patients; <break/> 120 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation; <break/> Normalization</td>
<td valign="top" align="left">NASNet-type convolutional</td>
<td valign="top" align="left">97%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97%</td>
<td valign="top" align="left">97%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B83">83</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Minaee et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;ChexPert dataset&#x0201D;</td>
<td valign="top" align="left">40 COVID-19 patients; <break/> 3,000 normal patients (X-ray images)</td>
<td valign="top" align="left">Regularization</td>
<td valign="top" align="left">SqueezeNet</td>
<td valign="top" align="left">97%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.5%</td>
<td valign="top" align="left">97.8%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B84">84</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Narayan Das et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;ChestX-ray8&#x0201D;</td>
<td valign="top" align="left">125 COVID-19 patients; <break/> 500 pneumonia patients; <break/> 500 normal patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Xception</td>
<td valign="top" align="left">97.4%</td>
<td valign="top" align="left">0.986</td>
<td valign="top" align="left">97.09%</td>
<td valign="top" align="left">97.29%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B85">85</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Nour et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Public COVID-19 radiology database;&#x0201D; <break/> &#x0201C;Italian Society of Medical and Interventional Radiology;&#x0201D; <break/> &#x0201C;COVID-19 Database;&#x0201D; <break/> &#x0201C;Novel Corona Virus 2019 Dataset;&#x0201D; <break/> &#x0201C;COVID-19 positive chest X-ray images from different articles;&#x0201D;</td>
<td valign="top" align="left">219 COVID-19 patients; <break/> 1,345 Viral Pneumonia patients; <break/> 1,341 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">97.14%</td>
<td valign="top" align="left">0.995</td>
<td valign="top" align="left">94.61%</td>
<td valign="top" align="left">98.29%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B86">86</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Novitasari et al. (2020)</td>
<td valign="top" align="left">GitHub and Kaggle</td>
<td valign="top" align="left">102 COVID-19 patients; <break/> 204 Pneumonia and Normal patients (X-ray images)</td>
<td valign="top" align="left">Feature extraction by Googlenet, Resnet18, Resnet50, Resnet101; <break/> Feature selection by PCA, Relief;</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">97.33% (multi-class)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96%</td>
<td valign="top" align="left">98%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B87">87</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Oh et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Japanese Society of Radiological Technology;&#x0201D; <break/> &#x0201C;SCR database;&#x0201D; <break/> &#x0201C;U.S. National Library of Medicine&#x0201D;</td>
<td valign="top" align="left">180 COVID-19 patients; <break/> 20 Viral Pneumonia patients; <break/> 54 pneumonia bacterial patients; <break/> 57 Tuberculosis patients; <break/> 191 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data normalization; <break/> Data type casting; <break/> Histogram equalization; <break/> Gamma correction</td>
<td valign="top" align="left">(FC)-DenseNet103</td>
<td valign="top" align="left">88.9%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">85.9%</td>
<td valign="top" align="left">96.4%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B88">88</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ozturk et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;ChestX-ray8;&#x0201D;</td>
<td valign="top" align="left">(X-ray images)</td>
<td/>
<td valign="top" align="left">DarkCovidNet inspired by the DarkNet architecture</td>
<td valign="top" align="left">87.02%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">85.35%</td>
<td valign="top" align="left">92.18%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B89">89</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Pandit et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">224 COVID-19 patients; <break/> 700 pneumonia bacterial patients; <break/> 504 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">VGG-16</td>
<td valign="top" align="left">92.53% (Three class output)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">86.7%</td>
<td valign="top" align="left">95.1%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B90">90</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Panwar et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Radiopedia.org website; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">142 COVID-19 patients; <break/> 142 other (&#x0201C;Normal&#x0201D; &#x0201C;Bacterial Pneumonia&#x0201D; and &#x0201C;Viral Pneumonia&#x0201D;) (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">nCOVnet</td>
<td valign="top" align="left">88.10%</td>
<td valign="top" align="left">0.880</td>
<td valign="top" align="left">97.62%</td>
<td valign="top" align="left">78.57%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B40">40</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Pereira et al. (2020)</td>
<td valign="top" align="left">&#x0201C;RYDLS-20;&#x0201D; <break/> Radiopedia Encyclopedia <break/> &#x0201C;Chest X-ray14&#x0201D;</td>
<td valign="top" align="left">90 COVID-19 patients; <break/> 1,000 Normal patients; <break/> 10 MERS patients; <break/> 11 SARS patients; <break/> 10 Varicella patients; <break/> 12 Streptococcus patients; <break/> 11 Pneumocystis patients (X-ray images)</td>
<td valign="top" align="left">Resampling algorithms; <break/> Fusion techniques;</td>
<td valign="top" align="left">Pre-trained CNN</td>
<td valign="top" align="left" colspan="4">F1 score = 89%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B91">91</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Rahaman et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; &#x0201C;Chest X-Ray Images (pneumonia)&#x0201D;</td>
<td valign="top" align="left">260 COVID-19 patients; <break/> 300 Pneumonia; <break/> 300 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation (rotate, shift, shear, zoom, horizontal and vertical flip)</td>
<td valign="top" align="left">VGG19</td>
<td valign="top" align="left">89.3%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">89%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B92">92</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Rahimzadeh et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Covid chestxray dataset;&#x0201D; <break/> &#x0201C;RSNA pneumonia detection challenge&#x0201D;</td>
<td valign="top" align="left">180 COVID-19 patients; <break/> 6,054 Pneumocystis patients; <break/> 8,851 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">Xception <break/> ResNet50V2 concatenated</td>
<td valign="top" align="left">91.4%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">80.53%</td>
<td valign="top" align="left">99.56%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B93">93</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Rajaraman et al. (2020)</td>
<td valign="top" align="left">Pediatric CXR dataset; <break/> RSNA CXR dataset; <break/> CheXpert CXR dataset; <break/> NIH CXR-14 dataset; <break/> Twitter COVID-19 CXR dataset; <break/> Montreal COVID-19 CXR dataset;</td>
<td valign="top" align="left">4,683 Bacterial Pneumonia; <break/> 3,883 Viral Pneumonia (X-Ray images)</td>
<td valign="top" align="left">Segmenting lung area with dilated dropout U-Net; <break/> Image thresholding to remove very bright pixels; <break/> In-painting missing pixels using the surrounding pixel values; Using median-filter to remove noise and preserve edges;</td>
<td valign="top" align="left">VGG-16</td>
<td valign="top" align="left">94.05%</td>
<td valign="top" align="left">0.96</td>
<td valign="top" align="left">98.77%</td>
<td valign="top" align="left">86.24%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B45">45</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Rajaraman et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Pediatric CXR dataset;&#x0201D; <break/> &#x0201C;RSNA CXR dataset;&#x0201D; <break/> &#x0201C;Twitter COVID-19 CXR dataset;&#x0201D; <break/> &#x0201C;Montreal COVID-19 CXR dataset&#x0201D;</td>
<td valign="top" align="left">313 COVID-19 patients; <break/> 7,595 pneumonia of unknown type patients; <break/> 2,780 bacterial pneumonia; <break/> 7,595 Normal patients (X-ray images)</td>
<td valign="top" align="left">Median Filtering; <break/> Normalization; <break/> Standardization</td>
<td valign="top" align="left">Inception-V3</td>
<td valign="top" align="left">99.01%</td>
<td valign="top" align="left">0.997</td>
<td valign="top" align="left">98.4%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B45">45</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Sethy et al. (2020)</td>
<td valign="top" align="left">X-ray images on public medical Github repositories; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">127 COVID-19 patients; <break/> 127 Pneumonia patients; <break/> 127 Normal patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">ResNet50 plus SVM</td>
<td valign="top" align="left">98.66%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">95.33%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B94">94</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Shibly et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;RSNA pneumonia detection challenge dataset;&#x0201D; <break/> Kaggle chest X-ray database; <break/> &#x0201C;COVIDx&#x0201D;</td>
<td valign="top" align="left">183 COVID-19 patients; <break/> 5,551 Pneumonia patients; <break/> 8,066 Normal patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Faster R-CNN</td>
<td valign="top" align="left">97.36%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.65%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B95">95</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Toga&#x000E7;ar et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Kaggle COVID-19 dataset created by a team of researchers from Qatar University, medical doctors from Bangladesh, and collaborators from Pakistan and Malaysia.</td>
<td valign="top" align="left">295 COVID-19 patients; <break/> 98 Pneumonia; <break/> 65 normal patients (X-ray images)</td>
<td valign="top" align="left">Restructuring images using the Fuzzy Color technique and stacking them with the original images; <break/> Feature extracting using deep learning models (MobileNetV2, SqueezeNet) using the Social Mimic optimization method;</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B96">96</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Toraman et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP</td>
<td valign="top" align="left">231 COVID-19 patients; <break/> 1,050 Pneumonia patients; <break/> 1,050 Normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation;</td>
<td valign="top" align="left">Convolutional capsnet</td>
<td valign="top" align="left">97.24% (Binary class)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.42%</td>
<td valign="top" align="left">97.04%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B97">97</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Tsiknakis et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> Dataset originated from the QUIBIM imagingcovid19 platform database and various public repositories, including RSNA, IEEE, RadioGyan and the British Society of Thoracic Imaging; Publicly available <break/> X-ray dataset of patients with pneumonia;</td>
<td valign="top" align="left">137 COVID-19 patients; <break/> 150 Virus Pneumonia; <break/> 150 Bacteria Pneumonia; <break/> 150 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation (rotation, shear, zoom)</td>
<td valign="top" align="left">Inception V3</td>
<td valign="top" align="left">76% (multi-class)</td>
<td valign="top" align="left">0.93 (multi-class)</td>
<td valign="top" align="left">93% (multi-class)</td>
<td valign="top" align="left">91.8% (multi-class)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B98">98</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Tuncer et al. (2020)</td>
<td valign="top" align="left">GitHub website; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">87 COVID-19 patients; <break/> 234 Normal patients (X-ray images)</td>
<td valign="top" align="left">Converting X-ray image to grayscale; <break/> ResExLBP and IRF based method</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">98.29%</td>
<td valign="top" align="left">100%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B99">99</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ucar et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID chest X-ray dataset;&#x0201D; &#x0201C;Kaggle chest X-ray pneumonia dataset;&#x0201D;</td>
<td valign="top" align="left">403 COVID-19 patients; <break/> 721 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation (noise, shear, brightness increase, brightness decrease)</td>
<td valign="top" align="left">Bayes-SqueezeNet</td>
<td valign="top" align="left">98.26% (multi-class)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">99.13% (multi-class)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B100">100</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Vaid et al. (2020)</td>
<td valign="top" align="left">Set of lately published articles; <break/> NIH dataset</td>
<td valign="top" align="left">181 COVID-19 patients; <break/> 364 Normal patients (X-ray images)</td>
<td valign="top" align="left">Normalization</td>
<td valign="top" align="left">VGG-19</td>
<td valign="top" align="left">96.3%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.1%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B101">101</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Waheed et al. (2020)</td>
<td valign="top" align="left">&#x0201C;IEEE Covid Chest X-ray dataset;&#x0201D; <break/> &#x0201C;COVID-19 Radiography Database&#x0201D; <break/> &#x0201C;COVID-19 Chest X-ray Dataset;&#x0201D;</td>
<td valign="top" align="left">403 COVID-19 patients; <break/> 721 normal patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation using CovidGAN</td>
<td valign="top" align="left">VGG16</td>
<td valign="top" align="left">95%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">90%</td>
<td valign="top" align="left">97%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B102">102</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Yildirim et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-19 Chest X-Ray dataset;&#x0201D; <break/> Kaggle chest X-ray database</td>
<td valign="top" align="left">136 COVID-19 patients; <break/> 162 Pneumonia patients; <break/> 245 Normal patients (X-ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">Hybrid model</td>
<td valign="top" align="left">96.30%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96.30%</td>
<td valign="top" align="left">98.73%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B103">103</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Yoo et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVID-Chest XrayDataset;&#x0201D; <break/> Eastern Asian Hospital; <break/> Shenzen data;</td>
<td valign="top" align="left">162 COVID-19 Patients; <break/> 162 TB patients; <break/> 162 Non-TB patients (X-ray images)</td>
<td valign="top" align="left">Data augmentation (rotated, translated, and horizontally flipped)</td>
<td valign="top" align="left">ResNet18</td>
<td valign="top" align="left">95% Average of (COVID-19/TB) and (COVID-19/non-TB)</td>
<td valign="top" align="left">0.95 Average of (COVID-19/TB) and (COVID-19/non-TB)</td>
<td valign="top" align="left">97% Average of (COVID-19/TB) and (COVID-19/non-TB)</td>
<td valign="top" align="left">93% Average of (COVID-19/TB) and (COVID-19/non-TB)</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B104">104</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ghoshal et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> &#x0201C;Kaggle chest X-ray database&#x0201D;</td>
<td valign="top" align="left">68 COVID-19 patients; <break/> 2,786 Bacterial <break/> Pneumonia patients; <break/> 1,504 Viral Pneumonia patients; <break/> 1,583 normal patients (X-Ray images)</td>
<td valign="top" align="left">Standardization; <break/> Data augmentation</td>
<td valign="top" align="left">Bayesian ResNet50V2 model</td>
<td valign="top" align="left">89.82%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B105">105</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hall et al. (2020)</td>
<td valign="top" align="left">&#x0201C;X-ray images on public medical Github repositories;&#x0201D; <break/> &#x0201C;Radiopaedia;&#x0201D; <break/> &#x0201C;Italian Society of Medical and Interventional Radiology (SIRM)&#x0201D;</td>
<td valign="top" align="left">135 COVID-19 patients; <break/> 320 Viral and Bacterial Pneumonia patients (X-Ray images)</td>
<td valign="top" align="left">Data augmentation</td>
<td valign="top" align="left">Resnet50 and VGG16 plus <break/> CNN</td>
<td valign="top" align="left">91.24%</td>
<td valign="top" align="left">0.94</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B106">106</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Hammoudi et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Chest XRay Images (Pneumonia) dataset;&#x0201D; <break/> COVID-19 X-ray image database developed by Cohen JP;</td>
<td valign="top" align="left">148 Bacterial pneumonia; <break/> 148 Viral pneumonia; <break/> 148 Normal patients (X-Ray Images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">DenseNet169</td>
<td valign="top" align="left">95.72%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B107">107</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">El-Din Hemdan et al. (2020)</td>
<td valign="top" align="left">COVID-19 X-ray image database developed by Cohen JP; <break/> COVID-19 X-ray image database by Dr. Adrian Rosebrock</td>
<td valign="top" align="left">25 COVID-19 patients; <break/> 25 normal patients (X-Ray images)</td>
<td valign="top" align="left">Scaling to 224 &#x000D7; 224 pixels; <break/> One-hot encoding</td>
<td valign="top" align="left">COVIDX-Net (VGG19 and DenseNet201 models)</td>
<td valign="top" align="left">VGG19 = 90%; <break/> DenseNet201 = 90%</td>
<td valign="top" align="left">VGG19 = 0.90; <break/> DenseNet201 = 0.90</td>
<td valign="top" align="left">VGG19 = 100%; <break/> DenseNet201 = 100%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B108">108</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Jain et al. (2020)</td>
<td valign="top" align="left">&#x0201C;Chest XRay Images (Pneumonia) dataset;&#x0201D; <break/> COVID-19 X-ray image database developed by Cohen JP;</td>
<td valign="top" align="left">250 COVID-19 patients; <break/> 300 Bacterial pneumonia; <break/> 350 Viral pneumonia; <break/> 315 Normal patients (X-Ray Images)</td>
<td valign="top" align="left">Normalize images according to the images in the ImageNet database; <break/> Data augmentation (rotation and Gaussian blur);</td>
<td valign="top" align="left">ResNet50</td>
<td valign="top" align="left">97.77%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">97.14%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B109">109</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Luz et al. (2020)</td>
<td valign="top" align="left">&#x0201C;COVIDx dataset;&#x0201D; <break/> &#x0201C;RSNA Pneumonia Detection Challenge dataset;&#x0201D; <break/> &#x0201C;COVID-19 image data collection&#x0201D;</td>
<td valign="top" align="left">183 COVID-19 patients; <break/> 5,521 Pneumonia patients; <break/> 8,066 normal patients (X-Ray images)</td>
<td valign="top" align="left">Intensity normalization; <break/> Data augmentation</td>
<td valign="top" align="left">EfficientNet B3</td>
<td valign="top" align="left">93.9%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">96.8%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B110">110</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ozkaya et al. (2020)</td>
<td valign="top" align="left">From the Italian Society of Medical and Interventional Radiology</td>
<td valign="top" align="left">53 COVID-19 patients (CT images)</td>
<td valign="top" align="left">Feature vectors obtained from Pre-trained VGG-16, GoogleNet and ResNet-50 networks and fusion method; <break/> Feature ranking by <italic>t</italic>-test method</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">98.27%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">98.93%</td>
<td valign="top" align="left">97.60%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B68">68</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Ozturk et al. (2020)</td>
<td valign="top" align="left">&#x0201C;covid-chestxray-dataset available at: <ext-link ext-link-type="uri" xlink:href="https://github.com/ieee8023/covid-chestxray-dataset">https://github.com/ieee8023/covid-chestxray-dataset</ext-link>&#x0201D;</td>
<td valign="top" align="left">4 ARDS images, 101 COVID-19 images, 2 No finding images, 2 Pneumocystis pneumonia images, 11 SARS images, and 6 Streptococcus images (X-Ray images)</td>
<td valign="top" align="left">Data augmentation; SMOTE oversampling; creating feature vectors with sAE and PCA; feature extraction by feature vectors, Gray Level Co-occurrence Matrix, Local Binary Gray Level Co-occurrence Matrix, Gray Level Run Length Matrix, and Segmentation-based Fractal Texture Analysis</td>
<td valign="top" align="left">SVM</td>
<td valign="top" align="left">94.23%</td>
<td valign="top" align="left">0.99</td>
<td valign="top" align="left">91.88%</td>
<td valign="top" align="left">98.54%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B111">111</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Wang et al. (2020)</td>
<td valign="top" align="left">COVIDx dataset</td>
<td valign="top" align="left">266 COVID-19 patients; 5,536 Pneumonia patients; 8,066 normal patients (X-Ray images)</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">COVID-Net Network Architecture using a &#x0201C;lightweight residual projection-expansion- projection-extension design pattern&#x0201D; (Customized CNN)</td>
<td valign="top" align="left">93.3%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="left">91.0%</td>
<td valign="top" align="left">&#x02013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B1">1</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Zhang et al. (2020)</td>
<td valign="top" align="left">X-COVID, OpenCOVID</td>
<td valign="top" align="left">599 COVID-19 patients; 2,107 non-COVID-19 patients (non-viral pneumonia and healthy) (X-Ray images)</td>
<td valign="top" align="left">Data augmentation; Feature extraction using EfficientNet</td>
<td valign="top" align="left">Confidence-aware anomaly detection</td>
<td valign="top" align="left">78.57%</td>
<td valign="top" align="left">0.844</td>
<td valign="top" align="left">77.13%</td>
<td valign="top" align="left">78.97%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B112">112</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>Data Source: The source(s) that images were acquired from, Data Structure and Size: Number of images, image modalities, sample groups, Data Preprocessing: cleaning, Instance selection, normalization, transformation, feature extraction, selection, etc. The product of data preprocessing is the final training set, Best Model Structure(s): Best machine algorithm or deep learning model reported in the selected paper based on its performance, Performance Measurements (on the best model): The measurement of the model&#x00027;s output performance based on accuracy, sensitivity, specificity, and AUC score</italic>.</p>
</table-wrap-foot>
</table-wrap></sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>Machine and deep learning methods have been proven as valuable strategies to assess massive high-dimensional characteristics of medical images. CT or X-Ray findings of COVID-19 patients have similarities with other atypical and viral pneumonia diseases. Therefore, machine and deep learning methods might facilitate automatic discrimination of COVID-19 from other pneumonia conditions. The differential diagnosis of COVID also includes drug-induced diseases or immune pneumonitis. However, most of the studies reviewed here lack these kinds of samples. This point is the limitation of these studies. Different methods, such as Ensemble, VGG-16, ResNet, InceptionNetV3, MobileNet v2, Xception, CNN, VGG16, Truncated Inception Net, and KNN, have been used for the purpose of assessment of chest images of COVID-19 patients. Notably, the application of these methods on X-rays has offered promising results. Such a finding is particularly important since X-rays are easily accessible and low cost. These methods not only can diagnose COVID-19 patients from non-COVID pneumonia cases, but can also predict the severity of COVID-19 pneumonia and the risk of short-term mortality. In spite of the low expense of X-ray compared with CT images, the numbers of studies that assessed these two types of imaging using machine/deep learning methods are not meaningfully different. However, few studies have used these methods on both types of imaging (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B40">40</xref>). CNN-based methods have achieved accuracy values above 99% in classifying COVID-19 patients from other cases of pneumonia or related disorders, as reported by several independent studies, suggesting these strategies as screening methods for initial evaluation of COVID-19 cases.</p>
<p>Although both deep learning and machine learning strategies can be used for the mentioned purpose, they differ in some respects. For instance, deep learning methods usually need a large amount of labeled training data to make a concise conclusion. However, machine learning can apply a small amount of data delivered by users. Moreover, deep learning methods need high-performance hardware. Machine learning, on the other hand, needs features to be precisely defined by users, whereas deep learning generates novel features by itself and thus requires more time to train. Machine learning classifies tasks into small fragments and subsequently combines obtained results into one conclusion, whereas deep learning resolves the problems using end-to-end principles.</p>
<p>Several studies have diagnosed COVID-19 patients through the application of machine learning methods rather than using deep learning methods by retrieving the features from the images. These studies have yielded high recognition outcomes and have the advantage of high learning speed (<xref ref-type="bibr" rid="B12">12</xref>). Pre-processing is an essential step for reducing the impacts of intensity variations in CT slices and getting rid of noise. Subsequent thresholding and morphological operations have also enhanced the analytical performance. Data augmentation and histogram equalization are among the most applied preprocessing methods.</p>
<p>One of the most promising approaches used in the included studies was transfer learning. Transfer learning is defined as using model knowledge on a huge dataset (which is referred to as the &#x0201C;pre-trained model&#x0201D;) and transferring it to use on a new problem. This is very useful in settings like medical imaging, where there is a limited number of labeled data (<xref ref-type="bibr" rid="B113">113</xref>). Previous studies showed favorable outcomes of the transfer learning approaches in medical imaging tasks (<xref ref-type="bibr" rid="B114">114</xref>, <xref ref-type="bibr" rid="B115">115</xref>). Among the included studies, Bridge et al. (<xref ref-type="bibr" rid="B25">25</xref>) even reached 100% classification accuracy on COVID-19 using the pre-trained InceptionV3.</p>
<p>The availability of public databases of CT and X-ray images of patients with COVID-19 has facilitated the application of machine learning methods on large quantities of clinical images and execution of training and verification steps. However, since these images have come from various institutes using different scanners, preprocessing of the obtained data is necessary to make them uniform and facilitate further analysis (<xref ref-type="bibr" rid="B12">12</xref>). Appraisal of demographic and clinical data of COVID-19 patients and their association with CT/X-ray image features as well as the accuracy of machine learning prediction methods would provide more valuable information in the stratification of COVID-19 patients. Moreover, one of the major challenges of deep learning models in medical applications is their lack of explainability due to their black-box nature, which should be solved (<xref ref-type="bibr" rid="B116">116</xref>). Future studies can focus on approaches that provide interpretation besides black-box predictions.</p></sec>
<sec sec-type="conclusions" id="s5">
<title>Conclusion</title>
<p>Deep and machine learning methods have high accuracy in the differentiation of COVID-19 from non-COVID-19 pneumonia based on chest images. These techniques have facilitated the automatic evaluation of these images. However, deep learning methods suffer from the absence of transparency and interpretability, as it is not possible to identify the exact imaging feature that has been applied to define the output (<xref ref-type="bibr" rid="B13">13</xref>). As no single strategy has the capacity to distinguish all pulmonary disorders based merely on the imaging presentation on chest CT scans, the application of multidisciplinary approaches is suggested for overcoming diagnostic problems (<xref ref-type="bibr" rid="B13">13</xref>).</p></sec>
<sec sec-type="data-availability-statement" id="s6">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding authors.</p></sec>
<sec id="s7">
<title>Author Contributions</title>
<p>HM-R, MN, and AG-L collected the data and designed the tables. MT and SG-F designed the study, wrote the draft, and revised it. All the authors read the draft and approved the submitted version.</p></sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
</body>
<back>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>Wong</surname> <given-names>A</given-names></name></person-group>. <article-title>COVID-Net: a tailored deep convolutional neural network design for detection of covid-19 cases from chest X-ray images</article-title>. <source>arXiv.</source> (<year>2020</year>) Preprint arXiv:200309871. <pub-id pub-id-type="doi">10.1038/s41598-020-76550-z</pub-id><pub-id pub-id-type="pmid">33177550</pub-id></citation></ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghafouri-Fard</surname> <given-names>S</given-names></name> <name><surname>Noroozi</surname> <given-names>R</given-names></name> <name><surname>Vafaee</surname> <given-names>R</given-names></name> <name><surname>Branicki</surname> <given-names>W</given-names></name> <name><surname>Po&#x01E61;piech</surname> <given-names>E</given-names></name> <name><surname>Pyrc</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Effects of host genetic variations on response to, susceptibility and severity of respiratory infections</article-title>. <source>Biomed Pharmacother.</source> (<year>2020</year>) <volume>128</volume>:<fpage>110296</fpage>. <pub-id pub-id-type="doi">10.1016/j.biopha.2020.110296</pub-id><pub-id pub-id-type="pmid">32480226</pub-id></citation></ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>Y</given-names></name> <name><surname>Zheng</surname> <given-names>S</given-names></name> <name><surname>Li</surname> <given-names>L</given-names></name> <name><surname>Zhang</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>X</given-names></name> <name><surname>Huang</surname> <given-names>Z</given-names></name> <etal/></person-group>. <article-title>Deep learning enables accurate diagnosis of novel coronavirus (COVID-19) with CT images</article-title>. <source>medRxiv</source>. (<year>2020</year>).<pub-id pub-id-type="pmid">33705321</pub-id></citation></ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Samsami</surname> <given-names>M</given-names></name> <name><surname>Mehravaran</surname> <given-names>E</given-names></name> <name><surname>Tabarsi</surname> <given-names>P</given-names></name> <name><surname>Javadi</surname> <given-names>A</given-names></name> <name><surname>Arsang-Jang</surname> <given-names>S</given-names></name> <name><surname>Komaki</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Clinical and demographic characteristics of patients with COVID-19 infection: statistics from a single hospital in Iran</article-title>. <source>Human Antibodies</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.3233/HAB-200428</pub-id><pub-id pub-id-type="pmid">32986663</pub-id></citation></ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghafouri-Fard</surname> <given-names>S</given-names></name> <name><surname>Noroozi</surname> <given-names>R</given-names></name> <name><surname>Omrani</surname> <given-names>MD</given-names></name> <name><surname>Branicki</surname> <given-names>W</given-names></name> <name><surname>Po&#x0015B;piech</surname> <given-names>E</given-names></name> <name><surname>Sayad</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Angiotensin converting enzyme: a review on expression profile and its association with human disorders with special focus on SARS-CoV-2 infection</article-title>. <source>Vascular Pharmacol.</source> (<year>2020</year>) <volume>130</volume>:<fpage>106680</fpage>. <pub-id pub-id-type="doi">10.1016/j.vph.2020.106680</pub-id><pub-id pub-id-type="pmid">32423553</pub-id></citation></ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S</given-names></name> <name><surname>Kang</surname> <given-names>B</given-names></name> <name><surname>Ma</surname> <given-names>J</given-names></name> <name><surname>Zeng</surname> <given-names>X</given-names></name> <name><surname>Xiao</surname> <given-names>M</given-names></name> <name><surname>Guo</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>A deep learning algorithm using CT images to screen for Corona Virus Disease (COVID-19)</article-title>. <source>medRxiv</source>. (<year>2020</year>) <volume>14</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1101/2020.02.14.20023028</pub-id><pub-id pub-id-type="pmid">33629156</pub-id></citation></ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fang</surname> <given-names>Y</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name> <name><surname>Xie</surname> <given-names>J</given-names></name> <name><surname>Lin</surname> <given-names>M</given-names></name> <name><surname>Ying</surname> <given-names>L</given-names></name> <name><surname>Pang</surname> <given-names>P</given-names></name> <etal/></person-group>. <article-title>Sensitivity of chest CT for COVID-19: comparison to RT-PCR</article-title>. <source>Radiology</source>. (<year>2020</year>) <volume>296</volume>:<fpage>1</fpage>&#x02013;<lpage>2</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020200432</pub-id><pub-id pub-id-type="pmid">32073353</pub-id></citation></ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J</given-names></name> <name><surname>Tian</surname> <given-names>S</given-names></name> <name><surname>Lou</surname> <given-names>J</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name></person-group>. <article-title>Familial cluster of COVID-19 infection from an asymptomatic</article-title>. <source>Crit Care.</source> (<year>2020</year>) <volume>24</volume>:<fpage>1</fpage>&#x02013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.1186/s13054-020-2817-7</pub-id><pub-id pub-id-type="pmid">32247826</pub-id></citation></ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lei</surname> <given-names>Y</given-names></name> <name><surname>Zhang</surname> <given-names>H-W</given-names></name> <name><surname>Yu</surname> <given-names>J</given-names></name> <name><surname>Patlas</surname> <given-names>MN</given-names></name></person-group>. <source>COVID-19 Infection: Early Lessons</source>. <publisher-loc>Los Angeles, CA</publisher-loc>: <publisher-name>Sage</publisher-name> (<year>2020</year>).</citation></ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rousan</surname> <given-names>LA</given-names></name> <name><surname>Elobeid</surname> <given-names>E</given-names></name> <name><surname>Karrar</surname> <given-names>M</given-names></name> <name><surname>Khader</surname> <given-names>Y</given-names></name></person-group>. <article-title>Chest x-ray findings and temporal lung changes in patients with COVID-19 pneumonia</article-title>. <source>BMC Pulmonary Med.</source> (<year>2020</year>) <volume>20</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1186/s12890-020-01286-5</pub-id><pub-id pub-id-type="pmid">32933519</pub-id></citation></ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bai</surname> <given-names>HX</given-names></name> <name><surname>Hsieh</surname> <given-names>B</given-names></name> <name><surname>Xiong</surname> <given-names>Z</given-names></name> <name><surname>Halsey</surname> <given-names>K</given-names></name> <name><surname>Choi</surname> <given-names>JW</given-names></name> <name><surname>Tran</surname> <given-names>TML</given-names></name> <etal/></person-group>. <article-title>Performance of radiologists in differentiating COVID-19 from viral pneumonia on chest CT</article-title>. <source>Radiology</source>. (<year>2020</year>) <volume>296</volume>:<fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020200823</pub-id><pub-id pub-id-type="pmid">32155105</pub-id></citation></ref>
<ref id="B12">
<label>12.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozsahin</surname> <given-names>I</given-names></name> <name><surname>Sekeroglu</surname> <given-names>B</given-names></name> <name><surname>Musa</surname> <given-names>MS</given-names></name> <name><surname>Mustapha</surname> <given-names>MT</given-names></name> <name><surname>Uzun Ozsahin</surname> <given-names>D</given-names></name></person-group>. <article-title>Review on diagnosis of COVID-19 from chest CT images using artificial intelligence</article-title>. <source>Comput Math Methods Med</source>. (<year>2020</year>) <volume>2020</volume>:<fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1155/2020/9756518</pub-id><pub-id pub-id-type="pmid">33014121</pub-id></citation></ref>
<ref id="B13">
<label>13.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L</given-names></name> <name><surname>Qin</surname> <given-names>L</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <name><surname>Yin</surname> <given-names>Y</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Kong</surname> <given-names>B</given-names></name> <etal/></person-group>. <article-title>Using artificial intelligence to detect COVID-19 and community-acquired pneumonia based on pulmonary CT: evaluation of the diagnostic accuracy</article-title>. <source>Radiology.</source> (<year>2020</year>) <volume>296</volume>:<fpage>E65</fpage>&#x02013;<lpage>71</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020200905</pub-id><pub-id pub-id-type="pmid">32191588</pub-id></citation></ref>
<ref id="B14">
<label>14.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>rekha Hanumanthu</surname> <given-names>S</given-names></name></person-group>. <article-title>Role of intelligent computing in COVID-19 prognosis: a state-of-the-art review</article-title>. <source>Chaos Solitons Fractals</source>. (<year>2020</year>) <volume>138</volume>:<fpage>109947</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.109947</pub-id><pub-id pub-id-type="pmid">32836916</pub-id></citation></ref>
<ref id="B15">
<label>15.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbasian Ardakani</surname> <given-names>A</given-names></name> <name><surname>Acharya</surname> <given-names>UR</given-names></name> <name><surname>Habibollahi</surname> <given-names>S</given-names></name> <name><surname>Mohammadi</surname> <given-names>A</given-names></name></person-group>. <article-title>COVIDiag: a clinical CAD system to diagnose COVID-19 pneumonia based on CT findings</article-title>. <source>Eur Radiol</source>. (<year>2020</year>) <volume>31</volume>:<fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1007/s00330-020-07087-y</pub-id><pub-id pub-id-type="pmid">32740817</pub-id></citation></ref>
<ref id="B16">
<label>16.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alazab</surname> <given-names>M</given-names></name> <name><surname>Awajan</surname> <given-names>A</given-names></name> <name><surname>Mesleh</surname> <given-names>A</given-names></name> <name><surname>Abraham</surname> <given-names>A</given-names></name> <name><surname>Jatana</surname> <given-names>V</given-names></name> <name><surname>Alhyari</surname> <given-names>S</given-names></name></person-group>. <article-title>COVID-19 prediction and detection using deep learning</article-title>. <source>Int J Comput Information Syst Indus Manage Appl</source>. (<year>2020</year>) <volume>12</volume>:<fpage>168</fpage>&#x02013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110338</pub-id></citation></ref>
<ref id="B17">
<label>17.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Albahli</surname> <given-names>S</given-names></name></person-group>. <article-title>Efficient GAN-based Chest Radiographs (CXR) augmentation to diagnose coronavirus disease pneumonia</article-title>. <source>Int J Med Sci</source>. (<year>2020</year>) <volume>17</volume>:<fpage>1439</fpage>&#x02013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.7150/ijms.46684</pub-id><pub-id pub-id-type="pmid">32624700</pub-id></citation></ref>
<ref id="B18">
<label>18.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Albahli</surname> <given-names>S</given-names></name> <name><surname>Albattah</surname> <given-names>W</given-names></name></person-group>. <article-title>Detection of coronavirus disease from X-ray images using deep learning and transfer learning algorithms</article-title>. <source>J Xray Sci Technol</source>. (<year>2020</year>) <volume>28</volume>:<fpage>841</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.3233/XST-200720</pub-id><pub-id pub-id-type="pmid">32804113</pub-id></citation></ref>
<ref id="B19">
<label>19.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alsharman</surname> <given-names>N</given-names></name> <name><surname>Jawarneh</surname> <given-names>I</given-names></name></person-group>. <article-title>GoogleNet CNN neural network towards chest CT-coronavirus medical image classification</article-title>. <source>J Comput Sci</source>. (<year>2020</year>) <volume>16</volume>:<fpage>620</fpage>&#x02013;<lpage>5</lpage> <pub-id pub-id-type="doi">10.3844/jcssp.2020.620.625</pub-id></citation></ref>
<ref id="B20">
<label>20.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Altan</surname> <given-names>A</given-names></name> <name><surname>Karasu</surname> <given-names>S</given-names></name></person-group>. <article-title>Recognition of COVID-19 disease from X-ray images by hybrid model consisting of 2D curvelet transform, chaotic salp swarm algorithm and deep learning technique</article-title>. <source>Chaos Solitons Fractals.</source> (<year>2020</year>) <volume>140</volume>:<fpage>110071</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110071</pub-id><pub-id pub-id-type="pmid">32834627</pub-id></citation></ref>
<ref id="B21">
<label>21.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Apostolopoulos</surname> <given-names>ID</given-names></name> <name><surname>Aznaouridis</surname> <given-names>SI</given-names></name> <name><surname>Tzani</surname> <given-names>MA</given-names></name></person-group>. <article-title>Extracting possibly representative COVID-19 biomarkers from X-ray images with deep learning approach and image data related to pulmonary diseases</article-title>. <source>J Med Biol Eng</source>. (<year>2020</year>) <volume>40</volume>:<fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1007/s40846-020-00529-4</pub-id><pub-id pub-id-type="pmid">32412551</pub-id></citation></ref>
<ref id="B22">
<label>22.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ardakani</surname> <given-names>AA</given-names></name> <name><surname>Kanafi</surname> <given-names>AR</given-names></name> <name><surname>Acharya</surname> <given-names>UR</given-names></name> <name><surname>Khadem</surname> <given-names>N</given-names></name> <name><surname>Mohammadi</surname> <given-names>A</given-names></name></person-group>. <article-title>Application of deep learning technique to manage COVID-19 in routine clinical practice using CT images: results of 10 convolutional neural networks</article-title>. <source>Comput Biol Med.</source> (<year>2020</year>) <volume>121</volume>:<fpage>103795</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103795</pub-id><pub-id pub-id-type="pmid">32568676</pub-id></citation></ref>
<ref id="B23">
<label>23.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Das</surname> <given-names>D</given-names></name> <name><surname>Santosh</surname> <given-names>KC</given-names></name> <name><surname>Pal</surname> <given-names>U</given-names></name></person-group>. <article-title>Truncated inception net: COVID-19 outbreak screening using chest X-rays</article-title>. <source>Phys Eng Sci Med.</source> (<year>2020</year>) <volume>43</volume>:<fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.21203/rs.3.rs-20795/v1</pub-id><pub-id pub-id-type="pmid">32588200</pub-id></citation></ref>
<ref id="B24">
<label>24.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aswathy</surname> <given-names>SU</given-names></name> <name><surname>Jarin</surname> <given-names>T</given-names></name> <name><surname>Mathews</surname> <given-names>R</given-names></name> <name><surname>Nair</surname> <given-names>LM</given-names></name> <name><surname>Rroan</surname> <given-names>M</given-names></name></person-group>. <article-title>CAD systems for automatic detection and classification of COVID-19 in nano CT lung image by using machine learning technique</article-title>. <source>Int J Pharm Res</source>. (<year>2020</year>) <volume>12</volume>:<fpage>1865</fpage>&#x02013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.31838/ijpr/2020.12.02.247</pub-id></citation></ref>
<ref id="B25">
<label>25.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bridge</surname> <given-names>J</given-names></name> <name><surname>Meng</surname> <given-names>Y</given-names></name> <name><surname>Zhao</surname> <given-names>Y</given-names></name> <name><surname>Du</surname> <given-names>Y</given-names></name> <name><surname>Zhao</surname> <given-names>M</given-names></name> <name><surname>Sun</surname> <given-names>R</given-names></name> <etal/></person-group>. <article-title>Introducing the GEV activation function for highly unbalanced data to develop COVID-19 diagnostic models</article-title>. <source>IEEE J Biomed Health Inform</source>. (<year>2020</year>) <volume>24</volume>:<fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2020.3012383</pub-id><pub-id pub-id-type="pmid">32750973</pub-id></citation></ref>
<ref id="B26">
<label>26.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Butt</surname> <given-names>C</given-names></name> <name><surname>Gill</surname> <given-names>J</given-names></name> <name><surname>Chun</surname> <given-names>D</given-names></name> <name><surname>Babu</surname> <given-names>BA</given-names></name></person-group>. <article-title>Deep learning system to screen coronavirus disease 2019 pneumonia</article-title>. <source>Appl Intell</source>. (<year>2020</year>) <volume>6</volume>:<fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1007/s10489-020-01714-3</pub-id><pub-id pub-id-type="pmid">32837749</pub-id></citation></ref>
<ref id="B27">
<label>27.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dey</surname> <given-names>N</given-names></name> <name><surname>Rajinikanth</surname> <given-names>V</given-names></name> <name><surname>Fong</surname> <given-names>SJ</given-names></name> <name><surname>Kaiser</surname> <given-names>MS</given-names></name> <name><surname>Mahmud</surname> <given-names>M</given-names></name></person-group>. <article-title>Social group optimization-assisted Kapur&#x00027;s entropy and morphological segmentation for automated detection of COVID-19 infection from computed tomography images</article-title>. <source>Cognit Comput.</source> (<year>2020</year>) <volume>12</volume>:<fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.20944/preprints202005.0052.v1</pub-id><pub-id pub-id-type="pmid">32837591</pub-id></citation></ref>
<ref id="B28">
<label>28.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kermany</surname> <given-names>D</given-names></name> <name><surname>Zhang</surname> <given-names>K</given-names></name> <name><surname>Goldbaum</surname> <given-names>M</given-names></name></person-group>. <article-title>Labeled optical coherence tomography (OCT) and Chest X-Ray images for classification</article-title>. <source>Mendeley Data</source>. (<year>2018</year>) <volume>2</volume>. <pub-id pub-id-type="doi">10.17632/RSCBJBR9SJ.2</pub-id></citation></ref>
<ref id="B29">
<label>29.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>El Asnaoui</surname> <given-names>K</given-names></name> <name><surname>Chawki</surname> <given-names>Y</given-names></name></person-group>. <article-title>Using X-ray images and deep learning for automated detection of coronavirus disease</article-title>. <source>J Biomol Struct Dyn</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1080/07391102.2020.1767212</pub-id><pub-id pub-id-type="pmid">32397844</pub-id></citation></ref>
<ref id="B30">
<label>30.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>Z</given-names></name> <name><surname>Wei</surname> <given-names>B</given-names></name> <name><surname>Hong</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>T</given-names></name> <name><surname>Cong</surname> <given-names>J</given-names></name> <name><surname>Zhu</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>Accurate screening of COVID-19 using attention-based deep 3D multiple instance learning</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<fpage>2584</fpage>&#x02013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2996256</pub-id><pub-id pub-id-type="pmid">32730211</pub-id></citation></ref>
<ref id="B31">
<label>31.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Harmon</surname> <given-names>SA</given-names></name> <name><surname>Sanford</surname> <given-names>TH</given-names></name> <name><surname>Xu</surname> <given-names>S</given-names></name> <name><surname>Turkbey</surname> <given-names>EB</given-names></name> <name><surname>Roth</surname> <given-names>H</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence for the detection of COVID-19 pneumonia on chest CT using multinational datasets</article-title>. <source>Nat Commun.</source> (<year>2020</year>) <volume>11</volume>:<fpage>4080</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-020-17971-2</pub-id><pub-id pub-id-type="pmid">32796848</pub-id></citation></ref>
<ref id="B32">
<label>32.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hasan</surname> <given-names>AM</given-names></name> <name><surname>Al-Jawad</surname> <given-names>MM</given-names></name> <name><surname>Jalab</surname> <given-names>HA</given-names></name> <name><surname>Shaiba</surname> <given-names>H</given-names></name> <name><surname>Ibrahim</surname> <given-names>RW</given-names></name> <name><surname>Al-Shamasneh</surname> <given-names>AR</given-names></name></person-group>. <article-title>Classification of Covid-19 coronavirus, pneumonia and healthy lungs in CT scans using Q-deformed entropy and deep learning features</article-title>. <source>Entropy.</source> (<year>2020</year>) <volume>22</volume>:<fpage>517</fpage>. <pub-id pub-id-type="doi">10.3390/e22050517</pub-id><pub-id pub-id-type="pmid">33286289</pub-id></citation></ref>
<ref id="B33">
<label>33.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>S</given-names></name> <name><surname>Gao</surname> <given-names>Y</given-names></name> <name><surname>Niu</surname> <given-names>Z</given-names></name> <name><surname>Jiang</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>L</given-names></name> <name><surname>Xiao</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>Weakly supervised deep learning for COVID-19 infection detection and classification from CT images</article-title>. <source>IEEE Access</source>. (<year>2020</year>) <volume>8</volume>:<fpage>118869</fpage>&#x02013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3005510</pub-id></citation></ref>
<ref id="B34">
<label>34.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jaiswal</surname> <given-names>A</given-names></name> <name><surname>Gianchandani</surname> <given-names>N</given-names></name> <name><surname>Singh</surname> <given-names>D</given-names></name> <name><surname>Kumar</surname> <given-names>V</given-names></name> <name><surname>Kaur</surname> <given-names>M</given-names></name></person-group>. <article-title>Classification of the COVID-19 infected patients using DenseNet201 based deep transfer learning</article-title>. <source>J Biomol Struct Dyn</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1080/07391102.2020.1788642</pub-id><pub-id pub-id-type="pmid">32619398</pub-id></citation></ref>
<ref id="B35">
<label>35.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kang</surname> <given-names>H</given-names></name> <name><surname>Xia</surname> <given-names>L</given-names></name> <name><surname>Yan</surname> <given-names>F</given-names></name> <name><surname>Wan</surname> <given-names>Z</given-names></name> <name><surname>Shi</surname> <given-names>F</given-names></name> <name><surname>Yuan</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Diagnosis of coronavirus disease 2019 (COVID-19) with structured latent multi-view representation learning</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<fpage>2606</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2992546</pub-id><pub-id pub-id-type="pmid">32386147</pub-id></citation></ref>
<ref id="B36">
<label>36.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lessmann</surname> <given-names>N</given-names></name> <name><surname>S&#x000E1;nchez</surname> <given-names>CI</given-names></name> <name><surname>Beenen</surname> <given-names>L</given-names></name> <name><surname>Boulogne</surname> <given-names>LH</given-names></name> <name><surname>Brink</surname> <given-names>M</given-names></name> <name><surname>Calli</surname> <given-names>E</given-names></name> <etal/></person-group>. <article-title>Automated assessment of CO-RADS and chest CT severity scores in patients with suspected COVID-19 using artificial intelligence</article-title>. <source>Radiology</source>. (<year>2020</year>) <fpage>202439</fpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020202439</pub-id><pub-id pub-id-type="pmid">32729810</pub-id></citation></ref>
<ref id="B37">
<label>37.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y</given-names></name> <name><surname>Dong</surname> <given-names>W</given-names></name> <name><surname>Chen</surname> <given-names>J</given-names></name> <name><surname>Cao</surname> <given-names>S</given-names></name> <name><surname>Zhou</surname> <given-names>H</given-names></name> <name><surname>Zhu</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Efficient and effective training of COVID-19 classification networks with self-supervised dual-track learning to rank</article-title>. <source>IEEE J Biomed Health Inform</source>. (<year>2020</year>) <volume>24</volume>:<fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2020.3018181</pub-id><pub-id pub-id-type="pmid">32816680</pub-id></citation></ref>
<ref id="B38">
<label>38.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>C</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Liu</surname> <given-names>C</given-names></name> <name><surname>Sun</surname> <given-names>Q</given-names></name> <name><surname>Peng</surname> <given-names>W</given-names></name></person-group>. <article-title>Differentiating novel coronavirus pneumonia from general pneumonia based on machine learning</article-title>. <source>Biomed Eng Online</source>. (<year>2020</year>) <volume>19</volume>:<fpage>66</fpage>. <pub-id pub-id-type="doi">10.1186/s12938-020-00809-9</pub-id><pub-id pub-id-type="pmid">32814568</pub-id></citation></ref>
<ref id="B39">
<label>39.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mei</surname> <given-names>X</given-names></name> <name><surname>Lee</surname> <given-names>HC</given-names></name> <name><surname>Diao</surname> <given-names>KY</given-names></name> <name><surname>Huang</surname> <given-names>M</given-names></name> <name><surname>Lin</surname> <given-names>B</given-names></name> <name><surname>Liu</surname> <given-names>C</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence-enabled rapid diagnosis of patients with COVID-19</article-title>. <source>Nat Med.</source> (<year>2020</year>) <volume>26</volume>:<fpage>1224</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-020-0931-3</pub-id><pub-id pub-id-type="pmid">32511559</pub-id></citation></ref>
<ref id="B40">
<label>40.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panwar</surname> <given-names>H</given-names></name> <name><surname>Gupta</surname> <given-names>PK</given-names></name> <name><surname>Siddiqui</surname> <given-names>MK</given-names></name> <name><surname>Morales-Menendez</surname> <given-names>R</given-names></name> <name><surname>Singh</surname> <given-names>V</given-names></name></person-group>. <article-title>Application of deep learning for fast detection of COVID-19 in X-Rays using nCOVnet</article-title>. <source>Chaos Solitons Fractals</source>. (<year>2020</year>) <volume>138</volume>:<fpage>109944</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.109944</pub-id><pub-id pub-id-type="pmid">32536759</pub-id></citation></ref>
<ref id="B41">
<label>41.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pathak</surname> <given-names>Y</given-names></name> <name><surname>Shukla</surname> <given-names>PK</given-names></name> <name><surname>Tiwari</surname> <given-names>A</given-names></name> <name><surname>Stalin</surname> <given-names>S</given-names></name> <name><surname>Singh</surname> <given-names>S</given-names></name> <name><surname>Shukla</surname> <given-names>PK</given-names></name></person-group>. <article-title>Deep transfer learning based classification model for COVID-19 disease</article-title>. <source>Ing Rech Biomed</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.irbm.2020.05.003</pub-id><pub-id pub-id-type="pmid">32837678</pub-id></citation></ref>
<ref id="B42">
<label>42.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>Y</given-names></name> <name><surname>Tang</surname> <given-names>YX</given-names></name> <name><surname>Lee</surname> <given-names>S</given-names></name> <name><surname>Zhu</surname> <given-names>Y</given-names></name> <name><surname>Summers</surname> <given-names>RM</given-names></name> <name><surname>Lu</surname> <given-names>Z</given-names></name></person-group>. <article-title>COVID-19-CT-CXR: a freely accessible and weakly labeled chest X-ray and CT image collection on COVID-19 from biomedical literature</article-title>. <source>arXiv</source>. (<year>2020</year>). <pub-id pub-id-type="doi">10.1109/TBDATA.2020.3035935</pub-id><pub-id pub-id-type="pmid">32550254</pub-id></citation></ref>
<ref id="B43">
<label>43.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pu</surname> <given-names>J</given-names></name> <name><surname>Leader</surname> <given-names>J</given-names></name> <name><surname>Bandos</surname> <given-names>A</given-names></name> <name><surname>Shi</surname> <given-names>J</given-names></name> <name><surname>Du</surname> <given-names>P</given-names></name> <name><surname>Yu</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>Any unique image biomarkers associated with COVID-19?</article-title> <source>Eur Radiol</source>. (<year>2020</year>) <volume>30</volume>:<fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1007/s00330-020-06956-w</pub-id><pub-id pub-id-type="pmid">32462445</pub-id></citation></ref>
<ref id="B44">
<label>44.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Raajan</surname> <given-names>NR</given-names></name> <name><surname>Lakshmi</surname> <given-names>VSR</given-names></name> <name><surname>Prabaharan</surname> <given-names>N</given-names></name></person-group>. <article-title>Non-invasive technique-based novel corona (COVID-19) virus detection using CNN</article-title>. <source>Natl Acad Sci Lett</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1007/s40009-020-01009-8</pub-id><pub-id pub-id-type="pmid">32836613</pub-id></citation></ref>
<ref id="B45">
<label>45.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rajaraman</surname> <given-names>S</given-names></name> <name><surname>Siegelman</surname> <given-names>J</given-names></name> <name><surname>Alderson</surname> <given-names>PO</given-names></name> <name><surname>Folio</surname> <given-names>LS</given-names></name> <name><surname>Folio</surname> <given-names>LR</given-names></name> <name><surname>Antani</surname> <given-names>SK</given-names></name></person-group>. <article-title>Iteratively pruned deep learning ensembles for COVID-19 detection in chest X-rays</article-title>. <source>IEEE Access</source>. (<year>2020</year>) <volume>8</volume>:<fpage>115041</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3003810</pub-id></citation>
</ref>
<ref id="B46">
<label>46.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sakagianni</surname> <given-names>A</given-names></name> <name><surname>Feretzakis</surname> <given-names>G</given-names></name> <name><surname>Kalles</surname> <given-names>D</given-names></name> <name><surname>Koufopoulou</surname> <given-names>C</given-names></name> <name><surname>Kaldis</surname> <given-names>V</given-names></name></person-group>. <article-title>Setting up an easy-to-use machine learning pipeline for medical decision support: a case study for COVID-19 diagnosis based on deep learning with CT scans</article-title>. <source>Stud Health Technol Inform</source>. (<year>2020</year>) <volume>272</volume>:<fpage>13</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.3233/SHTI200481</pub-id><pub-id pub-id-type="pmid">32604588</pub-id></citation></ref>
<ref id="B47">
<label>47.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname> <given-names>S</given-names></name></person-group>. <article-title>Drawing insights from COVID-19-infected patients using CT scan images and machine learning techniques: a study on 200 patients</article-title>. <source>Environ Sci Pollut Res Int.</source> (<year>2020</year>) <volume>27</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.21203/rs.3.rs-23863/v1</pub-id><pub-id pub-id-type="pmid">32700269</pub-id></citation></ref>
<ref id="B48">
<label>48.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>D</given-names></name> <name><surname>Kumar</surname> <given-names>V</given-names></name> <name><surname>Vaishali</surname></name> <name><surname>Kaur</surname> <given-names>M</given-names></name></person-group>. <article-title>Classification of COVID-19 patients from chest CT images using multi-objective differential evolution-based convolutional neural networks</article-title>. <source>Eur J Clin Microbiol Infect Dis</source>. (<year>2020</year>) <volume>39</volume>:<fpage>1379</fpage>&#x02013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1007/s10096-020-03901-z</pub-id><pub-id pub-id-type="pmid">32337662</pub-id></citation></ref>
<ref id="B49">
<label>49.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>J</given-names></name> <name><surname>Wang</surname> <given-names>H</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Wu</surname> <given-names>W</given-names></name> <name><surname>Dai</surname> <given-names>G</given-names></name> <name><surname>Wu</surname> <given-names>Z</given-names></name> <etal/></person-group>. <article-title>End-to-end automatic differentiation of the coronavirus disease 2019 (COVID-19) from viral pneumonia based on chest CT</article-title>. <source>Eur J Nucl Med Mol Imaging.</source> (<year>2020</year>) <volume>47</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1007/s00259-020-04929-1</pub-id><pub-id pub-id-type="pmid">33660102</pub-id></citation></ref>
<ref id="B50">
<label>50.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>J</given-names></name> <name><surname>Bao</surname> <given-names>Y</given-names></name> <name><surname>Wen</surname> <given-names>Y</given-names></name> <name><surname>Lu</surname> <given-names>H</given-names></name> <name><surname>Luo</surname> <given-names>H</given-names></name> <name><surname>Xiang</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Prior-attention residual learning for more discriminative COVID-19 screening in CT images</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<fpage>2572</fpage>&#x02013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2994908</pub-id><pub-id pub-id-type="pmid">32730210</pub-id></citation></ref>
<ref id="B51">
<label>51.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S</given-names></name> <name><surname>Zha</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>W</given-names></name> <name><surname>Wu</surname> <given-names>Q</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Niu</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>A fully automatic deep learning system for COVID-19 diagnostic and prognostic analysis</article-title>. <source>Eur Respir J</source>. (<year>2020</year>) <volume>56</volume>:<fpage>2000775</fpage>. <pub-id pub-id-type="doi">10.1183/13993003.00775-2020</pub-id><pub-id pub-id-type="pmid">32444412</pub-id></citation></ref>
<ref id="B52">
<label>52.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Warman</surname> <given-names>A</given-names></name> <name><surname>Warman</surname> <given-names>P</given-names></name> <name><surname>Sharma</surname> <given-names>A</given-names></name> <name><surname>Parikh</surname> <given-names>P</given-names></name> <name><surname>Warman</surname> <given-names>R</given-names></name> <name><surname>Viswanadhan</surname> <given-names>N</given-names></name> <etal/></person-group>. <article-title>Interpretable artificial intelligence for COVID-19 diagnosis from chest CT reveals specificity of ground-glass opacities</article-title>. <source>medRxiv</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1101/2020.05.16.20103408</pub-id><pub-id pub-id-type="pmid">32511545</pub-id></citation></ref>
<ref id="B53">
<label>53.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>X</given-names></name> <name><surname>Hui</surname> <given-names>H</given-names></name> <name><surname>Niu</surname> <given-names>M</given-names></name> <name><surname>Li</surname> <given-names>L</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>He</surname> <given-names>B</given-names></name> <etal/></person-group>. <article-title>Deep learning-based multi-view fusion model for screening 2019 novel coronavirus pneumonia: a multicentre study</article-title>. <source>Eur J Radiol</source>. (<year>2020</year>) <volume>128</volume>:<fpage>109041</fpage>. <pub-id pub-id-type="doi">10.1016/j.ejrad.2020.109041</pub-id><pub-id pub-id-type="pmid">32408222</pub-id></citation></ref>
<ref id="B54">
<label>54.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X</given-names></name> <name><surname>Jiang</surname> <given-names>X</given-names></name> <name><surname>Ma</surname> <given-names>C</given-names></name> <name><surname>Du</surname> <given-names>P</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Lv</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>A deep learning system to screen novel coronavirus disease 2019 pneumonia</article-title>. <source>Engineering.</source> (<year>2020</year>) <volume>6</volume>:<fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.eng.2020.04.010</pub-id><pub-id pub-id-type="pmid">32837749</pub-id></citation></ref>
<ref id="B55">
<label>55.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Ma</surname> <given-names>L</given-names></name> <name><surname>Yang</surname> <given-names>F</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Ma</surname> <given-names>K</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>A collaborative online AI engine for CT-based COVID-19 diagnosis</article-title>. <source>medRxiv.</source> (<year>2020</year>). <pub-id pub-id-type="doi">10.1101/2020.05.10.20096073</pub-id><pub-id pub-id-type="pmid">32511484</pub-id></citation></ref>
<ref id="B56">
<label>56.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yan</surname> <given-names>T</given-names></name> <name><surname>Wong</surname> <given-names>PK</given-names></name> <name><surname>Ren</surname> <given-names>H</given-names></name> <name><surname>Wang</surname> <given-names>H</given-names></name> <name><surname>Wang</surname> <given-names>J</given-names></name> <name><surname>Li</surname> <given-names>Y</given-names></name></person-group>. <article-title>Automatic distinction between COVID-19 and common pneumonia using multi-scale convolutional neural network on chest CT scans</article-title>. <source>Chaos Solitons Fractals</source>. (<year>2020</year>) <volume>140</volume>:<fpage>110153</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110153</pub-id><pub-id pub-id-type="pmid">32834641</pub-id></citation></ref>
<ref id="B57">
<label>57.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>S</given-names></name> <name><surname>Jiang</surname> <given-names>L</given-names></name> <name><surname>Cao</surname> <given-names>Z</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>Cao</surname> <given-names>J</given-names></name> <name><surname>Feng</surname> <given-names>R</given-names></name> <etal/></person-group>. <article-title>Deep learning for detecting corona virus disease 2019 (COVID-19) on high-resolution computed tomography: a pilot study</article-title>. <source>Ann Transl Med.</source> (<year>2020</year>) <volume>8</volume>:<fpage>450</fpage>. <pub-id pub-id-type="doi">10.21037/atm.2020.03.132</pub-id><pub-id pub-id-type="pmid">32395494</pub-id></citation></ref>
<ref id="B58">
<label>58.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>Z</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Wang</surname> <given-names>J</given-names></name> <name><surname>Zhao</surname> <given-names>T</given-names></name> <name><surname>Chen</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Rapid identification of COVID-19 severity in CT scans through classification of deep features</article-title>. <source>Biomed Eng Online</source>. (<year>2020</year>) <volume>19</volume>:<fpage>63</fpage>. <pub-id pub-id-type="doi">10.1186/s12938-020-00807-x</pub-id><pub-id pub-id-type="pmid">32787937</pub-id></citation></ref>
<ref id="B59">
<label>59.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Al-Karawi</surname> <given-names>D</given-names></name> <name><surname>Al-Zaidi</surname> <given-names>S</given-names></name> <name><surname>Polus</surname> <given-names>N</given-names></name> <name><surname>Jassim</surname> <given-names>S</given-names></name></person-group>. <article-title>Machine learning analysis of chest CT scan images as a complementary digital test of coronavirus (COVID-19) patients</article-title>. <source>medRxiv</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1101/2020.04.13.20063479</pub-id></citation></ref>
<ref id="B60">
<label>60.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alom</surname> <given-names>MZ</given-names></name> <name><surname>Rahman</surname> <given-names>M</given-names></name> <name><surname>Nasrin</surname> <given-names>MS</given-names></name> <name><surname>Taha</surname> <given-names>TM</given-names></name> <name><surname>Asari</surname> <given-names>VK</given-names></name></person-group>. <article-title>COVID_MTNet: COVID-19 detection with multi-task deep learning approaches</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:2004.03747.</citation></ref>
<ref id="B61">
<label>61.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barstugan</surname> <given-names>M</given-names></name> <name><surname>Ozkaya</surname> <given-names>U</given-names></name> <name><surname>Ozturk</surname> <given-names>S</given-names></name></person-group>. <article-title>Coronavirus (covid-19) classification using ct images by machine learning methods</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:2003.09424.</citation></ref>
<ref id="B62">
<label>62.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>J</given-names></name> <name><surname>Wu</surname> <given-names>L</given-names></name> <name><surname>Zhang</surname> <given-names>J</given-names></name> <name><surname>Zhang</surname> <given-names>L</given-names></name> <name><surname>Gong</surname> <given-names>D</given-names></name> <name><surname>Zhao</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Deep learning-based model for detecting 2019 novel coronavirus pneumonia on high-resolution computed tomography</article-title>. <source>Sci Rep</source>. (<year>2020</year>) <volume>10</volume>:<fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1101/2020.02.25.20021568</pub-id><pub-id pub-id-type="pmid">33154542</pub-id></citation></ref>
<ref id="B63">
<label>63.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Farid</surname> <given-names>AA</given-names></name> <name><surname>Selim</surname> <given-names>GI</given-names></name> <name><surname>Awad</surname> <given-names>H</given-names></name> <name><surname>Khater</surname> <given-names>A</given-names></name></person-group>. <article-title>A novel approach of CT images feature analysis and prediction to screen for corona virus disease (COVID-19)</article-title>. <source>Int J Sci Eng Res</source>. (<year>2020</year>) <volume>11</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.14299/ijser.2020.03.02</pub-id></citation></ref>
<ref id="B64">
<label>64.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gozes</surname> <given-names>O</given-names></name> <name><surname>Frid-Adar</surname> <given-names>M</given-names></name> <name><surname>Greenspan</surname> <given-names>H</given-names></name> <name><surname>Browning</surname> <given-names>PD</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name> <name><surname>Ji</surname> <given-names>W</given-names></name> <etal/></person-group>. <article-title>Rapid ai development cycle for the coronavirus (covid-19) pandemic: initial results for automated detection &#x00026; patient monitoring using deep learning CT image analysis</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:2003.05037.</citation></ref>
<ref id="B65">
<label>65.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>C</given-names></name> <name><surname>Chen</surname> <given-names>W</given-names></name> <name><surname>Cao</surname> <given-names>Y</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <name><surname>Zhang</surname> <given-names>X</given-names></name> <name><surname>Deng</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>Development and evaluation of an AI system for COVID-19 diagnosis</article-title>. <source>medRxiv</source>. (<year>2020</year>) <volume>11</volume>:<fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1101/2020.03.20.20039834</pub-id></citation></ref>
<ref id="B66">
<label>66.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>S</given-names></name> <name><surname>Wang</surname> <given-names>B</given-names></name> <name><surname>Xu</surname> <given-names>H</given-names></name> <name><surname>Luo</surname> <given-names>C</given-names></name> <name><surname>Wei</surname> <given-names>L</given-names></name> <name><surname>Zhao</surname> <given-names>W</given-names></name> <etal/></person-group>. <article-title>AI-assisted CT imaging analysis for COVID-19 screening: building and deploying a medical AI system in four weeks</article-title>. <source>medRxiv.</source> (<year>2020</year>). <pub-id pub-id-type="doi">10.1101/2020.03.19.20039354</pub-id></citation></ref>
<ref id="B67">
<label>67.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kassani</surname> <given-names>SH</given-names></name> <name><surname>Kassasni</surname> <given-names>PH</given-names></name> <name><surname>Wesolowski</surname> <given-names>MJ</given-names></name> <name><surname>Schneider</surname> <given-names>KA</given-names></name> <name><surname>Deters</surname> <given-names>R</given-names></name></person-group>. <article-title>Automatic detection of coronavirus disease (COVID-19) in X-ray and CT images: a machine learning-based approach</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200410641.</citation></ref>
<ref id="B68">
<label>68.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozkaya</surname> <given-names>U</given-names></name> <name><surname>Ozturk</surname> <given-names>S</given-names></name> <name><surname>Barstugan</surname> <given-names>M</given-names></name></person-group>. <article-title>Coronavirus (COVID-19) classification using deep features fusion and ranking technique</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200403698. <pub-id pub-id-type="doi">10.1007/978-3-030-55258-9_17</pub-id></citation></ref>
<ref id="B69">
<label>69.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shi</surname> <given-names>F</given-names></name> <name><surname>Xia</surname> <given-names>L</given-names></name> <name><surname>Shan</surname> <given-names>F</given-names></name> <name><surname>Wu</surname> <given-names>D</given-names></name> <name><surname>Wei</surname> <given-names>Y</given-names></name> <name><surname>Yuan</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Large-scale screening of covid-19 from community acquired pneumonia using infection size-aware classification</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200309860. <pub-id pub-id-type="doi">10.1088/1361-6560/abe838</pub-id><pub-id pub-id-type="pmid">33729998</pub-id></citation></ref>
<ref id="B70">
<label>70.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>C</given-names></name> <name><surname>Deng</surname> <given-names>X</given-names></name> <name><surname>Fu</surname> <given-names>Q</given-names></name> <name><surname>Zhou</surname> <given-names>Q</given-names></name> <name><surname>Feng</surname> <given-names>J</given-names></name> <name><surname>Ma</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Deep learning-based detection for COVID-19 from chest CT using weak label</article-title>. <source>medRxiv</source>. (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1101/2020.03.12.20027185</pub-id></citation></ref>
<ref id="B71">
<label>71.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Apostolopoulos</surname> <given-names>ID</given-names></name> <name><surname>Mpesiana</surname> <given-names>TA</given-names></name></person-group>. <article-title>Covid-19: automatic detection from X-ray images utilizing transfer learning with convolutional neural networks</article-title>. <source>Phys Eng Sci Med</source>. (<year>2020</year>) <volume>43</volume>:<fpage>635</fpage>&#x02013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1007/s13246-020-00865-4</pub-id><pub-id pub-id-type="pmid">32524445</pub-id></citation></ref>
<ref id="B72">
<label>72.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brunese</surname> <given-names>L</given-names></name> <name><surname>Mercaldo</surname> <given-names>F</given-names></name> <name><surname>Reginelli</surname> <given-names>A</given-names></name> <name><surname>Santone</surname> <given-names>A</given-names></name></person-group>. <article-title>Explainable deep learning for pulmonary disease and coronavirus COVID-19 detection from X-rays</article-title>. <source>Comput Methods Programs Biomed</source>. (<year>2020</year>) <volume>196</volume>:<fpage>105608</fpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2020.105608</pub-id><pub-id pub-id-type="pmid">32599338</pub-id></citation></ref>
<ref id="B73">
<label>73.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chowdhury</surname> <given-names>MEH</given-names></name> <name><surname>Rahman</surname> <given-names>T</given-names></name> <name><surname>Khandakar</surname> <given-names>A</given-names></name> <name><surname>Mazhar</surname> <given-names>R</given-names></name> <name><surname>Kadir</surname> <given-names>MA</given-names></name> <name><surname>Mahbub</surname> <given-names>ZB</given-names></name> <etal/></person-group>. <article-title>Can AI help in screening viral and COVID-19 pneumonia?</article-title> <source>IEEE Access</source>. (<year>2020</year>) <volume>8</volume>:<fpage>132665</fpage>&#x02013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3010287</pub-id></citation></ref>
<ref id="B74">
<label>74.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Civit-Masot</surname> <given-names>J</given-names></name> <name><surname>Luna-Perej&#x000F3;n</surname> <given-names>F</given-names></name> <name><surname>Morales</surname> <given-names>MD</given-names></name> <name><surname>Civit</surname> <given-names>A</given-names></name></person-group>. <article-title>Deep learning system for COVID-19 diagnosis aid using X-ray pulmonary images</article-title>. <source>Appl Sci</source>. (<year>2020</year>) <volume>10</volume>:<fpage>4640</fpage>. <pub-id pub-id-type="doi">10.3390/app10134640</pub-id></citation></ref>
<ref id="B75">
<label>75.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Elaziz</surname> <given-names>MA</given-names></name> <name><surname>Hosny</surname> <given-names>KM</given-names></name> <name><surname>Salah</surname> <given-names>A</given-names></name> <name><surname>Darwish</surname> <given-names>MM</given-names></name> <name><surname>Lu</surname> <given-names>S</given-names></name> <name><surname>Sahlol</surname> <given-names>AT</given-names></name></person-group>. <article-title>New machine learning method for image-based diagnosis of COVID-19</article-title>. <source>PLoS ONE</source>. (<year>2020</year>) <volume>15</volume>:<fpage>e0235187</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0235187</pub-id><pub-id pub-id-type="pmid">32589673</pub-id></citation></ref>
<ref id="B76">
<label>76.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hassantabar</surname> <given-names>S</given-names></name> <name><surname>Ahmadi</surname> <given-names>M</given-names></name> <name><surname>Sharifi</surname> <given-names>A</given-names></name></person-group>. <article-title>Diagnosis and detection of infected tissue of COVID-19 patients based on lung x-ray image using convolutional neural network approaches</article-title>. <source>Chaos Solitons Fractals.</source> (<year>2020</year>) <volume>140</volume>:<fpage>110170</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110170</pub-id><pub-id pub-id-type="pmid">32834651</pub-id></citation></ref>
<ref id="B77">
<label>77.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Islam</surname> <given-names>MZ</given-names></name> <name><surname>Islam</surname> <given-names>MM</given-names></name> <name><surname>Asraf</surname> <given-names>A</given-names></name></person-group>. <article-title>A combined deep CNN-LSTM network for the detection of novel coronavirus (COVID-19) using X-ray images</article-title>. <source>Inform Med Unlocked</source>. (<year>2020</year>) <volume>20</volume>:<fpage>100412</fpage>. <pub-id pub-id-type="doi">10.1016/j.imu.2020.100412</pub-id><pub-id pub-id-type="pmid">32835084</pub-id></citation></ref>
<ref id="B78">
<label>78.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>AI</given-names></name> <name><surname>Shah</surname> <given-names>JL</given-names></name> <name><surname>Bhat</surname> <given-names>MM</given-names></name></person-group>. <article-title>CoroNet: a deep neural network for detection and diagnosis of COVID-19 from chest x-ray images</article-title>. <source>Comput Methods Programs Biomed.</source> (<year>2020</year>) <volume>196</volume>:<fpage>105581</fpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2020.105581</pub-id></citation>
</ref>
<ref id="B79">
<label>79.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khuzani</surname> <given-names>AZ</given-names></name> <name><surname>Heidari</surname> <given-names>M</given-names></name> <name><surname>Shariati</surname> <given-names>SA</given-names></name></person-group>. <article-title>COVID-Classifier: an automated machine learning model to assist in the diagnosis of COVID-19 infection in chest x-ray images</article-title>. <source>medRxiv.</source> (<year>2020</year>). <pub-id pub-id-type="pmid">32511510</pub-id></citation></ref>
<ref id="B80">
<label>80.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ko</surname> <given-names>H</given-names></name> <name><surname>Chung</surname> <given-names>H</given-names></name> <name><surname>Kang</surname> <given-names>WS</given-names></name> <name><surname>Kim</surname> <given-names>KW</given-names></name> <name><surname>Shin</surname> <given-names>Y</given-names></name> <name><surname>Kang</surname> <given-names>SJ</given-names></name> <etal/></person-group>. <article-title>COVID-19 pneumonia diagnosis using a simple 2D deep learning framework with a single chest CT image: model development and validation</article-title>. <source>J Med Internet Res</source>. (<year>2020</year>) <volume>22</volume>:<fpage>e19569</fpage>. <pub-id pub-id-type="doi">10.2196/19569</pub-id><pub-id pub-id-type="pmid">32568730</pub-id></citation></ref>
<ref id="B81">
<label>81.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Loey</surname> <given-names>M</given-names></name> <name><surname>Smarandache</surname> <given-names>F</given-names></name> <name><surname>Khalifa</surname> <given-names>NEM</given-names></name></person-group>. <article-title>Within the lack of chest COVID-19 X-ray dataset: a novel detection model based on GAN and deep transfer learning</article-title>. <source>Symmetry.</source> (<year>2020</year>) <volume>12</volume>:<fpage>651</fpage>. <pub-id pub-id-type="doi">10.3390/sym12040651</pub-id></citation></ref>
<ref id="B82">
<label>82.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mahmud</surname> <given-names>T</given-names></name> <name><surname>Rahman</surname> <given-names>MA</given-names></name> <name><surname>Fattah</surname> <given-names>SA</given-names></name></person-group>. <article-title>CovXNet: a multi-dilation convolutional neural network for automatic COVID-19 and other pneumonia detection from chest X-ray images with transferable multi-receptive feature optimization</article-title>. <source>Comput Biol Med</source>. (<year>2020</year>) <volume>122</volume>:<fpage>103869</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103869</pub-id><pub-id pub-id-type="pmid">32658740</pub-id></citation></ref>
<ref id="B83">
<label>83.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mart&#x000ED;nez</surname> <given-names>F</given-names></name> <name><surname>Mart&#x000ED;nez</surname> <given-names>F</given-names></name> <name><surname>Jacinto</surname> <given-names>E</given-names></name></person-group>. <article-title>Performance evaluation of the NASnet convolutional network in the automatic identification of COVID-19</article-title>. <source>Int J Adv Sci Engin Information Technol</source>. (<year>2020</year>) <volume>10</volume>:<fpage>662</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.18517/ijaseit.10.2.11446</pub-id></citation></ref>
<ref id="B84">
<label>84.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Minaee</surname> <given-names>S</given-names></name> <name><surname>Kafieh</surname> <given-names>R</given-names></name> <name><surname>Sonka</surname> <given-names>M</given-names></name> <name><surname>Yazdani</surname> <given-names>S</given-names></name> <name><surname>Jamalipour Soufi</surname> <given-names>G</given-names></name></person-group>. <article-title>Deep-COVID: predicting COVID-19 from chest X-ray images using deep transfer learning</article-title>. <source>Med Image Anal</source>. (<year>2020</year>) <volume>65</volume>:<fpage>101794</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101794</pub-id><pub-id pub-id-type="pmid">32781377</pub-id></citation></ref>
<ref id="B85">
<label>85.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Narayan Das</surname> <given-names>N</given-names></name> <name><surname>Kumar</surname> <given-names>N</given-names></name> <name><surname>Kaur</surname> <given-names>M</given-names></name> <name><surname>Kumar</surname> <given-names>V</given-names></name> <name><surname>Singh</surname> <given-names>D</given-names></name></person-group>. <article-title>Automated deep transfer learning-based approach for detection of COVID-19 infection in chest X-rays</article-title>. <source>Ing Rech Biomed.</source> (<year>2020</year>) <fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.irbm.2020.07.001</pub-id><pub-id pub-id-type="pmid">32837679</pub-id></citation></ref>
<ref id="B86">
<label>86.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nour</surname> <given-names>M</given-names></name> <name><surname>C&#x000F6;mert</surname> <given-names>Z</given-names></name> <name><surname>Polat</surname> <given-names>K</given-names></name></person-group>. <article-title>A novel medical diagnosis model for COVID-19 infection detection based on deep features and Bayesian optimization</article-title>. <source>Appl Soft Comput</source>. (<year>2020</year>) <volume>97</volume>:<fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1016/j.asoc.2020.106580</pub-id><pub-id pub-id-type="pmid">32837453</pub-id></citation></ref>
<ref id="B87">
<label>87.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Novitasari</surname> <given-names>DCR</given-names></name> <name><surname>Hendradi</surname> <given-names>R</given-names></name> <name><surname>Caraka</surname> <given-names>RE</given-names></name> <name><surname>Rachmawati</surname> <given-names>Y</given-names></name> <name><surname>Fanani</surname> <given-names>NZ</given-names></name> <name><surname>Syarifudin</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Detection of COVID-19 chest x-ray using support vector machine and convolutional neural network</article-title>. <source>Commun Math Biol Neurosci.</source> (<year>2020</year>) <volume>2020</volume>:<fpage>1</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.28919/cmbn/4765</pub-id><pub-id pub-id-type="pmid">33363252</pub-id></citation></ref>
<ref id="B88">
<label>88.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oh</surname> <given-names>Y</given-names></name> <name><surname>Park</surname> <given-names>S</given-names></name> <name><surname>Ye</surname> <given-names>JC</given-names></name></person-group>. <article-title>Deep Learning COVID-19 Features on CXR using limited training data sets</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<fpage>2688</fpage>&#x02013;<lpage>700</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2993291</pub-id><pub-id pub-id-type="pmid">32396075</pub-id></citation></ref>
<ref id="B89">
<label>89.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozturk</surname> <given-names>T</given-names></name> <name><surname>Talo</surname> <given-names>M</given-names></name> <name><surname>Yildirim</surname> <given-names>EA</given-names></name> <name><surname>Baloglu</surname> <given-names>UB</given-names></name> <name><surname>Yildirim</surname> <given-names>O</given-names></name> <name><surname>Rajendra Acharya</surname> <given-names>U</given-names></name></person-group>. <article-title>Automated detection of COVID-19 cases using deep neural networks with X-ray images</article-title>. <source>Comput Biol Med</source>. (<year>2020</year>) <volume>121</volume>:<fpage>103792</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103792</pub-id><pub-id pub-id-type="pmid">32568675</pub-id></citation></ref>
<ref id="B90">
<label>90.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pandit</surname> <given-names>MK</given-names></name> <name><surname>Banday</surname> <given-names>SA</given-names></name></person-group>. <article-title>SARS n-CoV2-19 detection from chest x-ray images using deep neural networks</article-title>. <source>Int J Pervasive Comput Commun</source>. (<year>2020</year>) <volume>16</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1108/IJPCC-06-2020-0060</pub-id></citation></ref>
<ref id="B91">
<label>91.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pereira</surname> <given-names>RM</given-names></name> <name><surname>Bertolini</surname> <given-names>D</given-names></name> <name><surname>Teixeira</surname> <given-names>LO</given-names></name> <name><surname>Silla</surname> <given-names>CN</given-names> <suffix>Jr.</suffix></name> <name><surname>Costa</surname> <given-names>YMG</given-names></name></person-group>. <article-title>COVID-19 identification in chest X-ray images on flat and hierarchical classification scenarios</article-title>. <source>Comput Methods Programs Biomed.</source> (<year>2020</year>) <volume>194</volume>:<fpage>105532</fpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2020.105532</pub-id><pub-id pub-id-type="pmid">32446037</pub-id></citation></ref>
<ref id="B92">
<label>92.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rahaman</surname> <given-names>MM</given-names></name> <name><surname>Li</surname> <given-names>C</given-names></name> <name><surname>Yao</surname> <given-names>Y</given-names></name> <name><surname>Kulwa</surname> <given-names>F</given-names></name> <name><surname>Rahman</surname> <given-names>MA</given-names></name> <name><surname>Wang</surname> <given-names>Q</given-names></name> <etal/></person-group>. <article-title>Identification of COVID-19 samples from chest X-Ray images using deep learning: a comparison of transfer learning approaches</article-title>. <source>J Xray Sci Technol</source>. (<year>2020</year>) <volume>28</volume>:<fpage>1</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.3233/XST-200715</pub-id><pub-id pub-id-type="pmid">32773400</pub-id></citation></ref>
<ref id="B93">
<label>93.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rahimzadeh</surname> <given-names>M</given-names></name> <name><surname>Attar</surname> <given-names>A</given-names></name></person-group>. <article-title>A modified deep convolutional neural network for detecting COVID-19 and pneumonia from chest X-ray images based on the concatenation of Xception and ResNet50V2</article-title>. <source>Inform Med Unlocked.</source> (<year>2020</year>) <volume>19</volume>:<fpage>100360</fpage>. <pub-id pub-id-type="doi">10.1016/j.imu.2020.100360</pub-id><pub-id pub-id-type="pmid">32501424</pub-id></citation></ref>
<ref id="B94">
<label>94.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sethy</surname> <given-names>PK</given-names></name> <name><surname>Behera</surname> <given-names>SK</given-names></name> <name><surname>Ratha</surname> <given-names>PK</given-names></name> <name><surname>Biswas</surname> <given-names>P</given-names></name></person-group>. <article-title>Detection of coronavirus disease (COVID-19) based on deep features and support vector machine</article-title>. <source>Int J Math Eng Manage Sci</source>. (<year>2020</year>) <volume>5</volume>:<fpage>643</fpage>&#x02013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.33889/IJMEMS.2020.5.4.052</pub-id><pub-id pub-id-type="pmid">33738639</pub-id></citation></ref>
<ref id="B95">
<label>95.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shibly</surname> <given-names>KH</given-names></name> <name><surname>Dey</surname> <given-names>SK</given-names></name> <name><surname>Islam</surname> <given-names>MT</given-names></name> <name><surname>Rahman</surname> <given-names>MM</given-names></name></person-group>. <article-title>COVID faster R-CNN: a novel framework to diagnose novel coronavirus disease (COVID-19) in X-ray images</article-title>. <source>Inform Med Unlocked.</source> (<year>2020</year>) <volume>20</volume>:<fpage>100405</fpage>. <pub-id pub-id-type="doi">10.1016/j.imu.2020.100405</pub-id></citation>
</ref>
<ref id="B96">
<label>96.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toga&#x000E7;ar</surname> <given-names>M</given-names></name> <name><surname>Ergen</surname> <given-names>B</given-names></name> <name><surname>C&#x000F6;mert</surname> <given-names>Z</given-names></name></person-group>. <article-title>COVID-19 detection using deep learning models to exploit social mimic optimization and structured chest X-ray images using fuzzy color and stacking approaches</article-title>. <source>Comput Biol Med</source>. (<year>2020</year>) <volume>121</volume>:<fpage>103805</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103805</pub-id><pub-id pub-id-type="pmid">32568679</pub-id></citation></ref>
<ref id="B97">
<label>97.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toraman</surname> <given-names>S</given-names></name> <name><surname>Alakus</surname> <given-names>TB</given-names></name> <name><surname>Turkoglu</surname> <given-names>I</given-names></name></person-group>. <article-title>Convolutional capsnet: a novel artificial neural network approach to detect COVID-19 disease from X-ray images using capsule networks</article-title>. <source>Chaos Solitons Fractals.</source> (<year>2020</year>) <volume>140</volume>:<fpage>110122</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110122</pub-id><pub-id pub-id-type="pmid">32834634</pub-id></citation></ref>
<ref id="B98">
<label>98.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tsiknakis</surname> <given-names>N</given-names></name> <name><surname>Trivizakis</surname> <given-names>E</given-names></name> <name><surname>Vassalou</surname> <given-names>EE</given-names></name> <name><surname>Papadakis</surname> <given-names>GZ</given-names></name> <name><surname>Spandidos</surname> <given-names>DA</given-names></name> <name><surname>Tsatsakis</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Interpretable artificial intelligence framework for COVID-19 screening on chest X-rays</article-title>. <source>Exp Ther Med</source>. (<year>2020</year>) <volume>20</volume>:<fpage>727</fpage>&#x02013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.3892/etm.2020.8797</pub-id><pub-id pub-id-type="pmid">32742318</pub-id></citation></ref>
<ref id="B99">
<label>99.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tuncer</surname> <given-names>T</given-names></name> <name><surname>Dogan</surname> <given-names>S</given-names></name> <name><surname>Ozyurt</surname> <given-names>F</given-names></name></person-group>. <article-title>An automated residual exemplar local binary pattern and iterative ReliefF based COVID-19 detection method using chest X-ray image</article-title>. <source>Chemometr Intell Lab Syst</source>. (<year>2020</year>) <volume>203</volume>:<fpage>104054</fpage>. <pub-id pub-id-type="doi">10.1016/j.chemolab.2020.104054</pub-id><pub-id pub-id-type="pmid">32427226</pub-id></citation></ref>
<ref id="B100">
<label>100.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ucar</surname> <given-names>F</given-names></name> <name><surname>Korkmaz</surname> <given-names>D</given-names></name></person-group>. <article-title>COVIDiagnosis-Net: deep Bayes-SqueezeNet based diagnosis of the coronavirus disease 2019 (COVID-19) from X-ray images</article-title>. <source>Med Hypotheses</source>. (<year>2020</year>) <volume>140</volume>:<fpage>109761</fpage>. <pub-id pub-id-type="doi">10.1016/j.mehy.2020.109761</pub-id><pub-id pub-id-type="pmid">32344309</pub-id></citation></ref>
<ref id="B101">
<label>101.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vaid</surname> <given-names>S</given-names></name> <name><surname>Kalantar</surname> <given-names>R</given-names></name> <name><surname>Bhandari</surname> <given-names>M</given-names></name></person-group>. <article-title>Deep learning COVID-19 detection bias: accuracy through artificial intelligence</article-title>. <source>Int Orthop</source>. (<year>2020</year>) <volume>44</volume>:<fpage>1539</fpage>&#x02013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1007/s00264-020-04609-7</pub-id><pub-id pub-id-type="pmid">32462314</pub-id></citation></ref>
<ref id="B102">
<label>102.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Waheed</surname> <given-names>A</given-names></name> <name><surname>Goyal</surname> <given-names>M</given-names></name> <name><surname>Gupta</surname> <given-names>D</given-names></name> <name><surname>Khanna</surname> <given-names>A</given-names></name> <name><surname>Al-Turjman</surname> <given-names>F</given-names></name> <name><surname>Pinheiro</surname> <given-names>PR</given-names></name></person-group>. <article-title>CovidGAN: data augmentation using auxiliary classifier GAN for improved Covid-19 detection</article-title>. <source>IEEE Access</source>. (<year>2020</year>) <volume>8</volume>:<fpage>91916</fpage>&#x02013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2994762</pub-id></citation>
</ref>
<ref id="B103">
<label>103.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yildirim</surname> <given-names>M</given-names></name> <name><surname>Cinar</surname> <given-names>A</given-names></name></person-group>. <article-title>A deep learning based hybrid approach for covid-19 disease detections</article-title>. <source>Traitement Signal</source>. (<year>2020</year>) <volume>37</volume>:<fpage>461</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.18280/ts.370313</pub-id></citation></ref>
<ref id="B104">
<label>104.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yoo</surname> <given-names>SH</given-names></name> <name><surname>Geng</surname> <given-names>H</given-names></name> <name><surname>Chiu</surname> <given-names>TL</given-names></name> <name><surname>Yu</surname> <given-names>SK</given-names></name> <name><surname>Cho</surname> <given-names>DC</given-names></name> <name><surname>Heo</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>Deep learning-based decision-tree classifier for COVID-19 diagnosis from chest X-ray imaging</article-title>. <source>Front Med</source>. (<year>2020</year>) <volume>7</volume>:<fpage>427</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2020.00427</pub-id><pub-id pub-id-type="pmid">32760732</pub-id></citation></ref>
<ref id="B105">
<label>105.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghoshal</surname> <given-names>B</given-names></name> <name><surname>Tucker</surname> <given-names>A</given-names></name></person-group>. <article-title>Estimating uncertainty and interpretability in deep learning for coronavirus (COVID-19) detection</article-title>. <source>arXiv.</source> (<year>2020</year>) Preprint arXiv:200310769.</citation></ref>
<ref id="B106">
<label>106.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hall</surname> <given-names>LO</given-names></name> <name><surname>Paul</surname> <given-names>R</given-names></name> <name><surname>Goldgof</surname> <given-names>DB</given-names></name> <name><surname>Goldgof</surname> <given-names>GM</given-names></name></person-group>. <article-title>Finding covid-19 from chest x-rays using deep learning on a small dataset</article-title>. <source>arXiv</source>. (<year>2020</year>) <volume>40</volume>:<fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.36227/techrxiv.12083964</pub-id></citation></ref>
<ref id="B107">
<label>107.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hammoudi</surname> <given-names>K</given-names></name> <name><surname>Benhabiles</surname> <given-names>H</given-names></name> <name><surname>Melkemi</surname> <given-names>M</given-names></name> <name><surname>Dornaika</surname> <given-names>F</given-names></name> <name><surname>Arganda-Carreras</surname> <given-names>I</given-names></name> <name><surname>Collard</surname> <given-names>D</given-names></name> <etal/></person-group>. <article-title>Deep learning on chest X-ray images to detect and evaluate pneumonia cases at the Era of COVID-19</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200403399.</citation></ref>
<ref id="B108">
<label>108.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hemdan</surname> <given-names>EE-D</given-names></name> <name><surname>Shouman</surname> <given-names>MA</given-names></name> <name><surname>Karar</surname> <given-names>ME</given-names></name></person-group>. <article-title>Covidx-net: a framework of deep learning classifiers to diagnose covid-19 in x-ray images</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200311055.</citation></ref>
<ref id="B109">
<label>109.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jain</surname> <given-names>G</given-names></name> <name><surname>Mittal</surname> <given-names>D</given-names></name> <name><surname>Thakur</surname> <given-names>D</given-names></name> <name><surname>Mittal</surname> <given-names>MK</given-names></name></person-group>. <article-title>A deep learning approach to detect Covid-19 coronavirus with X-Ray images</article-title>. <source>Biocybernet Biomed Eng</source>. (<year>2020</year>). <pub-id pub-id-type="doi">10.1016/j.bbe.2020.08.008</pub-id><pub-id pub-id-type="pmid">32921862</pub-id></citation></ref>
<ref id="B110">
<label>110.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luz</surname> <given-names>E</given-names></name> <name><surname>Silva</surname> <given-names>PL</given-names></name> <name><surname>Silva</surname> <given-names>R</given-names></name> <name><surname>Moreira</surname> <given-names>G</given-names></name></person-group>. <article-title>Towards an efficient deep learning model for covid-19 patterns detection in x-ray images</article-title>. <source>arXiv</source>. (<year>2020</year>) <volume>31</volume>:<fpage>1</fpage>&#x02013;<lpage>10</lpage>.</citation></ref>
<ref id="B111">
<label>111.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozturk</surname> <given-names>S</given-names></name> <name><surname>Ozkaya</surname> <given-names>U</given-names></name> <name><surname>Barstugan</surname> <given-names>M</given-names></name></person-group>. <article-title>Classification of coronavirus images using shrunken features</article-title>. <source>medRxiv</source>. (<year>2020</year>). <pub-id pub-id-type="doi">10.1101/2020.04.03.20048868</pub-id><pub-id pub-id-type="pmid">32904960</pub-id></citation></ref>
<ref id="B112">
<label>112.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J</given-names></name> <name><surname>Xie</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>Y</given-names></name> <name><surname>Shen</surname> <given-names>C</given-names></name> <name><surname>Xia</surname> <given-names>Y</given-names></name></person-group>. <article-title>Covid-19 screening on chest x-ray images using deep learning based anomaly detection</article-title>. <source>arXiv</source>. (<year>2020</year>) Preprint arXiv:200312338.</citation></ref>
<ref id="B113">
<label>113.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ravishankar</surname> <given-names>H</given-names></name> <name><surname>Sudhakar</surname> <given-names>P</given-names></name> <name><surname>Venkataramani</surname> <given-names>R</given-names></name> <name><surname>Thiruvenkadam</surname> <given-names>S</given-names></name> <name><surname>Annangi</surname> <given-names>P</given-names></name> <name><surname>Babu</surname> <given-names>N</given-names></name> <etal/></person-group>. <article-title>Understanding the mechanisms of deep transfer learning for medical images</article-title>. In: <source>Deep Learning and Data Labeling for Medical Applications</source>. <publisher-name>Springer</publisher-name> (<year>2016</year>). p. <fpage>188</fpage>&#x02013;<lpage>96</lpage>.</citation></ref>
<ref id="B114">
<label>114.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hosny</surname> <given-names>KM</given-names></name> <name><surname>Kassem</surname> <given-names>MA</given-names></name> <name><surname>Foaud</surname> <given-names>MM</given-names></name></person-group>. <article-title>Classification of skin lesions using transfer learning and augmentation with Alex-net</article-title>. <source>PLoS ONE</source>. (<year>2019</year>) <volume>14</volume>:<fpage>e0217293</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0217293</pub-id><pub-id pub-id-type="pmid">31112591</pub-id></citation></ref>
<ref id="B115">
<label>115.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>S</given-names></name> <name><surname>Islam</surname> <given-names>N</given-names></name> <name><surname>Jan</surname> <given-names>Z</given-names></name> <name><surname>Din</surname> <given-names>IU</given-names></name> <name><surname>Rodrigues</surname> <given-names>JJC</given-names></name></person-group>. <article-title>A novel deep learning based framework for the detection and classification of breast cancer using transfer learning</article-title>. <source>Pattern Recogn Lett</source>. (<year>2019</year>) <volume>125</volume>:<fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2019.03.022</pub-id></citation></ref>
<ref id="B116">
<label>116.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>A</given-names></name> <name><surname>Sengupta</surname> <given-names>S</given-names></name> <name><surname>Lakshminarayanan</surname> <given-names>V</given-names></name></person-group>. <article-title>Explainable deep learning models in medical image analysis</article-title>. <source>J Imaging</source>. (<year>2020</year>) <volume>6</volume>:<fpage>52</fpage>. <pub-id pub-id-type="doi">10.3390/jimaging6060052</pub-id></citation></ref>
</ref-list> 
</back>
</article> 