<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cardiovasc. Med.</journal-id>
<journal-title>Frontiers in Cardiovascular Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cardiovasc. Med.</abbrev-journal-title>
<issn pub-type="epub">2297-055X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcvm.2022.981901</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Cardiovascular Medicine</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Learning coronary artery calcium scoring in coronary CTA from non-contrast CT using unsupervised domain adaptation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhai</surname> <given-names>Zhiwei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1888711/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>van Velzen</surname> <given-names>Sanne G. M.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/842712/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lessmann</surname> <given-names>Nikolas</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Planken</surname> <given-names>Nils</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1438982/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Leiner</surname> <given-names>Tim</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>I&#x00161;gum</surname> <given-names>Ivana</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/840376/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Biomedical Engineering and Physics, Amsterdam University Medical Center, Location University of Amsterdam</institution>, <addr-line>Amsterdam</addr-line>, <country>Netherlands</country></aff>
<aff id="aff2"><sup>2</sup><institution>Faculty of Science, Informatics Institute, University of Amsterdam</institution>, <addr-line>Amsterdam</addr-line>, <country>Netherlands</country></aff>
<aff id="aff3"><sup>3</sup><institution>Amsterdam Cardiovascular Sciences, Heart Failure and Arrhythmias</institution>, <addr-line>Amsterdam</addr-line>, <country>Netherlands</country></aff>
<aff id="aff4"><sup>4</sup><institution>Diagnostic Image Analysis Group, Radboud University Medical Center Nijmegen</institution>, <addr-line>Nijmegen</addr-line>, <country>Netherlands</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Radiology and Nuclear Medicine, Amsterdam University Medical Center, Location University of Amsterdam</institution>, <addr-line>Amsterdam</addr-line>, <country>Netherlands</country></aff>
<aff id="aff6"><sup>6</sup><institution>Department of Radiology, Utrecht University Medical Center, University of Utrecht</institution>, <addr-line>Utrecht</addr-line>, <country>Netherlands</country></aff>
<aff id="aff7"><sup>7</sup><institution>Department of Radiology, Mayo Clinic</institution>, <addr-line>Rochester, MN</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Federico Caobelli, University Hospital of Basel, Switzerland</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Alexander Van Rosendael, Leiden University Medical Center (LUMC), Netherlands; Uxio Hermida, Faculty of Life Sciences and Medicine, King&#x00027;s College London, United Kingdom; Hieu Nguyen, Johns Hopkins University, United States</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Zhiwei Zhai <email>z.zhai&#x00040;amsterdamumc.nl</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Cardiovascular Imaging, a section of the journal Frontiers in Cardiovascular Medicine</p></fn></author-notes>
<pub-date pub-type="epub">
<day>12</day>
<month>09</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>9</volume>
<elocation-id>981901</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>06</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>11</day>
<month>08</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2022 Zhai, van Velzen, Lessmann, Planken, Leiner and I&#x00161;gum.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Zhai, van Velzen, Lessmann, Planken, Leiner and I&#x00161;gum</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract>
<p>Deep learning methods have demonstrated the ability to perform accurate coronary artery calcium (CAC) scoring. However, these methods require large and representative training data hampering applicability to diverse CT scans showing the heart and the coronary arteries. Training methods that accurately score CAC in cross-domain settings remains challenging. To address this, we present an unsupervised domain adaptation method that learns to perform CAC scoring in coronary CT angiography (CCTA) from non-contrast CT (NCCT). To address the domain shift between NCCT (source) domain and CCTA (target) domain, feature distributions are aligned between two domains using adversarial learning. A CAC scoring convolutional neural network is divided into a feature generator that maps input images to features in the latent space and a classifier that estimates predictions from the extracted features. For adversarial learning, a discriminator is used to distinguish the features between source and target domains. Hence, the feature generator aims to extract features with aligned distributions to fool the discriminator. The network is trained with adversarial loss as the objective function and a classification loss on the source domain as a constraint for adversarial learning. In the experiments, three data sets were used. The network is trained with 1,687 labeled chest NCCT scans from the National Lung Screening Trial. Furthermore, 200 labeled cardiac NCCT scans and 200 unlabeled CCTA scans were used to train the generator and the discriminator for unsupervised domain adaptation. Finally, a data set containing 313 manually labeled CCTA scans was used for testing. Directly applying the CAC scoring network trained on NCCT to CCTA led to a sensitivity of 0.41 and an average false positive volume of 140 mm<sup>3</sup>/scan. The proposed method improved the sensitivity to 0.80 and reduced the average false positive volume to 20 mm<sup>3</sup>/scan. 
The results indicate that the unsupervised domain adaptation approach enables automatic CAC scoring in contrast enhanced CT while learning from a large and diverse set of CT scans without contrast. This may allow for better utilization of existing annotated data sets and extend the applicability of automatic CAC scoring to contrast-enhanced CT scans without the need for additional manual annotations. The code is publicly available at <ext-link ext-link-type="uri" xlink:href="https://github.com/qurAI-amsterdam/CACscoringUsingDomainAdaptation">https://github.com/qurAI-amsterdam/CACscoringUsingDomainAdaptation</ext-link>.</p></abstract>
<kwd-group>
<kwd>coronary artery calcium scoring</kwd>
<kwd>unsupervised domain adaptation</kwd>
<kwd>convolutional neural network (CNN)</kwd>
<kwd>coronary CTA</kwd>
<kwd>adversarial learning</kwd>
</kwd-group>
<counts>
<fig-count count="4"/>
<table-count count="3"/>
<equation-count count="3"/>
<ref-count count="43"/>
<page-count count="12"/>
<word-count count="8589"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1. Introduction</title>
<p>In recent years, deep neural networks have achieved impressive performance on various medical image analysis tasks (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). This success is highly associated with the use of large amounts of representative annotated training data. However, the dependence on such data sets limits the applicability of already trained and well-performing networks to non-representative data sampled from a different distribution, such as images acquired at different sites, on different scanners, and by different acquisition protocols. Hence, generalizing deep neural networks trained on specific data to test data originating from a different domain remains a major challenge.</p>
<p>The domain shift, i.e., differences in data distributions and types of data between training and test domains, can be addressed by unsupervised domain adaptation methods that transfer a model that was trained on the source domain in a supervised manner to the target domain where no labels are available (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>). The common idea of unsupervised domain adaptation methods is to align features extracted by a network between two domains, aiming to generate similar feature distributions for both domains (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>). To achieve this, an adversarial learning strategy can be used. In this case, the generator network is optimized to extract features with similar distribution for the two domains while the discriminator network is trained to distinguish features from these domains (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>Several works have investigated methods for unsupervised approaches to domain shift problem for segmentation of cardiac images (<xref ref-type="bibr" rid="B7">7</xref>&#x02013;<xref ref-type="bibr" rid="B10">10</xref>). Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>) proposed an unsupervised adversarial domain adaptation network to transfer cardiac segmentation network between MRI and CT. In this work the feature distributions of source and target domains were aligned at multiple scales. Chen et al. (<xref ref-type="bibr" rid="B7">7</xref>) extended the work of Dou et al. by aligning the domains in both image and feature perspectives. This method was evaluated with cardiac segmentation and abdominal multi-organ segmentation between MRI and CT. Wu et al. (<xref ref-type="bibr" rid="B10">10</xref>) presented an unsupervised domain adaptation framework to adapt cardiac segmentation between MRI and CT. In this method, a novel distance metric was proposed to calculate the misalignment of feature distributions in latent space and enable explicit domain adaptation.</p>
<p>In this work, we address detection and quantification of coronary artery calcium (CAC scoring) in contrast-enhanced coronary CT angiography (CCTA). Our aim is to exploit large sets of already annotated data in CT scans without contrast enhancement and extend the applicability of CAC scoring to CCTA. Current CAC scoring protocols are performed in a highly standardized manner without injection of iodinated contrast. Coronary artery calcifications are identified as high density areas of &#x02265; 130 Hounsfield Units (HU) in the coronary artery (<xref ref-type="bibr" rid="B11">11</xref>). Manual CAC scoring can be tedious and time-consuming, therefore, automated CAC scoring methods have been proposed (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B13">13</xref>). Recent methods using deep learning have demonstrated accurate performance (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B15">15</xref>). Given that CAC scoring is commonly performed in non-contrast CT (NCCT), automated methods have mostly focused on application in these scans. While earlier methods focused on a single type of NCCT scans (<xref ref-type="bibr" rid="B16">16</xref>&#x02013;<xref ref-type="bibr" rid="B18">18</xref>), recent studies showed that the methods can generalize to diverse types of NCCT data. In a large-scale study containing data of 7,240 subjects, Van Velzen et al. (<xref ref-type="bibr" rid="B19">19</xref>) trained and evaluated a method proposed by Lessmann et al. (<xref ref-type="bibr" rid="B16">16</xref>) with different types of NCCT scans including scans from different hospitals, multiple scanners and multiple image acquisition protocols and demonstrated a good agreement between automated and manual scoring. Subsequently, Zeleznik et al. (<xref ref-type="bibr" rid="B20">20</xref>) demonstrated the robustness of a deep learning system for automated CAC scoring on routine cardiac gated and non-gated NCCT of 20,084 individuals.</p>
<p>In addition to CAC scoring in NCCT, CAC can be quantified in CCTA (<xref ref-type="bibr" rid="B21">21</xref>) and consequently, a number of methods automating the process have been developed (<xref ref-type="bibr" rid="B22">22</xref>&#x02013;<xref ref-type="bibr" rid="B25">25</xref>). In a clinical cardiac CT exam, commonly cardiac NCCT is acquired first to determine the calcium score, which is followed by the acquisition of CCTA to detect presence of non-calcified plaque and stenosis in the coronary arteries. However, the amount of calcified plaque extracted from CCTA scans allows accurate cardiovascular risk stratification (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>). Hence, when the scan without contrast is not available, calcium scoring in CCTA may allow determination of patient&#x00027;s cardiovascular risk and thus allow better utilization of the already acquired data. Furthermore, performing CAC scoring in CCTA could allow omitting acquisition of the NCCT and thereby reduce the radiation dose to the patient and save scan time (<xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>). Coronary artery calcium scoring in CCTA differs substantially from scoring in NCCT as the contrast material enhancing the coronary artery lumen typically exceeds the threshold (130 HU) used for CAC scoring in NCCT. Therefore, automatic methods trained on NCCT are not directly applicable to CCTA scans. Training the deep learning method with extra annotated CCTA data may improve its applicability to CCTA. However, manually annotating a large amount of representative training data is tedious and time consuming. To address this, in this study, we investigate the feasibility of adapting a CAC scoring network trained on a large set of labeled NCCT scans (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B19">19</xref>) to unlabeled CCTA scans using unsupervised domain adaptation. 
For this, we investigate a cross-domain approach described by Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>) to enable CAC scoring in CCTA without annotations while utilizing NCCT with available manual annotations.</p></sec>
<sec sec-type="materials" id="s2">
<title>2. Materials</title>
<sec>
<title>2.1. Image data</title>
<p>This study includes three data sets. First, a data set of <italic>labeled</italic> low-dose chest NCCT scans from the National Lung Screening Trial (NLST) was used. The NLST enrolled 53,454 current or former heavy smokers aged 55&#x02013;74 in the United States (<xref ref-type="bibr" rid="B26">26</xref>). In our previous study, a set of 1,687 baseline chest NCCT scans was selected (<xref ref-type="bibr" rid="B16">16</xref>). This set was designed to be diverse with respect to scanner model and reconstruction algorithm. The selected scans were acquired on 13 different scanner models in 31 hospitals. These chest NCCT scans were acquired with breath hold after inspiration and using a tube voltage of 120 or 140 kVp, depending on the subject&#x00027;s weight. Scans were reconstructed to 0.49&#x02013;0.98 mm in-plane resolution, 1&#x02013;3 mm slice thickness, and 0.6&#x02013;3 mm increment. For our work, all scans were resampled to 3 mm slice thickness and 1.5 mm increment, following earlier studies (<xref ref-type="bibr" rid="B16">16</xref>).</p>
<p>Second, a mixed set of <italic>labeled</italic> cardiac NCCT and <italic>unlabeled</italic> CCTA scans was used. Specifically, 200 labeled cardiac NCCT scans were acquired in clinical patient workup at University Medical Center Utrecht, The Netherlands (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B27">27</xref>) and 200 unlabeled CCTA scans were acquired at Amsterdam University Medical Center location University of Amsterdam, The Netherlands. The cardiac NCCT scans were acquired with a Philips Brilliance iCT 256 scanner, with ECG synchronization and 120 kVp tube voltage. Scans were reconstructed to 0.29&#x02013;0.49 mm in-plane resolution, 3 mm slice thickness, and 1.5 mm increment. The CCTA scans were acquired with a Siemens Somatom Force CT Scanner, with ECG synchronization and 70&#x02013;120 kVp tube voltage. Scans were reconstructed to 0.22&#x02013;0.46 mm in-plane resolution, 0.6 mm slice thickness, and 0.4 mm increment.</p>
<p>Third, a data set of <italic>labeled</italic> 313 CCTA scans from Amsterdam University Medical Center location University of Amsterdam, The Netherlands was used to evaluate the CAC detection on the target domain (CCTA test set). These CCTA scans were acquired with the Siemens Somatom Force CT Scanner, with ECG synchronization and 70&#x02013;120 kVp tube voltage. Scans were reconstructed to 0.19&#x02013;0.77 mm in-plane resolution, 0.6&#x02013;1 mm slice thickness, and 0.4 mm increment.</p>
</sec>
<sec>
<title>2.2. Manual reference annotations</title>
<p>Manual reference labels of CAC were available from previous studies for the low-dose chest NCCT scans in the NLST data set (<xref ref-type="bibr" rid="B16">16</xref>) and the cardiac NCCT in the mixed set (<xref ref-type="bibr" rid="B19">19</xref>). The labeling was performed semi-automatically: all regions of &#x02265; 3 adjacent voxels with a CT value above 130 HU were shown as overlay. An observer manually identified lesions and labeled them according to their anatomical location, i.e., left anterior descending artery (LAD), left circumflex artery (LCX), or right coronary artery (RCA) (<xref ref-type="bibr" rid="B19">19</xref>). Given that chest CT without ECG synchronization does not allow visualization of the left main (LM) artery, CAC in the LM was labeled as LAD. Examples of chest NCCT slices and manual reference annotations are shown in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S1</xref>.</p>
<p>For the 200 CCTA scans in the mixed set, reference labels of CAC were not available. Hence, for the CCTA scans from the CCTA test set, CAC was manually annotated with a semi-automated method as either LAD, LCX, or RCA. This was done using an in-house developed software designed in MevisLab 3.2 (<xref ref-type="bibr" rid="B28">28</xref>). In agreement with manual labeling in NCCT, CAC in the LM was labeled as LAD. Because the standard 130 HU threshold for CAC detection in NCCT can not be used in CCTA, we used scan specific thresholds, following earlier studies (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B29">29</xref>). For this, a region of interest (ROI) defined by a bounding box with a size around 35 &#x000D7; 36 &#x000D7; 44 voxels in the ascending aorta at the level of the origin of the left coronary artery was manually selected. Subsequently, the mean <italic>mean</italic><sub><italic>ROI</italic></sub> and standard deviation <italic>STD</italic><sub><italic>ROI</italic></sub> from the CT values of the voxels within the ROI were used to compute a scan specific threshold <italic>mean</italic><sub>ROI</sub>&#x0002B;3<italic>STD</italic><sub>ROI</sub>. Using this threshold, each coronary artery calcification was manually identified by a mouse click on the lesion. Subsequently, all connected voxels in the lesion above the scan specific threshold were marked as CAC in LAD, LCX, or RCA using 3D connected component labeling considering six-voxel connectivity. Examples of CCTA slices and manual reference annotations are shown in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S2</xref>.</p>
<p>In this study, NCCT scans (both chest and cardiac) are considered the source domain and CCTA scans are representing the target domain. The NCCT scans with CAC annotations from the NLST data set were used to train the CAC detection network on the source domain. The mixed set of labeled cardiac NCCT (source domain) and unlabeled CCTA (target domain) was used to train our unsupervised domain adaptation method. The labeled CCTA scans (target domain) in the CCTA test set were only used to evaluate the CAC detection on the target domain. The description of data sets and their usage are illustrated in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Description of data and corresponding usage.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>Scan type</bold></th>
<th valign="top" align="left"><bold>&#x00023;Scans</bold></th>
<th valign="top" align="left"><bold>Reference</bold></th>
<th valign="top" align="left"><bold>Domain</bold></th>
<th valign="top" align="left"><bold>Usage</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Chest NCCT</td>
<td valign="top" align="left">1,687</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">Source</td>
<td valign="top" align="left">Training CAC scoring on source domain</td>
</tr>
<tr>
<td valign="top" align="left">Cardiac NCCT</td>
<td valign="top" align="left">200</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">Source</td>
<td valign="top" align="left">Training unsupervised domain adaptation</td>
</tr>
<tr>
<td valign="top" align="left">CCTA</td>
<td valign="top" align="left">200</td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">Target</td>
<td valign="top" align="left">Training unsupervised domain adaptation</td>
</tr>
<tr>
<td valign="top" align="left">CCTA</td>
<td valign="top" align="left">313</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">Target</td>
<td valign="top" align="left">Testing CAC scoring on target domain</td>
</tr>
</tbody>
</table>
</table-wrap></sec></sec>
<sec sec-type="methods" id="s3">
<title>3. Methods</title>
<p>A CNN is used for detecting CAC candidates in CCTA scans that is followed by false positive (FP) reduction, as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The CNN, which is trained on labeled NCCT data is adapted for application in CCTA using unsupervised domain adaptation. False positive reduction is performed by limiting the detected lesions to plausible CAC location and size.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Overview of the proposed method for coronary artery calcium (CAC) detection in CCTA. The CNN for CAC detection is divided into a feature generator and a classifier. The feature generator is trained on source domain and is adapted to the target domain using unsupervised domain adaptation. The classifier in the target domain is reused from the source domain. After detection of CAC candidates using the CNN, false positive (FP) reduction is applied to remove FP detections.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcvm-09-981901-g0001.tif"/>
</fig>
<sec>
<title>3.1. CAC detection in CCTA with unsupervised domain adaptation</title>
<p>Unsupervised domain adaptation aims to transfer a model trained with data from a source domain with labels <inline-formula><mml:math id="M3"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msub></mml:math></inline-formula> to a target domain without labels <inline-formula><mml:math id="M4"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msub></mml:math></inline-formula>, where <inline-formula><mml:math id="M5"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:math></inline-formula> represents domain, <italic>X</italic> represents images and 
<italic>Y</italic> represents labels. As proposed by Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>), we use an adversarial training strategy to adapt the CNN to the target domain. In our application, a large set of chest NCCT scans with CAC labels is available, and hence, we aim to transfer the knowledge from NCCT to CCTA for CAC scoring. Therefore, the CAC scoring CNN trained with <italic>labeled</italic> low-dose chest NCCT scans is transferred to CCTA using adversarial domain adaptation.</p>
<p>We used our previous CAC scoring method described by Lessmann et al. (<xref ref-type="bibr" rid="B16">16</xref>) that has been trained and evaluated with a large set of low-dose chest NCCT data. The method consists of two sequential convolutional neural networks (CNN). The first CAC scoring CNN detects CAC candidates and labels them according to their anatomical location, i.e., as CAC in LAD, LCX, or RCA. The second CNN reduces the number of false positive detections. In our current work, only the first CNN is used to transfer knowledge obtained by training the network with NCCT to enable application in CCTA data using unsupervised domain adaptation.</p>
<p>To adapt the CAC detection network (<xref ref-type="bibr" rid="B16">16</xref>) from the source domain to the unlabeled target domain, we aim to align the distributions of extracted features from the two domains following the work by Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>). For this, we divide the CAC detection network into two parts: a feature generator <italic>G</italic>(&#x000B7;) and a classifier <italic>C</italic>(&#x000B7;), as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The <italic>G</italic>(&#x000B7;) maps input images into feature representations in the latent space and the <italic>C</italic>(&#x000B7;) predicts the output class from the feature representations. The early layers of the network which are used for feature extraction are mostly related to the domain, while the deeper layers are mostly task-specific and learn semantic-level features for conducting the predictions (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B30">30</xref>). Hence, we adapt the feature generator <italic>G</italic>(&#x000B7;) trained with NCCT to enable application in CCTA with adversarial domain adaptation, and we reuse the classifier <italic>C</italic>(&#x000B7;) as originally trained.</p>
<p>To enable adversarial learning, we design a discriminator <italic>D</italic>(&#x000B7;) to identify whether the features are from the source domain or the target domain. While the feature generator <italic>G</italic>(&#x000B7;) aims to extract features with similar distributions for both domains, the <italic>D</italic>(&#x000B7;) discriminates between the two domains (<xref ref-type="fig" rid="F2">Figure 2</xref>). The adversarial loss based on the differences in feature distribution between the two domains is formulated as:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M6"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msub><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>G</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msub><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>G</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>G</italic>(&#x000B7;) is optimized to minimize the adversarial loss, and <italic>D</italic>(&#x000B7;) is optimized to maximize the same loss. The generator <italic>G</italic>(&#x000B7;) is optimized based on the objective function calculated from the discriminator <italic>D</italic>(&#x000B7;), which can lead to an incorrect optimization forgetting the classification task. That means the features extracted by the trained <italic>G</italic>(&#x000B7;) can fool the <italic>D</italic>(&#x000B7;). However, these features are not beneficial for the final classification task <italic>C</italic>(<italic>G</italic>(&#x000B7;)). For cross domain learning with <italic>paired</italic> data, the alignment loss in feature space, such as <italic>L</italic>1(<italic>G</italic>(<italic>x</italic><sub><italic>s</italic></sub>), <italic>G</italic>(<italic>x</italic><sub><italic>t</italic></sub>)) or <italic>L</italic>2(<italic>G</italic>(<italic>x</italic><sub><italic>s</italic></sub>), <italic>G</italic>(<italic>x</italic><sub><italic>t</italic></sub>)), can be used as a constraint for the generator optimization (<xref ref-type="bibr" rid="B31">31</xref>). For cross-domain learning with <italic>unpaired</italic> training data as in our case, such an alignment loss in feature space can not be used as a constraint for the generator optimization. In this work, the images were not registered to a common space either. Instead, as proposed in the work by Chen et al. (<xref ref-type="bibr" rid="B4">4</xref>), we use a classification loss in the source domain <inline-formula><mml:math id="M7"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> as constraint to stabilize the training and avoid catastrophic forgetting.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Unsupervised domain adaptation with <italic>unpaired</italic> data is performed using an adversarial learning strategy. The discriminator is optimized to distinguish the features from NCCT (source) domain and CCTA (target) domain. The generator is trained to extract features with similar distributions for the two domains. The blue dots in latent space represent features from the source domain, the orange ones from the target domain. The <inline-formula><mml:math id="M1"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is used as the objective function and the <inline-formula><mml:math id="M2"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is used as a constraint, which is determined on the source domain using the classifier.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcvm-09-981901-g0002.tif"/>
</fig>
<p>The classification loss is formulated as:</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="right center left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi><mml:mi>E</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>C</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>G</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>L</italic><sub><italic>CE</italic></sub> is the cross-entropy loss, <italic>X</italic><sub><italic>s</italic></sub> and <italic>Y</italic><sub><italic>s</italic></sub> are the images and the corresponding reference labels on the source domain. During training, the <italic>D</italic>(&#x000B7;) is trained to maximize the objective of <inline-formula><mml:math id="M9"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, while the <italic>G</italic>(&#x000B7;) is optimized to minimize the objective of <inline-formula><mml:math id="M10"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="M11"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. These are formulated as:</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M12"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:msubsup><mml:mrow></mml:mrow><mml:mrow><mml:mtext>&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mi>max</mml:mi><mml:msub><mml:mi>&#x02112;</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msubsup><mml:mrow></mml:mrow><mml:mrow><mml:mtext>&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>min</mml:mi><mml:msub><mml:mi>&#x02112;</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:msub><mml:mi>&#x02112;</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msubsup></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003B1; is a hyper-parameter for balancing the two loss terms. It is set to 2.0 in this work, based on a grid search strategy.</p>
</sec>
<sec>
<title>3.2. FP reduction</title>
<p>To identify CAC lesions, 3D connected component labeling is performed from the detected voxels and the scan specific threshold (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B29">29</xref>). To remove potential false positive detections, detected lesions smaller than 1 mm<sup>3</sup> are discarded as those are likely noise voxels. Similarly, detected lesions larger than 500 mm<sup>3</sup> are discarded as those exceed the expected CAC volume (<xref ref-type="bibr" rid="B27">27</xref>). In addition, lesions detected outside the heart are discarded. For this, the heart volume is defined by segmentation of cardiac chambers, as described by Bruns et al. (<xref ref-type="bibr" rid="B32">32</xref>) which was trained with CCTA scans of 12 patients scanned for transcatheter aortic valve implantation (SOMATOM Force, Siemens, 70&#x02013;120 kVp, 310&#x02013;628 mAs, in-plane resolution 0.31&#x02013;0.61 mm, slice thickness 0.31&#x02013;0.61 mm, slice increment 0.45 mm). No additional changes or fine tuning for the data in this current study was performed. Subsequently, the segmentation of cardiac chambers was dilated by a sphere as a structuring element with diameter of 10.0 mm to ensure the heart wall and coronary arteries are included in the segmentation.</p>
</sec>
<sec>
<title>3.3. Evaluation</title>
<p>To evaluate the performance of CAC scoring on CCTA, the volume-wise and lesion-wise performance was determined by comparing automatically detected CAC with the manually annotated reference. Since the typically used Agatston score (<xref ref-type="bibr" rid="B11">11</xref>) is not applicable for CAC quantification in CCTA, the volume score was used. The evaluation was performed for total CAC and separately for CAC in LAD, LCX, and RCA. Both the volume-wise and lesion-wise performance was evaluated using sensitivity, false-positive (FP) rate, and F1 score (<xref ref-type="bibr" rid="B16">16</xref>). The agreement of calcium volume and number of lesions between the automatic detection and the reference labels was determined with Spearman correlation coefficients. Finally, the agreement between automatic volume scores and manual reference volume scores was assessed by examining Bland-Altman plots including 95% limits of agreement. Since errors tend to increase with increasing CAC volume, the variation of absolute differences between automatic and manual scores was modeled using regression for nonuniform differences (<xref ref-type="bibr" rid="B33">33</xref>). Because the absolute differences have a half-normal distribution, the modeled absolute differences were multiplied by 1.96 &#x000D7; (&#x003C0;/2)<sup>0.5</sup> to obtain the 95% limits of agreement.</p></sec></sec>
<sec id="s4">
<title>4. Experiments and results</title>
<sec>
<title>4.1. CAC scoring on CCTA</title>
<p>First, we retrained the two-stage CNNs for CAC detection (<xref ref-type="bibr" rid="B16">16</xref>) with the <italic>labeled</italic> chest NCCT data as the source domain. For this, the 1,687 NCCT scans in the NLST data set were randomly divided into 60% training set (1,012 scans), 10% validation set (169 scans), and 30% test set (506 scans). As originally reported (<xref ref-type="bibr" rid="B16">16</xref>), during the training, categorical cross-entropy was used as loss function, Adam was used as optimizer with a learning rate of 5 &#x000D7; 10<sup>&#x02212;4</sup>. The first CNN was trained with three orthogonal (axial, sagittal and coronal) patches of 155&#x000D7;155 pixels and the second CNN with three orthogonal patches of 65&#x000D7;65 pixels (<xref ref-type="bibr" rid="B16">16</xref>). Randomized patch extraction was used as augmentation for training.</p>
<p>Next, to stabilize adversarial training in the unsupervised domain adaptation, the generator was initialized with the weights of the CAC scoring model trained with the chest NCCT data from the NLST dataset. The unsupervised domain adaptation method was trained with the mixed dataset of <italic>labeled</italic> cardiac NCCT data from source domain and <italic>unlabeled</italic> CCTA data from target domain. When performing unsupervised domain adaptation with mixed data containing <italic>labeled</italic> cardiac NCCT and <italic>unlabeled</italic> CCTA scans the method achieved sensitivity of 0.78 in CCTA (<xref ref-type="table" rid="T2">Table 2</xref>). For comparison, the sensitivity of 0.53 was achieved when unsupervised domain adaptation was performed with mixed data containing <italic>labeled</italic> chest NCCT and <italic>unlabeled</italic> CCTA scans. <italic>Labeled</italic> cardiac NCCT data was chosen because these scans resemble CCTA scans more than chest NCCT. <italic>Unlabeled</italic> CCTA were used as <italic>unlabeled</italic> data from the target domain. To obtain a reliable discriminator, the discriminator was solely pretrained for 1,000 iterations first. Thereafter, the generator and discriminator were optimized together by training alternately. Specifically, the generator was optimized one iteration after every 20 iterations of the discriminator, according to the heuristic rules of training a Wasserstein GAN (<xref ref-type="bibr" rid="B34">34</xref>). Following the standard for adversarial training (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B35">35</xref>), the discriminator was kept in a compact space. To enforce this constraint, the weights were clipped between [&#x02212;0.1, 0.1]. 
The RMSProp optimizer was used to optimize the discriminator with a learning rate of 5 &#x000D7; 10<sup>&#x02212;4</sup>, and the generator with a learning rate of 5 &#x000D7; 10<sup>&#x02212;5</sup>, respectively (<xref ref-type="bibr" rid="B36">36</xref>). The optimal hyperparameters were determined by grid search. The adversarial learning was trained for 200 epochs. The networks were implemented in PyTorch (<xref ref-type="bibr" rid="B37">37</xref>). All training was performed on an NVIDIA GeForce RTX 2080 Ti.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Results of the automatic CAC scoring evaluated by volume-wise sensitivity, FP volume per scan, and F1-score between automatic detection and manual reference.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th/>
<th valign="top" align="left"><bold>NCCT [506]</bold></th>
<th valign="top" align="left" colspan="4"><bold>CCTA [313]</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td/>
<td valign="top" align="left"><inline-formula><mml:math id="M13"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula></td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02717;</td>
</tr>
<tr>
<td/>
<td valign="top" align="left"><inline-formula><mml:math id="M14"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula></td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02717;</td>
</tr>
<tr style="border-bottom: thin solid #000000;">
<td/>
<td valign="top" align="left">FP reduction</td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02713;</td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02717;</td>
<td valign="top" align="left">&#x02717;</td>
</tr> <tr>
<td valign="top" align="left">CAC</td>
<td valign="top" align="left">Sensitivity</td>
<td valign="top" align="left">0.89 (0.25)</td>
<td valign="top" align="left">0.80 (0.32)</td>
<td valign="top" align="left">0.78 (0.33)</td>
<td valign="top" align="left">0.68 (0.38)</td>
<td valign="top" align="left">0.41 (0.48)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">FP volume/scan</td>
<td valign="top" align="left">73.6 (141)</td>
<td valign="top" align="left">19.8 (60.6)</td>
<td valign="top" align="left">64.5 (150)</td>
<td valign="top" align="left">25.8 (70)</td>
<td valign="top" align="left">132 (205)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">F1</td>
<td valign="top" align="left">0.66 (0.37)</td>
<td valign="top" align="left">0.66 (0.38)</td>
<td valign="top" align="left">0.41 (0.40)</td>
<td valign="top" align="left">0.49 (0.41)</td>
<td valign="top" align="left">0.16 (0.36)</td>
</tr>
<tr>
<td valign="top" align="left">LAD</td>
<td valign="top" align="left">Sensitivity</td>
<td valign="top" align="left">0.92 (0.21)</td>
<td valign="top" align="left">0.89 (0.27)</td>
<td valign="top" align="left">0.86 (0.28)</td>
<td valign="top" align="left">0.79 (0.33)</td>
<td valign="top" align="left">0.47 (0.48)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">FP volume/scan</td>
<td valign="top" align="left">31.6 (79.6)</td>
<td valign="top" align="left">13.9 (45.5)</td>
<td valign="top" align="left">44.5 (118)</td>
<td valign="top" align="left">20.2 (54.4)</td>
<td valign="top" align="left">55.8 (90.5)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">F1</td>
<td valign="top" align="left">0.79 (0.34)</td>
<td valign="top" align="left">0.74 (0.37)</td>
<td valign="top" align="left">0.48 (0.42)</td>
<td valign="top" align="left">0.56 (0.42)</td>
<td valign="top" align="left">0.24 (0.41)</td>
</tr>
<tr>
<td valign="top" align="left">LCX</td>
<td valign="top" align="left">Sensitivity</td>
<td valign="top" align="left">0.88 (0.29)</td>
<td valign="top" align="left">0.74 (0.44)</td>
<td valign="top" align="left">0.71 (0.45)</td>
<td valign="top" align="left">0.71 (0.46)</td>
<td valign="top" align="left">0.66 (0.48)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">FP volume/scan</td>
<td valign="top" align="left">19.7 (55.6)</td>
<td valign="top" align="left">0.13 (1.13)</td>
<td valign="top" align="left">0.17 (1.01)</td>
<td valign="top" align="left">0.02 (0.31)</td>
<td valign="top" align="left">1.60 (0.30)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">F1</td>
<td valign="top" align="left">0.67 (0.42)</td>
<td valign="top" align="left">0.74 (0.44)</td>
<td valign="top" align="left">0.69 (0.46)</td>
<td valign="top" align="left">0.70 (0.46)</td>
<td valign="top" align="left">0.66 (0.48)</td>
</tr>
<tr>
<td valign="top" align="left">RCA</td>
<td valign="top" align="left">Sensitivity</td>
<td valign="top" align="left">0.89 (0.26)</td>
<td valign="top" align="left">0.87 (0.30)</td>
<td valign="top" align="left">0.87 (0.31)</td>
<td valign="top" align="left">0.80 (0.38)</td>
<td valign="top" align="left">0.67 (0.47)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">FP volume/scan</td>
<td valign="top" align="left">30.1 (73.4)</td>
<td valign="top" align="left">6.80 (35.6)</td>
<td valign="top" align="left">21.3 (78.1)</td>
<td valign="top" align="left">6.64 (35.6)</td>
<td valign="top" align="left">77.6 (157)</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">F1</td>
<td valign="top" align="left">0.65 (0.42)</td>
<td valign="top" align="left">0.73 (0.41)</td>
<td valign="top" align="left">0.52 (0.46)</td>
<td valign="top" align="left">0.68 (0.44)</td>
<td valign="top" align="left">0.31 (0.46)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The method with different settings (using adversarial loss and classification loss in the CAC detection network, and false positive reduction stage) is tested on chest NCCT data and CCTA data. FP volume/scan is given in mm<sup>3</sup>.</p>
<p>The results are shown as average (standard deviation) for total CAC as well as for LAD, LCX, and RCA separately. <inline-formula><mml:math id="M15"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, adversarial loss; <inline-formula><mml:math id="M16"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, classification loss; CAC, coronary artery calcification; LAD, left anterior descending artery; LCX, left circumflex artery; RCA, right coronary artery.</p>
</table-wrap-foot>
</table-wrap>
<p>To establish the performance of the CNN adapted from NCCT to CCTA, the network was evaluated with the 313 labeled CCTA test scans. The adapted CNN obtained an average volume-wise sensitivity of 0.78, an average FP volume per scan of 73.9 mm<sup>3</sup> and an F1-score of 0.41. After the FP reduction, the proposed method achieved an average volume-wise sensitivity of 0.80 with an average FP volume per scan of 19.8 mm<sup>3</sup>, and F1 of 0.66. There were 36 patients without CAC but with FP detected by the proposed method, with an average FP volume per scan of 40 mm<sup>3</sup>. The Spearman correlation between automatically detected and reference CAC volume was 0.73. The Bland-Altman plots comparing automatically detected CAC volume with manually annotated reference are illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Bland-Altman plots comparing automatically detected CAC volume with the manual reference volume. 95% limits of agreement are represented by the formula: <italic>Difference</italic> &#x0003D; &#x000B1;1.96 &#x000D7; (&#x003C0;/2)<sup>0.5</sup>&#x000D7;(<italic>b</italic>&#x0002B;<italic>a</italic>&#x000D7;<italic>Mean</italic><sup>0.5</sup>), with <italic>a</italic> &#x0003D; 10.9 and <italic>b</italic> &#x0003D; &#x02212;17.8. Two outlier cases are colored orange. The Bland-Altman plot of lesions with volume less than 150 mm<sup>3</sup> is shown on the left and all lesions is shown on the right.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcvm-09-981901-g0003.tif"/>
</fig>
<p>Coronary CT angiography slices and corresponding automatic CAC detections for two outlier cases (marked orange in <xref ref-type="fig" rid="F3">Figure 3</xref>) are shown in <xref ref-type="fig" rid="F4">Figures 4a</xref>,<xref ref-type="fig" rid="F4">b</xref>. In addition, two representative cases from the labeled CCTA test set are shown in <xref ref-type="fig" rid="F4">Figures 4c</xref>,<xref ref-type="fig" rid="F4">d</xref>. For lesion-wise evaluation, the proposed method achieved an average sensitivity of 0.79 and FP lesions per scan of 1.06. The correlation between the number of automatically detected and manually annotated reference lesions was 0.69.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Automated CAC detection results in CCTA scans of four patients. The images in the first row show CCTA slices and the detected CACs are shown as overlay in the second row. Panels <bold>(a)</bold> and <bold>(b)</bold> illustrate the two largest outliers shown by orange dots in <xref ref-type="fig" rid="F3">Figure 3</xref>, and false negative CAC are indicated by orange circles. Panels <bold>(c)</bold> and <bold>(d)</bold> show two cases with correct automatic CAC detections.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcvm-09-981901-g0004.tif"/>
</fig>
</sec>
<sec>
<title>4.2. Ablation study</title>
<p>To establish whether our retraining of the original CAC scoring network on the source domain led to adequate performance, the CAC scoring network was evaluated on the NLST test set (Section CAC scoring on CCTA) and compared with the originally reported results (<xref ref-type="bibr" rid="B16">16</xref>). Results are listed in <xref ref-type="table" rid="T2">Table 2</xref> (column 3 showing NCCT results). Our retrained network obtained a sensitivity of 0.89, an average FP volume of 73.6 mm<sup>3</sup> per scan and F1 of 0.66. The sensitivity is in agreement with the results (0.84 - 0.91) reported in the original work (<xref ref-type="bibr" rid="B16">16</xref>), while the originally reported FP rate (40.7&#x02013;62.8 mm<sup>3</sup>) and therefore F1 (0.84&#x02013;0.89) slightly outperform our results.</p>
<p>To evaluate the performance of the two-stage CAC scoring networks trained on NCCT to CCTA, the trained CNNs were directly applied to CCTA test scans without adversarial domain adaptation learning. This led to an average sensitivity of 0.41, an average FP volume per scan of 139.7 mm<sup>3</sup>, and F1 of 0.16 (<xref ref-type="table" rid="T2">Table 2</xref>, column 7 showing the CCTA results). Subsequently, adding FP reduction led to an average sensitivity of 0.43, an average FP volume of 0.58 mm<sup>3</sup> and F1 of 0.41. Note that the FP reduction stage slightly improved the sensitivity as the region-growing algorithm (<xref ref-type="bibr" rid="B38">38</xref>) used to define the lesions from the voxels detected by the CNN may improve lesion segmentation and lead to better agreement with the manual reference that used the region-growing algorithm to define CAC lesions.</p>
<p>To investigate the benefit of using the adversarial loss and classification loss for domain adaptation, and FP reduction, additional experiments were performed. The proposed method obtained a volume-wise sensitivity of 0.80, average FP volume per scan of 19.8 mm<sup>3</sup>, and F1 of 0.66. Without FP reduction, the volume-wise sensitivity decreased to 0.78, average FP volume per scan increased to 64.5 mm<sup>3</sup> and consequently, F1 score decreased to 0.41. Furthermore, removing the classification loss <inline-formula><mml:math id="M17"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> from the objective function resulted in the volume-wise sensitivity of 0.68, average FP volume per scan of 25.8 mm<sup>3</sup>, and F1 of 0.49. Finally, as described above, removing the adversarial loss <inline-formula><mml:math id="M18"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">L</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> (i.e., without adversarial domain adaptation learning) led to sensitivity of 0.41, FP volume of 139.7 mm<sup>3</sup> per scan, and F1 of 0.16. Detailed results are listed in <xref ref-type="table" rid="T2">Table 2</xref> columns 4&#x02013;7.</p>
</sec>
<sec>
<title>4.3. Comparison with previous work</title>
<p>The performance of the proposed method was compared with previously published methods that use deep learning for CAC scoring in CCTA scans (<xref ref-type="bibr" rid="B22">22</xref>&#x02013;<xref ref-type="bibr" rid="B25">25</xref>). Wolterink et al. (<xref ref-type="bibr" rid="B25">25</xref>) proposed a method that employed paired CNNs for CAC scoring. The first CNN was used to identify CAC-like voxels and the second CNN was used to reduce CAC-like negatives. Fischer et al. (<xref ref-type="bibr" rid="B22">22</xref>) proposed a method that firstly detected the coronary artery centerlines and then identified CAC in cross-sectional images along the detected centerlines using long short-term memory (LSTM). In the study by Liu et al. (<xref ref-type="bibr" rid="B23">23</xref>), a vessel focused 3D CNN was proposed for CAC detection. The coronary arteries were firstly extracted and straightened volumes were reformed along the coronary arteries. Thereafter, a CNN was used for CAC detection. The results as reported in the original work are listed in <xref ref-type="table" rid="T3">Table 3</xref>. These demonstrate that our unsupervised method achieved competitive performance. Given that the original implementations of these earlier studies are not publicly available, the results of the compared methods should be used as an indication only.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comparison with previously published results on automated coronary artery calcium scoring on CCTA.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th/>
<th/>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>Lesion-wise evaluation</bold></th>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>Volume-wise evaluation</bold></th>
</tr>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center"><bold>&#x00023; train</bold></th>
<th valign="top" align="center"><bold>&#x00023; test</bold></th>
<th valign="top" align="center"><bold>Sensitivity</bold></th>
<th valign="top" align="center"><bold>FP lesion</bold></th>
<th valign="top" align="center"><bold>F1</bold></th>
<th valign="top" align="center"><bold>Sensitivity</bold></th>
<th valign="top" align="center"><bold>FP volume</bold></th>
<th valign="top" align="center"><bold>F1</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Wolterink et al. (<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="top" align="center">150</td>
<td valign="top" align="center">100</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
</tr>
<tr>
<td valign="top" align="left">Liu et al. (<xref ref-type="bibr" rid="B23">23</xref>)</td>
<td valign="top" align="center">80</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">0.83</td>
</tr>
<tr>
<td valign="top" align="left">Fischer et al. (<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="top" align="center">232</td>
<td valign="top" align="center">194</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center">0.20</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">&#x02013;</td>
</tr>
<tr>
<td valign="top" align="left">Ours</td>
<td valign="top" align="center">&#x02013;</td>
<td valign="top" align="center">313</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">1.06</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">19.8</td>
<td valign="top" align="center">0.66</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The number of labeled CCTA scans used for training (&#x00023; train) and testing (&#x00023; test) are listed. Performance [sensitivity, false positives (FP) per scan and F1-score] using CAC lesions and volume are given.</p>
</table-wrap-foot>
</table-wrap>
</sec></sec>
<sec sec-type="discussion" id="s5">
<title>5. Discussion</title>
<p>In this work, we have utilized an unsupervised domain adaptation method described by Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>) employing a CNN architecture which enables CAC scoring in CCTA while learning from annotated non-representative CT scans without contrast and representative CCTA without reference annotations. For this, the first-stage CNN as previously designed by Lessmann et al. for CAC scoring (<xref ref-type="bibr" rid="B16">16</xref>) is divided into a feature generator and a classifier. The feature generator is adapted from NCCT to CCTA through adversarial unsupervised domain adaptation and the classifier trained on NCCT is reused. An adversarial loss and classification loss on source domain are used as the objective function. The results demonstrate that the method achieves a competitive performance.</p>
<p>Like previous methods for automatic calcium scoring, our method consists of two distinct stages. In the first stage, a CNN for CAC detection and labeling in non-contrast chest CT from previous work (<xref ref-type="bibr" rid="B16">16</xref>) is adjusted for the CAC scoring in CCTA. The ablation study showed that our retraining of the CAC detection CNN did not lead to the same performance reported in the original manuscript (<xref ref-type="bibr" rid="B16">16</xref>). However, there are several differences. First, although training and test scans originate from the same set, exact division on the scans into training and test set differs. Second, the original work reported results separately for sharp and soft kernel CT reconstructions, while we did not distinguish between these. Like in the original work, a second stage is used to reduce the number of false positives. Using the described approach for CAC scoring in CCTA, simple image processing (restricting allowed volume of CAC, limiting the analysis to the volume of interest) substantially reduced false positive detections. Nevertheless, retrospective analysis showed that occasionally false positives remain inside the heart and in the coronary arteries with high HU value. Visual analysis of the results showed small false positive detections in the distal RCA representing contrast material. This is also reflected in the limited Spearman correlation coefficient between the detected and reference lesions. This might be due to the varying contrast levels of CCTA, where parts of the coronary artery lumen had a very high HU value. Likely, a locally defined threshold for the extraction of CAC would alleviate the problem. Future research should investigate whether this would benefit the overall performance. In a few cases, false positive detections represented extra-coronary calcifications. 
Those were aortic calcifications in the vicinity of the coronary ostia or calcifications in the aortic valves, which is not uncommon for automatic calcium scoring methods (<xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>Retrospective analysis of the outliers shown in <xref ref-type="fig" rid="F3">Figures 3</xref> and <xref ref-type="fig" rid="F4">4</xref> showed that in one case, a large CAC in the RCA (625 mm<sup>3</sup>) was detected by the CNN but removed in the FP reduction stage because its volume exceeded the maximum expected CAC volume. In the other case, a large CAC in the LCX (313 mm<sup>3</sup>) was not detected by the CNN. In our training set, the median (Q1, Q3) CAC was 7.1 (1.6, 29.2) mm<sup>3</sup> and the 95th percentile was 188 mm<sup>3</sup>. This shows that the volumes of our false negatives substantially exceeded CAC examples in the training set. Adding examples of large CAC lesions in the training set or learning specifically focused on rare CAC examples might improve the performance.</p>
<p>To train the CNN for detection and labeling of CAC, three different data sets were used. First, we reused the CNN trained on a large set of labeled chest CTs without contrast enhancement. To achieve unsupervised domain adaptation, non-representative labeled cardiac CT without contrast and representative unlabeled CCTA were used. Future work could investigate the optimal size of each set and the optimal way of injecting different data into the training, e.g., training the CNN with different non-contrast CT scan types, refinement with specific data or introducing different data in the domain adaptation stage.</p>
<p>To make the cross domain training stable with unpaired data, the classification loss on the source domain was used. For cross domain learning with paired data, a feature-wise loss could be used (<xref ref-type="bibr" rid="B31">31</xref>). Given that we do not have paired data and did not register the images to a common space, this kind of loss is not applicable in our study. In our work, the feature generator was adapted from the source domain to the target domain; however, the classifier was directly reused. This could be done even though the input images to the feature generator are from different domains because the classifier performs the same task with aligned feature distributions.</p>
<p>To transfer the knowledge of CAC detection from NCCT to CCTA, unsupervised domain adaptation was used. When a limited set of annotated training data from the target domain is available, it is common to pretrain the network with labeled data from the source domain and fine-tune the network with this small set (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B39">39</xref>). In our case, annotated training data from the target domain is not available and unsupervised domain adaptation allows the training with labeled data from the source domain and unlabeled data from the target domain. Future work could investigate whether a small set of annotated images from the target domain may benefit the performance, possibly also by combining transfer learning approaches with unsupervised domain adaptation.</p>
<p>In this study, following the work by Dou et al. (<xref ref-type="bibr" rid="B8">8</xref>), the knowledge about CAC detection was transferred from NCCT to CCTA by aligning the feature distributions between the two domains. However, Chen et al. (<xref ref-type="bibr" rid="B7">7</xref>) performed unsupervised domain adaptation by aligning the domains in both image and feature perspectives. The image alignment was used to transform the image appearance and narrow the domain shift between source and target domains. However, we opted for feature alignment only because the lack of visible anatomical boundaries in non-contrast scans (arteries, cardiac chambers) to guide the image registration renders image alignment a highly challenging task. Moreover, very small CAC may disappear due to registration, which would not be beneficial for learning.</p>
<p>Comparing the proposed method with previously published deep learning methods on CAC scoring in CCTA scans showed that the proposed method achieved a competitive sensitivity. However, the number of false positive detections did not reach the performance of supervised methods. Methods (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>) that limited the ROI for CAC scoring with coronary artery extraction achieved a lower number of FP detections. Future research could investigate whether limiting the analysis to the vicinity of the coronary arteries as proposed by Fischer et al. (<xref ref-type="bibr" rid="B22">22</xref>) and Liu et al. (<xref ref-type="bibr" rid="B23">23</xref>) would be beneficial. For this, tracking the coronary artery centerline (<xref ref-type="bibr" rid="B40">40</xref>) could be used.</p>
<p>The Bland-Altman plot shown in <xref ref-type="fig" rid="F3">Figure 3</xref> shows heteroskedastic-like behavior of CAC scores. This behavior is not uncommon for CAC scoring methods, because typically errors tend to increase with higher CAC scores (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B24">24</xref>). False negative detections tend to be larger in patients with higher calcium burden, possibly because their lesions tend to be larger. Moreover, larger false positive detections often consist of non-coronary calcifications, e.g., aortic calcifications in the vicinity of the coronary ostia or cardiac valves, which are also typically larger in patients with a higher coronary calcium burden. To calculate the 95% confidence intervals of the Bland-Altman plots we accounted for the heteroskedastic behavior by modeling the variation in absolute differences (<xref ref-type="bibr" rid="B33">33</xref>).</p>
<p>While CCTA scans are mainly made to provide important information on the presence and the amount of non-calcified plaque and stenosis, cardiac CT scans without contrast enhancement are the reference modality for quantification of calcified coronary artery plaque. Hence, a limitation of our method is its ability to quantify calcified plaque in CCTA only. To fully exploit the information contained in CCTA, our further work will focus on extending the method to quantification of calcified and non-calcified plaque and stenosis.</p>
<p>In this work, the unsupervised domain adaptation method was trained with 200 NCCT scans and 200 CCTA scans. As with any machine learning method, training the unsupervised domain adaptation method with more scans that include more diversity would likely lead to more accurate performance. Finding the optimal set size should be a topic of future research.</p>
<p>In the literature, a wide range in inter-observer agreement for CAC quantification in CCTA has been reported. Specifically, 11% variability in CAC volume was reported when utilizing a scan-specific threshold (<xref ref-type="bibr" rid="B41">41</xref>) and 13&#x02013;25% when using manual delineation of CAC (<xref ref-type="bibr" rid="B42">42</xref>). Moreover, a correlation of CAC volume between observers of 0.89&#x02013;0.98 has been reported (<xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B43">43</xref>). In the current study the variability between automatic and reference scores was 21%, with a correlation of 0.73. Given that no clinically used risk categories are defined based on CAC volume or other CAC scores quantified from CCTA, it remains unclear whether the obtained errors impact clinical decision-making. Therefore, further work needs to investigate the value of the extracted CAC scores for predicting cardiovascular events.</p>
<p>In conclusion, an unsupervised domain adaptation method for CAC scoring that transfers knowledge from NCCT with reference labels to CCTA without reference labels has been presented. The results show that the method achieves a competitive performance. This may allow for better utilization of the existing large and annotated data sets and extend applicability to diverse CT scans without the requirement of extra annotations.</p></sec>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The data analyzed in this study is subject to the following licenses/restrictions: The data from NLST for this study can be requested at the provider. The cardiac NCCT and CCTA are in-house data. Requests to access these datasets should be directed to <ext-link ext-link-type="uri" xlink:href="https://cdas.cancer.gov/datasets/nlst/">https://cdas.cancer.gov/datasets/nlst/</ext-link>.</p></sec>
<sec id="s7">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by University Medical Center Utrecht; Amsterdam University Medical Center. The patients/participants provided their written informed consent to participate in this study.</p></sec>
<sec id="s8">
<title>Author contributions</title>
<p>ZZ: conceptualized the study, developed the software, analyzed the data, and drafted the article and revised the manuscript. SV and II: conceptualized the study and drafted and revised the manuscript. NL, NP, and TL: acquired data and revised the manuscript. All authors contributed to the article and approved the submitted version.</p></sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>This work is part of the research program Deep Learning for Medical Image Analysis under project number P15-26 project 3 financed by the Dutch Technology Foundation with contribution by Philips Healthcare.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>Author II reports institutional research grants by Pie Medical Imaging, Esaote, Dutch Technology Foundation with participation of Pie Medical Imaging and Philips Healthcare (DLMedIA P15-26). Author TL reports institutional research grants by Pie Medical Imaging, Dutch Technology Foundation with participation of Pie Medical Imaging and Philips Healthcare (DLMedIA P15-26). The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
</body>
<back>
<ack><p>The authors thank the National Cancer Institute for access to NCI&#x00027;s data collected by the National Lung Screening Trial. The statements contained herein are solely those of the authors and do not represent or imply concurrence or endorsement by NCI.</p>
</ack><sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fcvm.2022.981901/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fcvm.2022.981901/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.PDF" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ker</surname> <given-names>J</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>Rao</surname> <given-names>J</given-names></name> <name><surname>Lim</surname> <given-names>T</given-names></name></person-group>. <article-title>Deep learning applications in medical image analysis</article-title>. <source>IEEE Access.</source> (<year>2017</year>) <volume>6</volume>:<fpage>9375</fpage>&#x02013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2017.2788044</pub-id><pub-id pub-id-type="pmid">35472844</pub-id></citation></ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Litjens</surname> <given-names>G</given-names></name> <name><surname>Kooi</surname> <given-names>T</given-names></name> <name><surname>Bejnordi</surname> <given-names>BE</given-names></name> <name><surname>Setio</surname> <given-names>AAA</given-names></name> <name><surname>Ciompi</surname> <given-names>F</given-names></name> <name><surname>Ghafoorian</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>A survey on deep learning in medical image analysis</article-title>. <source>Med Image Anal.</source> (<year>2017</year>) <volume>42</volume>:<fpage>60</fpage>&#x02013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id><pub-id pub-id-type="pmid">33901992</pub-id></citation></ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ben-David</surname> <given-names>S</given-names></name> <name><surname>Blitzer</surname> <given-names>J</given-names></name> <name><surname>Crammer</surname> <given-names>K</given-names></name> <name><surname>Pereira</surname> <given-names>F</given-names></name></person-group>. <article-title>Analysis of representations for domain adaptation</article-title>. In: <person-group person-group-type="editor"><name><surname>Sch&#x000F6;lkopf</surname> <given-names>B</given-names></name> <name><surname>Platt</surname> <given-names>J</given-names></name> <name><surname>Hoffman</surname> <given-names>T</given-names></name></person-group> editors. <source>Advances in Neural Information Processing Systems.</source> Vol. 19. Barcelona (<year>2006</year>). p. <fpage>137</fpage>&#x02013;<lpage>44</lpage>.</citation>
</ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>M</given-names></name> <name><surname>Zhao</surname> <given-names>S</given-names></name> <name><surname>Liu</surname> <given-names>H</given-names></name> <name><surname>Cai</surname> <given-names>D</given-names></name></person-group>. <article-title>Adversarial-learned loss for domain adaptation</article-title>. <source>Proc AAAI Confer Artif Intell.</source> (<year>2022</year>) <volume>34</volume>:<fpage>3521</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1609/aaai.v34i04.5757</pub-id></citation>
</ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>G</given-names></name> <name><surname>Lan</surname> <given-names>C</given-names></name> <name><surname>Zeng</surname> <given-names>W</given-names></name> <name><surname>Chen</surname> <given-names>Z</given-names></name></person-group>. <article-title>Metaalign: Coordinating domain alignment and classification for unsupervised domain adaptation</article-title>. In: <source>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</source> Nashville, TN (<year>2021</year>). p. <fpage>16643</fpage>&#x02013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR46437.2021.01637</pub-id></citation>
</ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>G</given-names></name> <name><surname>Lan</surname> <given-names>C</given-names></name> <name><surname>Zeng</surname> <given-names>W</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Chen</surname> <given-names>Z</given-names></name></person-group>. <article-title>ToAlign: task-oriented alignment for unsupervised domain adaptation</article-title>. <source>35th Conference on Neural Information Processing Systems (NeurIPS 2021).</source> (<year>2021</year>) <fpage>13834</fpage>&#x02013;<lpage>46</lpage>.</citation>
</ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>C</given-names></name> <name><surname>Dou</surname> <given-names>Q</given-names></name> <name><surname>Chen</surname> <given-names>H</given-names></name> <name><surname>Qin</surname> <given-names>J</given-names></name> <name><surname>Heng</surname> <given-names>PA</given-names></name></person-group>. <article-title>Unsupervised bidirectional cross-modality adaptation via deeply synergistic image and feature alignment for medical image segmentation</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<fpage>2494</fpage>&#x02013;<lpage>505</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2972701</pub-id><pub-id pub-id-type="pmid">32054572</pub-id></citation></ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname> <given-names>Q</given-names></name> <name><surname>Ouyang</surname> <given-names>C</given-names></name> <name><surname>Chen</surname> <given-names>C</given-names></name> <name><surname>Chen</surname> <given-names>H</given-names></name> <name><surname>Glocker</surname> <given-names>B</given-names></name> <name><surname>Zhuang</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>PnP-Adanet: plug-and-play adversarial domain adaptation network at unpaired cross-modality cardiac segmentation</article-title>. <source>IEEE Access.</source> (<year>2019</year>) <volume>7</volume>:<fpage>99065</fpage>&#x02013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2929258</pub-id></citation>
</ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guan</surname> <given-names>H</given-names></name> <name><surname>Liu</surname> <given-names>M</given-names></name></person-group>. <article-title>Domain adaptation for medical image analysis: a survey</article-title>. <source>IEEE Trans Biomed Eng.</source> (<year>2021</year>) <volume>69</volume>:<fpage>1173</fpage>&#x02013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2021.3117407</pub-id><pub-id pub-id-type="pmid">34606445</pub-id></citation></ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>F</given-names></name> <name><surname>Zhuang</surname> <given-names>X</given-names></name></person-group>. <article-title>CF distance: a new domain discrepancy metric and application to explicit domain adaptation for cross-modality cardiac image segmentation</article-title>. <source>IEEE Trans Med Imaging.</source> (<year>2020</year>) <volume>39</volume>:<fpage>4274</fpage>&#x02013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.3016144</pub-id><pub-id pub-id-type="pmid">32784131</pub-id></citation></ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Agatston</surname> <given-names>AS</given-names></name> <name><surname>Janowitz</surname> <given-names>WR</given-names></name> <name><surname>Hildner</surname> <given-names>FJ</given-names></name> <name><surname>Zusmer</surname> <given-names>NR</given-names></name> <name><surname>Viamonte</surname> <given-names>M</given-names></name> <name><surname>Detrano</surname> <given-names>R</given-names></name></person-group>. <article-title>Quantification of coronary artery calcium using ultrafast computed tomography</article-title>. <source>J Amer Coll Cardiol.</source> (<year>1990</year>) <volume>15</volume>:<fpage>827</fpage>&#x02013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1016/0735-1097(90)90282-T</pub-id><pub-id pub-id-type="pmid">2407762</pub-id></citation></ref>
<ref id="B12">
<label>12.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hecht</surname> <given-names>HS</given-names></name></person-group>. <article-title>Coronary artery calcium scanning: past, present, and future</article-title>. <source>JACC Cardiovasc Imaging.</source> (<year>2015</year>) <volume>8</volume>:<fpage>579</fpage>&#x02013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2015.02.006</pub-id><pub-id pub-id-type="pmid">26965738</pub-id></citation></ref>
<ref id="B13">
<label>13.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>van Velzen</surname> <given-names>SG</given-names></name> <name><surname>Hampe</surname> <given-names>N</given-names></name> <name><surname>de Vos</surname> <given-names>BD</given-names></name> <name><surname>I&#x00161;gum</surname> <given-names>I</given-names></name></person-group>. <article-title>Artificial intelligence-based evaluation of coronary calcium</article-title>. In: <person-group person-group-type="editor"><name><surname>De Cecco</surname> <given-names>CN</given-names></name> <name><surname>van Assen</surname> <given-names>M</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name></person-group> editors. <source>Artificial Intelligence in Cardiothoracic Imaging. Contemporary Medical Imaging.</source> <publisher-loc>Cham</publisher-loc>: <publisher-name>Humana</publisher-name> (<year>2022</year>) p. <fpage>245</fpage>&#x02013;<lpage>57</lpage>.<pub-id pub-id-type="pmid">33285350</pub-id></citation></ref>
<ref id="B14">
<label>14.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hampe</surname> <given-names>N</given-names></name> <name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>Van Velzen</surname> <given-names>SG</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <name><surname>I&#x00161;gum</surname> <given-names>I</given-names></name></person-group>. <article-title>Machine learning for assessment of coronary artery disease in cardiac CT: a survey</article-title>. <source>Front Cardiovasc Med.</source> (<year>2019</year>) <volume>6</volume>:<fpage>172</fpage>. <pub-id pub-id-type="doi">10.3389/fcvm.2019.00172</pub-id><pub-id pub-id-type="pmid">32039237</pub-id></citation></ref>
<ref id="B15">
<label>15.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Litjens</surname> <given-names>G</given-names></name> <name><surname>Ciompi</surname> <given-names>F</given-names></name> <name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>de Vos</surname> <given-names>BD</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <name><surname>Teuwen</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>State-of-the-art deep learning in cardiovascular image analysis</article-title>. <source>JACC Cardiovasc Imaging.</source> (<year>2019</year>) <volume>12</volume>(<issue>8 Pt 1</issue>):<fpage>1549</fpage>&#x02013;<lpage>65</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2019.06.009</pub-id><pub-id pub-id-type="pmid">31395244</pub-id></citation></ref>
<ref id="B16">
<label>16.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lessmann</surname> <given-names>N</given-names></name> <name><surname>van Ginneken</surname> <given-names>B</given-names></name> <name><surname>Zreik</surname> <given-names>M</given-names></name> <name><surname>de Jong</surname> <given-names>PA</given-names></name> <name><surname>de Vos</surname> <given-names>BD</given-names></name> <name><surname>Viergever</surname> <given-names>MA</given-names></name> <etal/></person-group>. <article-title>Automatic calcium scoring in low-dose chest CT using deep neural networks with dilated convolutions</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2017</year>) <volume>37</volume>:<fpage>615</fpage>&#x02013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2017.2769839</pub-id><pub-id pub-id-type="pmid">29408789</pub-id></citation></ref>
<ref id="B17">
<label>17.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martin</surname> <given-names>SS</given-names></name> <name><surname>van Assen</surname> <given-names>M</given-names></name> <name><surname>Rapaka</surname> <given-names>S</given-names></name> <name><surname>Hudson Jr</surname> <given-names>HT</given-names></name> <name><surname>Fischer</surname> <given-names>AM</given-names></name> <name><surname>Varga-Szemes</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Evaluation of a deep learning&#x02013;based automated CT coronary artery calcium scoring algorithm</article-title>. <source>Cardiovasc Imaging.</source> (<year>2020</year>) <volume>13</volume>(<issue>2_Pt_1</issue>):<fpage>524</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2019.09.015</pub-id><pub-id pub-id-type="pmid">31734200</pub-id></citation></ref>
<ref id="B18">
<label>18.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van den Oever</surname> <given-names>LB</given-names></name> <name><surname>Cornelissen</surname> <given-names>L</given-names></name> <name><surname>Vonder</surname> <given-names>M</given-names></name> <name><surname>Xia</surname> <given-names>C</given-names></name> <name><surname>van Bolhuis</surname> <given-names>JN</given-names></name> <name><surname>Vliegenthart</surname> <given-names>R</given-names></name> <etal/></person-group>. <article-title>Deep learning for automated exclusion of cardiac CT examinations negative for coronary artery calcium</article-title>. <source>Eur J Radiol</source>. (<year>2020</year>) <volume>129</volume>:<fpage>109114</fpage>. <pub-id pub-id-type="doi">10.1016/j.ejrad.2020.109114</pub-id><pub-id pub-id-type="pmid">32531719</pub-id></citation></ref>
<ref id="B19">
<label>19.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Velzen</surname> <given-names>SG</given-names></name> <name><surname>Lessmann</surname> <given-names>N</given-names></name> <name><surname>Velthuis</surname> <given-names>BK</given-names></name> <name><surname>Bank</surname> <given-names>IE</given-names></name> <name><surname>van den Bongard</surname> <given-names>DH</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>Deep learning for automatic calcium scoring in CT: validation using multiple cardiac CT and chest CT protocols</article-title>. <source>Radiology</source>. (<year>2020</year>) <volume>295</volume>:<fpage>66</fpage>&#x02013;<lpage>79</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020191621</pub-id><pub-id pub-id-type="pmid">32043947</pub-id></citation></ref>
<ref id="B20">
<label>20.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeleznik</surname> <given-names>R</given-names></name> <name><surname>Foldyna</surname> <given-names>B</given-names></name> <name><surname>Eslami</surname> <given-names>P</given-names></name> <name><surname>Weiss</surname> <given-names>J</given-names></name> <name><surname>Alexander</surname> <given-names>I</given-names></name> <name><surname>Taron</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>Deep convolutional neural networks to predict cardiovascular risk from computed tomography</article-title>. <source>Nat Commun.</source> (<year>2021</year>) <volume>12</volume>:<fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1038/s41467-021-20966-2</pub-id><pub-id pub-id-type="pmid">33514711</pub-id></citation></ref>
<ref id="B21">
<label>21.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Al&#x00027;Aref</surname> <given-names>SJ</given-names></name> <name><surname>Maliakal</surname> <given-names>G</given-names></name> <name><surname>Singh</surname> <given-names>G</given-names></name> <name><surname>van Rosendael</surname> <given-names>AR</given-names></name> <name><surname>Ma</surname> <given-names>X</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <etal/></person-group>. <article-title>Machine learning of clinical variables and coronary artery calcium scoring for the prediction of obstructive coronary artery disease on coronary computed tomography angiography: analysis from the CONFIRM registry</article-title>. <source>Eur Heart J.</source> (<year>2020</year>) <volume>41</volume>:<fpage>359</fpage>&#x02013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1093/eurheartj/ehz565</pub-id><pub-id pub-id-type="pmid">31513271</pub-id></citation></ref>
<ref id="B22">
<label>22.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fischer</surname> <given-names>AM</given-names></name> <name><surname>Eid</surname> <given-names>M</given-names></name> <name><surname>De Cecco</surname> <given-names>CN</given-names></name> <name><surname>Gulsun</surname> <given-names>MA</given-names></name> <name><surname>Van Assen</surname> <given-names>M</given-names></name> <name><surname>Nance</surname> <given-names>JW</given-names></name> <etal/></person-group>. <article-title>Accuracy of an artificial intelligence deep learning algorithm implementing a recurrent neural network with long short-term memory for the automated detection of calcified plaques from coronary computed tomography angiography</article-title>. <source>J Thorac Imaging.</source> (<year>2020</year>) <volume>35</volume>:<fpage>S49</fpage>&#x02013;<lpage>57</lpage>.<pub-id pub-id-type="doi">10.1097/RTI.0000000000000491</pub-id><pub-id pub-id-type="pmid">32168163</pub-id></citation></ref>
<ref id="B23">
<label>23.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Jin</surname> <given-names>C</given-names></name> <name><surname>Feng</surname> <given-names>J</given-names></name> <name><surname>Du</surname> <given-names>Y</given-names></name> <name><surname>Lu</surname> <given-names>J</given-names></name> <name><surname>Zhou</surname> <given-names>J</given-names></name></person-group>. <article-title>A vessel-focused 3D convolutional network for automatic segmentation and classification of coronary artery plaques in cardiac CTA</article-title>. In: <source>International Workshop on Statistical Atlases and Computational Models of the Heart</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2018</year>). p. <fpage>131</fpage>&#x02013;<lpage>41</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-12029-0_15</pub-id></citation>
</ref>
<ref id="B24">
<label>24.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mu</surname> <given-names>D</given-names></name> <name><surname>Bai</surname> <given-names>J</given-names></name> <name><surname>Chen</surname> <given-names>W</given-names></name> <name><surname>Yu</surname> <given-names>H</given-names></name> <name><surname>Liang</surname> <given-names>J</given-names></name> <name><surname>Yin</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Calcium scoring at coronary CT angiography using deep learning</article-title>. <source>Radiology.</source> (<year>2022</year>) <volume>302</volume>:<fpage>309</fpage>&#x02013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2021211483</pub-id><pub-id pub-id-type="pmid">34812674</pub-id></citation></ref>
<ref id="B25">
<label>25.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <name><surname>de Vos</surname> <given-names>BD</given-names></name> <name><surname>van Hamersvelt</surname> <given-names>RW</given-names></name> <name><surname>Viergever</surname> <given-names>MA</given-names></name> <name><surname>I&#x00161;gum</surname> <given-names>I</given-names></name></person-group>. <article-title>Automatic coronary artery calcium scoring in cardiac CT angiography using paired convolutional neural networks</article-title>. <source>Med Image Anal</source>. (<year>2016</year>) <volume>34</volume>:<fpage>123</fpage>&#x02013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2016.04.004</pub-id><pub-id pub-id-type="pmid">27138584</pub-id></citation></ref>
<ref id="B26">
<label>26.</label>
<citation citation-type="journal"><person-group person-group-type="author"><collab>National Lung Screening Trial Research Team</collab></person-group>. <article-title>Reduced lung-cancer mortality with low-dose computed tomographic screening</article-title>. <source>N Engl J Med.</source> (<year>2011</year>) <volume>365</volume>:<fpage>395</fpage>&#x02013;<lpage>409</lpage>. <pub-id pub-id-type="doi">10.1056/NEJMoa1102873</pub-id><pub-id pub-id-type="pmid">21714641</pub-id></citation></ref>
<ref id="B27">
<label>27.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <name><surname>Takx</surname> <given-names>RA</given-names></name> <name><surname>Viergever</surname> <given-names>MA</given-names></name> <name><surname>I&#x00161;gum</surname> <given-names>I</given-names></name></person-group>. <article-title>Automatic coronary calcium scoring in non-contrast-enhanced ECG-triggered cardiac CT with ambiguity detection</article-title>. <source>IEEE Trans Med Imaging.</source> (<year>2015</year>) <volume>34</volume>:<fpage>1867</fpage>&#x02013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2015.2412651</pub-id><pub-id pub-id-type="pmid">25794387</pub-id></citation></ref>
<ref id="B28">
<label>28.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ritter</surname> <given-names>F</given-names></name> <name><surname>Boskamp</surname> <given-names>T</given-names></name> <name><surname>Homeyer</surname> <given-names>A</given-names></name> <name><surname>Laue</surname> <given-names>H</given-names></name> <name><surname>Schwier</surname> <given-names>M</given-names></name> <name><surname>Link</surname> <given-names>F</given-names></name> <etal/></person-group>. <article-title>Medical image analysis: a visual approach</article-title>. <source>IEEE Pulse.</source> (<year>2011</year>) <volume>2</volume>:<fpage>60</fpage>&#x02013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.1109/MPUL.2011.942929</pub-id><pub-id pub-id-type="pmid">22147070</pub-id></citation></ref>
<ref id="B29">
<label>29.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mylonas</surname> <given-names>I</given-names></name> <name><surname>Alam</surname> <given-names>M</given-names></name> <name><surname>Amily</surname> <given-names>N</given-names></name> <name><surname>Small</surname> <given-names>G</given-names></name> <name><surname>Chen</surname> <given-names>L</given-names></name> <name><surname>Yam</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Quantifying coronary artery calcification from a contrast-enhanced cardiac computed tomography angiography study</article-title>. <source>Eur Heart J Cardiovasc Imaging.</source> (<year>2014</year>) <volume>15</volume>:<fpage>210</fpage>&#x02013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1093/ehjci/jet144</pub-id><pub-id pub-id-type="pmid">23943125</pub-id></citation></ref>
<ref id="B30">
<label>30.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yosinski</surname> <given-names>J</given-names></name> <name><surname>Clune</surname> <given-names>J</given-names></name> <name><surname>Bengio</surname> <given-names>Y</given-names></name> <name><surname>Lipson</surname> <given-names>H</given-names></name></person-group>. <article-title>How transferable are features in deep neural networks?</article-title> In: Ghahramani Z, Welling M, Cortes C, Lawrence N, Weinberger KQ, editors. <source>Advances in Neural Information Processing Systems 27 (NIPS 2014)</source>. Montreal, QC (<year>2014</year>) <volume>27</volume>:<fpage>3320</fpage>&#x02013;<lpage>8</lpage>.</citation>
</ref>
<ref id="B31">
<label>31.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Tulder</surname> <given-names>G</given-names></name> <name><surname>de Bruijne</surname> <given-names>M</given-names></name></person-group>. <article-title>Learning cross-modality representations from multi-modal images</article-title>. <source>IEEE Trans Med Imaging.</source> (<year>2018</year>) <volume>38</volume>:<fpage>638</fpage>&#x02013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2018.2868977</pub-id><pub-id pub-id-type="pmid">30188817</pub-id></citation></ref>
<ref id="B32">
<label>32.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bruns</surname> <given-names>S</given-names></name> <name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>van den Boogert</surname> <given-names>TP</given-names></name> <name><surname>Runge</surname> <given-names>JH</given-names></name> <name><surname>Bouma</surname> <given-names>BJ</given-names></name> <name><surname>Henriques</surname> <given-names>JP</given-names></name> <etal/></person-group>. <article-title>Deep learning-based whole-heart segmentation in 4D contrast-enhanced cardiac CT</article-title>. <source>Comput Biol Med.</source> (<year>2022</year>) <volume>142</volume>:<fpage>105191</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.105191</pub-id><pub-id pub-id-type="pmid">35026571</pub-id></citation></ref>
<ref id="B33">
<label>33.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sevrukov</surname> <given-names>AB</given-names></name> <name><surname>Bland</surname> <given-names>JM</given-names></name> <name><surname>Kondos</surname> <given-names>GT</given-names></name></person-group>. <article-title>Serial electron beam CT measurements of coronary artery calcium: Has your patient&#x00027;s calcium score actually changed?</article-title> <source>Amer J Roentgenol.</source> (<year>2005</year>) <volume>185</volume>:<fpage>1546</fpage>&#x02013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.2214/AJR.04.1589</pub-id><pub-id pub-id-type="pmid">16304011</pub-id></citation></ref>
<ref id="B34">
<label>34.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arjovsky</surname> <given-names>M</given-names></name> <name><surname>Chintala</surname> <given-names>S</given-names></name> <name><surname>Bottou</surname> <given-names>L</given-names></name></person-group>. <article-title>Wasserstein generative adversarial networks</article-title>. In: <source>International Conference on Machine Learning.</source> PMLR. Sydney (<year>2017</year>). <volume>70</volume>:<fpage>214</fpage>&#x02013;<lpage>23</lpage>.</citation>
</ref>
<ref id="B35">
<label>35.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gulrajani</surname> <given-names>I</given-names></name> <name><surname>Ahmed</surname> <given-names>F</given-names></name> <name><surname>Arjovsky</surname> <given-names>M</given-names></name> <name><surname>Dumoulin</surname> <given-names>V</given-names></name> <name><surname>Courville</surname> <given-names>AC</given-names></name></person-group>. <article-title>Improved training of Wasserstein GANs</article-title>. <source>NIPS&#x00027;17: Proceedings of the 31st International Conference on Neural Information Processing Systems</source>. Long Beach, CA (<year>2017</year>) <volume>30</volume>:<fpage>5767</fpage>&#x02013;<lpage>77</lpage>.</citation>
</ref>
<ref id="B36">
<label>36.</label>
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Graves</surname> <given-names>A</given-names></name></person-group>. <article-title>Generating sequences with recurrent neural networks</article-title>. <source>arXiv Preprint</source>. (<year>2013</year>) arXiv:1308.0850.</citation>
</ref>
<ref id="B37">
<label>37.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Paszke</surname> <given-names>A</given-names></name> <name><surname>Gross</surname> <given-names>S</given-names></name> <name><surname>Massa</surname> <given-names>F</given-names></name> <name><surname>Lerer</surname> <given-names>A</given-names></name> <name><surname>Bradbury</surname> <given-names>J</given-names></name> <name><surname>Chanan</surname> <given-names>G</given-names></name> <etal/></person-group>. <article-title>PyTorch: an imperative style, high-performance deep learning library</article-title>. In: <source>Advances in Neural Information Processing Systems.</source> Vol. 32. Curran Associates, Inc. (<year>2019</year>). p. <fpage>8024</fpage>&#x02013;<lpage>35</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf">http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf</ext-link>.</citation>
</ref>
<ref id="B38">
<label>38.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hojjatoleslami</surname> <given-names>S</given-names></name> <name><surname>Kittler</surname> <given-names>J</given-names></name></person-group>. <article-title>Region growing: a new approach</article-title>. <source>IEEE Trans Image Process.</source> (<year>1998</year>) <volume>7</volume>:<fpage>1079</fpage>&#x02013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1109/83.701170</pub-id><pub-id pub-id-type="pmid">18276325</pub-id></citation></ref>
<ref id="B39">
<label>39.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Minaee</surname> <given-names>S</given-names></name> <name><surname>Kafieh</surname> <given-names>R</given-names></name> <name><surname>Sonka</surname> <given-names>M</given-names></name> <name><surname>Yazdani</surname> <given-names>S</given-names></name> <name><surname>Soufi</surname> <given-names>GJ</given-names></name></person-group>. <article-title>Deep-COVID: predicting COVID-19 from chest X-ray images using deep transfer learning</article-title>. <source>Med Image Anal.</source> (<year>2020</year>) <volume>65</volume>:<fpage>101794</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101794</pub-id><pub-id pub-id-type="pmid">32781377</pub-id></citation></ref>
<ref id="B40">
<label>40.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wolterink</surname> <given-names>JM</given-names></name> <name><surname>van Hamersvelt</surname> <given-names>RW</given-names></name> <name><surname>Viergever</surname> <given-names>MA</given-names></name> <name><surname>Leiner</surname> <given-names>T</given-names></name> <name><surname>I&#x00161;gum</surname> <given-names>I</given-names></name></person-group>. <article-title>Coronary artery centerline extraction in cardiac CT angiography using a CNN-based orientation classifier</article-title>. <source>Med Image Anal</source>. (<year>2019</year>) <volume>51</volume>:<fpage>46</fpage>&#x02013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2018.10.005</pub-id><pub-id pub-id-type="pmid">30388501</pub-id></citation></ref>
<ref id="B41">
<label>41.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>&#x000D8;vrehus</surname> <given-names>KA</given-names></name> <name><surname>Schuhbaeck</surname> <given-names>A</given-names></name> <name><surname>Marwan</surname> <given-names>M</given-names></name> <name><surname>Achenbach</surname> <given-names>S</given-names></name> <name><surname>N&#x000F8;rgaard</surname> <given-names>BL</given-names></name> <name><surname>B&#x000F8;tker</surname> <given-names>HE</given-names></name> <etal/></person-group>. <article-title>Reproducibility of semi-automatic coronary plaque quantification in coronary CT angiography with sub-mSv radiation dose</article-title>. <source>J Cardiovasc Comp Tomogr.</source> (<year>2016</year>) <volume>10</volume>:<fpage>114</fpage>&#x02013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcct.2015.11.003</pub-id><pub-id pub-id-type="pmid">27576861</pub-id></citation></ref>
<ref id="B42">
<label>42.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cheng</surname> <given-names>VY</given-names></name> <name><surname>Nakazato</surname> <given-names>R</given-names></name> <name><surname>Dey</surname> <given-names>D</given-names></name> <name><surname>Gurudevan</surname> <given-names>S</given-names></name> <name><surname>Tabak</surname> <given-names>J</given-names></name> <name><surname>Budoff</surname> <given-names>MJ</given-names></name> <etal/></person-group>. <article-title>Reproducibility of coronary artery plaque volume and composition quantification by 64-detector row coronary computed tomographic angiography: an intraobserver, interobserver, and interscan variability study</article-title>. <source>J Cardiovasc Comp Tomogr.</source> (<year>2009</year>) <volume>3</volume>:<fpage>312</fpage>&#x02013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcct.2009.07.001</pub-id><pub-id pub-id-type="pmid">19709947</pub-id></citation></ref>
<ref id="B43">
<label>43.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dey</surname> <given-names>D</given-names></name> <name><surname>Cheng</surname> <given-names>VY</given-names></name> <name><surname>Slomka</surname> <given-names>PJ</given-names></name> <name><surname>Nakazato</surname> <given-names>R</given-names></name> <name><surname>Ramesh</surname> <given-names>A</given-names></name> <name><surname>Gurudevan</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>Automated 3-dimensional quantification of noncalcified and calcified coronary plaque from coronary CT angiography</article-title>. <source>J Cardiovasc Comp Tomogr.</source> (<year>2009</year>) <volume>3</volume>:<fpage>372</fpage>&#x02013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcct.2009.09.004</pub-id><pub-id pub-id-type="pmid">20083056</pub-id></citation></ref>
</ref-list>
</back>
</article>