<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cardiovasc. Med.</journal-id>
<journal-title>Frontiers in Cardiovascular Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cardiovasc. Med.</abbrev-journal-title>
<issn pub-type="epub">2297-055X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcvm.2023.1213290</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Cardiovascular Medicine</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Quality control-driven deep ensemble for accountable automated segmentation of cardiac magnetic resonance LGE and VNE images</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Gonzales</surname><given-names>Ricardo A.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/1454339/overview"/></contrib>
<contrib contrib-type="author"><name><surname>Ib&#x00E1;&#x00F1;ez</surname><given-names>Daniel H.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2289634/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Hann</surname><given-names>Evan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Popescu</surname><given-names>Iulia A.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Burrage</surname><given-names>Matthew K.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Lee</surname><given-names>Yung P.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Altun</surname><given-names>&#x0130;brahim</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Weintraub</surname><given-names>William S.</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Kwong</surname><given-names>Raymond Y.</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Kramer</surname><given-names>Christopher M.</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Neubauer</surname><given-names>Stefan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref></contrib>
<contrib contrib-type="author">
<collab>Hypertrophic Cardiomyopathy Registry (HCMR) Investigators</collab></contrib>
<contrib contrib-type="author">
<collab>Oxford Acute Myocardial Infarction (OxAMI) Study</collab>
<xref ref-type="author-notes" rid="an1"><sup>&#x2020;</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Ferreira</surname><given-names>Vanessa M.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2121058/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Zhang</surname><given-names>Qiang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1822921/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Piechnik</surname><given-names>Stefan K.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1533164/overview" /></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><addr-line>Oxford Centre for Clinical Magnetic Resonance Research (OCMR), Division of Cardiovascular Medicine, Radcliffe Department of Medicine, John Radcliffe Hospital</addr-line>, <institution>University of Oxford</institution>, <addr-line>Oxford</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff2"><label><sup>2</sup></label><institution>Artificio</institution>, <addr-line>Cambridge, MA</addr-line>, <country>United States</country></aff>
<aff id="aff3"><label><sup>3</sup></label><addr-line>Faculty of Medicine</addr-line>, <institution>University of Queensland</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<aff id="aff4"><label><sup>4</sup></label><addr-line>MedStar Health Research Institute</addr-line>, <institution>Georgetown University</institution>, <addr-line>Washington, DC</addr-line>, <country>United States</country></aff>
<aff id="aff5"><label><sup>5</sup></label><addr-line>Cardiovascular Division, Department of Medicine, Brigham and Women&#x2019;s Hospital</addr-line>, <institution>Harvard Medical School</institution>, <addr-line>Boston, MA</addr-line>, <country>United States</country></aff>
<aff id="aff6"><label><sup>6</sup></label><addr-line>Department of Medicine</addr-line>, <institution>University of Virginia Health System</institution>, <addr-line>Charlottesville, VA</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited by:</bold> Grigorios Korosoglou, GRN Klinik Weinheim, Germany</p></fn>
<fn fn-type="edited-by"><p><bold>Reviewed by:</bold> Thomas Hadler, Charite University Medicine Berlin, Germany; Elisa Scalco, National Research Council (CNR), Italy</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Ricardo A. Gonzales <email>ricardo.gonzales@cardiov.ox.ac.uk</email></corresp>
<fn id="an1"><label><sup>&#x2020;</sup></label><p>A list of HCMR and OxAMI Investigators is provided in <xref ref-type="sec" rid="s111">Supplementary Material</xref></p></fn>
</author-notes>
<pub-date pub-type="epub"><day>11</day><month>09</month><year>2023</year></pub-date>
<pub-date pub-type="collection"><year>2023</year></pub-date>
<volume>10</volume><elocation-id>1213290</elocation-id>
<history>
<date date-type="accepted"><day>16</day><month>08</month><year>2023</year></date>
<date date-type="received"><day>27</day><month>04</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Gonzales, Ib&#x00E1;&#x00F1;ez, Hann, Popescu, Burrage, Lee, Altun, Weintraub, Kwong, Kramer, Neubauer, Ferreira, Zhang and Piechnik.</copyright-statement>
<copyright-year>2023</copyright-year><copyright-holder>Gonzales, Ib&#x00E1;&#x00F1;ez, Hann, Popescu, Burrage, Lee, Altun, Weintraub, Kwong, Kramer, Neubauer, Ferreira, Zhang and Piechnik</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<sec><title>Background</title>
<p>Late gadolinium enhancement (LGE) cardiovascular magnetic resonance (CMR) imaging is the gold standard for non-invasive myocardial tissue characterisation. However, accurate segmentation of the left ventricular (LV) myocardium remains a challenge due to limited training data and lack of quality control. This study addresses these issues by leveraging generative adversarial networks (GAN)-generated virtual native enhancement (VNE) images to expand the training set and incorporating an automated quality control-driven (QCD) framework to improve segmentation reliability.</p>
</sec>
<sec><title>Methods</title>
<p>A dataset comprising 4,716 LGE images (from 1,363 patients with hypertrophic cardiomyopathy and myocardial infarction) was used for development. To generate additional clinically validated data, LGE data were augmented with a GAN-based generator to produce VNE images. LV was contoured on these images manually by clinical observers. To create diverse candidate segmentations, the QCD framework involved multiple U-Nets, which were combined using statistical rank filters. The framework predicted the Dice Similarity Coefficient (DSC) for each candidate segmentation, with the highest predicted DSC indicating the most accurate and reliable result. The performance of the QCD ensemble framework was evaluated on both LGE and VNE test datasets (309 LGE/VNE images from 103 patients), assessing segmentation accuracy (DSC) and quality prediction (mean absolute error (MAE) and binary classification accuracy).</p>
</sec>
<sec><title>Results</title>
<p>The QCD framework effectively and rapidly segmented the LV myocardium (&#x003C;1&#x2009;s per image) on both LGE and VNE images, demonstrating robust performance on both test datasets with similar mean DSC (LGE: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM1"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.075</mml:mn></mml:math></inline-formula>; VNE: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM2"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.071</mml:mn></mml:math></inline-formula>; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM3"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mi>s</mml:mi></mml:math></inline-formula>). Incorporating GAN-generated VNE data into the training process consistently led to enhanced performance for both individual models and the overall framework. The quality control mechanism yielded a high performance (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM4"><mml:mrow><mml:mi mathvariant="normal">MAE</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>0.043</mml:mn></mml:math></inline-formula>, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM5"><mml:mrow><mml:mi mathvariant="normal">accuracy</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mn>0.951</mml:mn></mml:math></inline-formula>) emphasising the accuracy of the quality control-driven strategy in predicting segmentation quality in clinical settings. Overall, no statistical difference (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM6"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mi>s</mml:mi></mml:math></inline-formula>) was found when comparing the LGE and VNE test sets across all experiments.</p>
</sec>
<sec><title>Conclusions</title>
<p>The QCD ensemble framework, leveraging GAN-generated VNE data and an automated quality control mechanism, significantly improved the accuracy and reliability of LGE segmentation, paving the way for enhanced and accountable diagnostic imaging in routine clinical use.</p>
</sec>
</abstract>
<kwd-group>
<kwd>data augmentation</kwd>
<kwd>generative adversarial networks</kwd>
<kwd>quality control</kwd>
<kwd>segmentation</kwd>
<kwd>late gadolinium enhancement</kwd>
<kwd>virtual native enhancement</kwd>
<kwd>cardiovascular magnetic resonance</kwd>
</kwd-group>
<contract-num rid="cn001">CH/16/1/32013</contract-num>
<contract-num rid="cn002">RE/18/3/34214</contract-num>
<contract-num rid="cn003">&#x00A0;</contract-num>
<contract-num rid="cn004">PPG/15/71/31731</contract-num>
<contract-num rid="cn005">U01HL117006-01A1</contract-num>
<contract-num rid="cn006">CH/16/1/32013</contract-num>
<contract-num rid="cn007">RG/13/1/30181</contract-num>
<contract-sponsor id="cn001">BHF<named-content content-type="fundref-id">10.13039/501100000274</named-content></contract-sponsor>
<contract-sponsor id="cn002">Oxford BHF Centre of Research Excellence<named-content content-type="fundref-id">10.13039/501100005617</named-content></contract-sponsor>
<contract-sponsor id="cn003">John Fell Oxford University Press Research Fund<named-content content-type="fundref-id">10.13039/501100004789</named-content></contract-sponsor>
<contract-sponsor id="cn004">BHF<named-content content-type="fundref-id">10.13039/501100000274</named-content></contract-sponsor>
<contract-sponsor id="cn005">National Heart, Lung, and Blood Institute<named-content content-type="fundref-id">10.13039/100000050</named-content></contract-sponsor>
<contract-sponsor id="cn006">British Heart Foundation<named-content content-type="fundref-id">10.13039/501100000274</named-content></contract-sponsor>
<contract-sponsor id="cn007">BHF Centre of Research Excellence, Oxford<named-content content-type="fundref-id">10.13039/501100005617</named-content></contract-sponsor>
<counts>
<fig-count count="6"/>
<table-count count="1"/><equation-count count="43"/><ref-count count="49"/><page-count count="0"/><word-count count="0"/></counts><custom-meta-wrap><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Cardiovascular Imaging</meta-value></custom-meta></custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro"><label>1.</label><title>Introduction</title>
<p>Late gadolinium enhancement (LGE) is a cardiovascular magnetic resonance (CMR) imaging technique that provides crucial information about the extent and location of myocardial damage, allowing clinicians to make accurate diagnoses and treatment decisions (<xref ref-type="bibr" rid="B1">1</xref>). It is considered the imaging gold standard for non-invasive myocardial tissue characterisation in a variety of cardiovascular diseases. LGE can identify areas of scar tissue or fibrosis (<xref ref-type="bibr" rid="B2">2</xref>), which are often associated with heart disease, such as myocardial infarction (MI) (<xref ref-type="bibr" rid="B3">3</xref>) and hypertrophic cardiomyopathy (<xref ref-type="bibr" rid="B4">4</xref>). Its quantification provides important information, such as scar-burden, which can predict adverse clinical outcomes like heart failure and sudden death, and may guide risk-modification strategies, such as the use of implantable cardioverter-defibrillator devices (<xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>To quantify the extent and location of myocardial pathology in LGE images, the left ventricular (LV) myocardium must be segmented. Manual contouring by experts has been the conventional method, but it is time-consuming and subjective. Recently, there has been growing development in automated segmentation methods to improve efficiency and reduce inter-observer variability. These methods can be broadly categorised as either model-driven (<xref ref-type="bibr" rid="B6">6</xref>) or data-driven (<xref ref-type="bibr" rid="B7">7</xref>). Model-driven methods use prior knowledge about the structure of the LV myocardium to guide the segmentation process, while data-driven methods use machine learning algorithms to learn from examples in a training dataset, typically yielding superior results than model-driven methods (<xref ref-type="bibr" rid="B8">8</xref>). However, despite progress in automated segmentation techniques, clinical translation has been limited by two major challenges. First, data-driven methods require a large amount of high-quality training data (<xref ref-type="bibr" rid="B9">9</xref>), which may not always be available, particularly for rare or heterogeneous diseases. Second, even with sufficient training data, unflagged segmentation errors can still occur (<xref ref-type="bibr" rid="B10">10</xref>), leading to inaccurate scar quantification, posing a significant concern for clinical decision-making. Thus, there remains a pressing need for a well-validated, automated quality control (QC) mechanism that can detect and flag segmentation errors in a reliable and efficient manner (<xref ref-type="bibr" rid="B11">11</xref>).</p>
<p>To overcome the challenge of data scarcity, or limited access, in medical applications, various approaches have been proposed, such as transfer learning, domain adaptation, and data augmentation (<xref ref-type="bibr" rid="B12">12</xref>). Transfer learning and domain adaptation aim to leverage knowledge from pre-existing datasets, while data augmentation methods generate new data by applying transformations to existing data. Among these approaches, data augmentation with synthetic data, using Generative Adversarial Networks (GANs), has gained popularity due to its potential to generate large amounts of diverse and realistic data, which can be particularly useful for limited datasets (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B14">14</xref>). However, the use of synthetic data for medical applications poses a challenge of clinical validation, as the generated data may not accurately reflect the true biological and pathological variations seen in real-world data (<xref ref-type="bibr" rid="B15">15</xref>). Therefore, it is crucial to validate the synthetic data before using it for medical purposes.</p>
<p>Automated approaches for flagging inaccuracies in automatic segmentation have gained increasing attention in recent years (<xref ref-type="bibr" rid="B11">11</xref>). Post-analysis QC tools have been recently proposed to assess the reliability of segmentation outputs, which are considered the final indicator of a model&#x2019;s performance. These methods typically act as binary classifiers (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>), assigning correct/incorrect labels to a segmentation, or as regressors (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>), which attempt to infer well-known validation metrics or uncertainty estimates. While these approaches have been successfully applied to CMR T1 mapping (<xref ref-type="bibr" rid="B20">20</xref>) and short-axis cines (<xref ref-type="bibr" rid="B21">21</xref>), a QC pipeline for LGE segmentation&#x2014;an important clinical tool&#x2014;is still missing.</p>
<p>In this study, we present a novel approach for LGE segmentation that overcomes the challenges of both limited training data and lack of quality control for clinical applications. Our framework leverages the power of GAN-generated data, incorporating virtual native enhancement (VNE) images (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>), to further expand the training dataset with clinically-validated data. This emerging contrast-agent-free CMR modality exploits native signals to produce &#x201C;virtual&#x201D; LGE images. Additionally, we extend an automated quality control mechanism to flag problematic cases for focused inspection before clinical use. We build upon the quality control-driven (QCD) framework (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B20">20</xref>), which can predict a confidence metric in absence of ground truth.</p>
</sec>
<sec id="s2" sec-type="methods"><label>2.</label><title>Materials and methods</title>
<sec id="s2a"><label>2.1.</label><title>Imaging data</title>
<p>The development dataset of 4,716 LGE images (1,363 patients) was obtained from the following: (1) the multi-centre Hypertrophic Cardiomyopathy Registry study (<xref ref-type="bibr" rid="B24">24</xref>) (HCMR, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM7"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>3,286</mml:mn></mml:math></inline-formula> images from 1,129 patients, 24 centres); (2) the University of Oxford Centre for Clinical Magnetic Resonance Research clinical service (OCMR, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM8"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>712</mml:mn></mml:math></inline-formula> images from 109 patients), and (3) the Oxford Acute Myocardial Infarction study (<xref ref-type="bibr" rid="B25">25</xref>) (OxAMI, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM9"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>718</mml:mn></mml:math></inline-formula> images from 125 patients), with institutional review committee and ethics approvals. Altogether, the 4,716 LGE images comprised of 3,286 LGE images from 1,129 patients with hypertrophic cardiomyopathy, and 1,430 LGE images from 234 patients with MI (255 images from 65 patients with chronic MI; 1,175 LGE images from 169 patients with acute MI). CMR scanning was undertaken in Siemens MR scanners (Siemens Healthcare, Germany) with magnetic field strengths of 1.5T (71&#x0025; of data) and 3T (29&#x0025; of data). 
CMR protocols included cine steady-state free precession imaging, native and post-contrast T1 mapping using the ShMOLLI (Shortened Modified Look-Locker Inversion recovery) sequence (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B27">27</xref>), and LGE imaging acquired at around 10&#x2009;min after intravenous administration of 0.1 to 0.2&#x2009;mmol/kg of a gadolinium-based contrast agent, typically with the phase-sensitive inversion recovery sequence (<xref ref-type="bibr" rid="B24">24</xref>). Briefly, the manual quality control involved selection of uncorrupted, paired cines, T1 maps and LGE images, which were manually segmented by experienced trained observers (MKB, YPL and IA), in previous studies (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B28">28</xref>).</p>
</sec>
<sec id="s2b"><label>2.2.</label><title>Data augmentation using a generative adversarial network</title>
<p>The data were augmented with a conditional generative adversarial network (cGAN) approach to generate VNE images (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>) from paired short-axis cine and T1 map. These VNE images exploited native components, including native T1 mapping and pre-contrast cine frames throughout the cardiac cycle. This provided image contrast, alterations in myocardial tissue properties, myocardial structure (such as wall thickness/thinning), motion data of the cardiac wall, and more distinct myocardial borders. The deep learning generator processed these inputs to produce VNE images that closely resembled LGE images in terms of structure and contrast. The clinical utility of VNE lies in its ability to generate &#x201C;virtual&#x201D; LGE images without the need for gadolinium, enabling faster, lower-cost, and contrast-free CMR scans.</p>
<p>The VNE generator (<xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>) consisted of parallel convolutional neural network streams that processed cine frames and motion-corrected T1 maps (<xref ref-type="bibr" rid="B29">29</xref>) individually. Each stream utilised a six-level encoder-decoder U-Net structure (<xref ref-type="bibr" rid="B30">30</xref>). The encoder computed image features at various scales, with successive convolutional layers for feature extraction and downsampling at each level, offering a multiscale feature representation. The corresponding decoder fused these multiscale features to generate the final feature maps, with symmetrical upsampling layers and convolutions for sequential combination of the multiscale features. These feature maps from the streams were concatenated and fed into an additional two-level encoder-decoder block, which combined information from the different modalities to create the final VNE image in a late fusion manner. Each encoder-decoder block was followed by a tanh activation function.</p>
<fig id="F1" position="float"><label>Figure 1</label>
<caption><p>Data augmentation framework. A late gadolinium enhancement (LGE) image is augmented by using its paired short-axis cine and T1 map, producing a virtual native enhancement (VNE) image, using a modified conditional generative adversarial network approach. Parallel deep auto-encoders extract features from native signals, which are fused through a shallow autoencoder to derive a VNE image. The discriminators, D1 and D2, during training, are used to enhance the image &#x201C;clarity&#x201D; and the image &#x201C;realness&#x201D; with perceptual similarity, respectively.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g001.tif"/>
</fig>
<p>In the customised cGAN approach (<xref ref-type="bibr" rid="B31">31</xref>), the architecture included two discriminators, D1 and D2, modelled after the VGG16 model (<xref ref-type="bibr" rid="B32">32</xref>). Discriminator D1, aimed at verifying the &#x201C;clarity&#x201D; of larger images, used an expanded architecture with an input layer accommodating the resultant VNE and the input cine stack, which ensured sharper clearer images. This involved a series of convolutional layers, alternating between feature extraction and downsampling, each followed by leaky rectified linear unit activation functions. Discriminator D2, designed to check the &#x201C;realness&#x201D; in single-channel images, adopted a similar and more compact structure, processing both the resultant VNE and the paired LGE. The generator&#x2019;s objective was to create VNE images that had a high perceptual similarity (<xref ref-type="bibr" rid="B33">33</xref>) to LGE images and were indiscernible from LGE contrast images. The discriminator&#x2019;s objective was to differentiate between VNE and LGE images. After training the neural networks in an adversarial manner, we obtained a trained generator capable of translating native CMR signals into LGE-like representations.</p>
<p>With the previously trained VNE generator (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>), we expanded the LGE images in the development data by producing corresponding VNE images (<xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>), in independent datasets. Expansion of the imaging data was successfully carried out for all cases, except for the subset related to acute myocardial infarction, which is awaiting further validation before inclusion. All augmented data were also manually segmented. Through the utilisation of position-matched T1 maps and cine, the derived VNE closely resembled the position-matched LGE; however, in some cases, there were slight differences in slice position between the paired T1/cine and the final LGE, for instance, due to patient movement between the image acquisitions (<xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>, cases 5 and 6). Serendipitously, this introduced increased diversity and realism into the training data, thereby enhancing the robustness of the model.</p>
<fig id="F2" position="float"><label>Figure 2</label>
<caption><p>The resultant database includes the late gadolinium enhancement (LGE) data and virtual native enhancement (VNE) data, as ways of data augmentation and additional validation.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g002.tif"/>
</fig>
</sec>
<sec id="s2c"><label>2.3.</label><title>Quality control-driven ensemble framework</title>
<p>A quality control-driven ensemble framework (<xref ref-type="bibr" rid="B19">19</xref>) (<xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>) was developed to enhance the accuracy and reliability of the segmentation process by leveraging the strengths of multiple convolutional neural networks. This framework utilised various U-Nets (<xref ref-type="bibr" rid="B30">30</xref>) with different depths to create a diverse set of candidate segmentations. These segmentations were then combined using statistical rank filters in a pixel-wise fashion (<xref ref-type="bibr" rid="B34">34</xref>), further expanding the pool of segmentation candidates and improving robustness.</p>
<fig id="F3" position="float"><label>Figure 3</label>
<caption><p>Illustrative quality control-driven ensemble framework depicted with 3 (out of 6) U-Nets. (<bold>A</bold>) A late gadolinium enhancement (LGE) image is processed by (<bold>B</bold>) an ensemble of independent U-Net segmentation models to produce (<bold>C</bold>) single candidate segmentations (SCSs). (<bold>D</bold>) The SCSs are then combined via a pixel-wise label voting scheme to derive combined candidate segmentations (CCSs). (<bold>E</bold>) An association matrix of Dice Similarity Coefficients (DSC) is generated upon the agreement between SCSs and CCSs. The inter-candidate DSCs are supplied to the (<bold>F</bold>) linear regressors (LR), and (<bold>G</bold>) each model outputs the predicted DSC, in absence of ground truth (GT); finally, (<bold>H</bold>) the model with the highest predicted DSC and its corresponding automated segmentation output are selected on-the-fly.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g003.tif"/>
</fig>
<p>In the ensemble framework, six U-Nets (<xref ref-type="bibr" rid="B30">30</xref>), with depths ranging from 1 to 6 levels, were employed. Each U-Net consisted of an encoder and a decoder. The encoder had convolutional layers followed by dropout layers (<xref ref-type="bibr" rid="B35">35</xref>) for regularisation, with increasing dropout rate with each layer to mitigate overfitting. Post-convolution and dropout, a max pooling operation was applied. The decoder mirrored the encoder but used transposed convolutional layers for upscaling. It also utilised skip connections, coupling outputs from the decoder with corresponding encoder layers. The final layer underwent additional convolutions and a softmax activation to produce the final segmentation. The overall process, which was repeated for each depth, allowed the generation of diverse candidate segmentations, contributing to the ensemble&#x2019;s performance.</p>
<p>The automatic quality scoring mechanism at the core of the framework predicted the Dice Similarity Coefficient (DSC) for each candidate segmentation by exploiting their differences. It calculated the pairwise agreement, or inter-segmentation DSC matrix, between segmentations, capturing the overlap and divergence between different candidates. These DSC matrices were then fed into separate linear regression models for each candidate, with the target being the DSC between the candidate and the ground truth.</p>
<p>For each input image, the framework assigned a predicted DSC, with respect to the ground truth segmentation, to every candidate segmentation, both single and combined. The final segmentation was selected by identifying the candidate with the highest predicted DSC, indicating the most accurate and reliable result. This selection process was performed automatically by the framework, without any manual intervention. This approach effectively emulated a multidisciplinary clinical team, where the consistency among multiple expert opinions served as a marker for the best approach in managing complex cases. By incorporating this quality control-driven strategy, the ensemble framework aimed to improve overall segmentation performance and provide confidence metrics, particularly useful in clinical settings.</p>
</sec>
<sec id="s2d"><label>2.4.</label><title>Implementation</title>
<p>The data were augmented with the VNE technology using available co-located short-axis cines and ShMOLLI T1 maps, resulting in 3,541 VNE images. The development dataset was randomly partitioned into: (1) 85&#x0025; for the training dataset (4,092 LGE images and 2,917 VNE images from 1,158 patients); (2) 7.5&#x0025; for validation (309 LGE/VNE images from 102 patients); (3) 7.5&#x0025; for the test dataset (309 LGE/VNE images from 103 patients), per recommended guidelines (<xref ref-type="bibr" rid="B36">36</xref>). Image pixel values were scaled from 0 to 1 and zero-padded to <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM10"><mml:mn>256</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula>. For the segmentation models, the Adam method (<xref ref-type="bibr" rid="B37">37</xref>) was used for optimising the categorical cross-entropy loss, with a learning rate of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM11"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:msup><mml:mn>10</mml:mn><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>5</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula> for 200 epochs; an automated early stop was used to avoid overfitting, using the validation set. For the quality prediction models, a linear regressor for each candidate segmentation was fit with the inter-agreement between its corresponding candidate segmentation and the rest. These regressors were trained on the validation set to avoid autocorrelation with the training set. The models were trained and tested on TensorFlow (<xref ref-type="bibr" rid="B38">38</xref>) with an NVIDIA GeForce RTX 3090 GPU, taking approximately 11.5&#x2009;h.</p>
</sec>
<sec id="s2e"><label>2.5.</label><title>Evaluation</title>
<p>The performance of the QCD ensemble framework was evaluated for myocardial contours on both LGE and VNE test datasets, and across the main pathologies. The segmentation accuracy was assessed by DSC, comparing the agreement between the optimally selected mask and the ground-truth mask. The predicted segmentation accuracy was assessed in terms of the mean absolute error (MAE) and binary classification accuracy, with a DSC threshold at 0.7 (<xref ref-type="bibr" rid="B39">39</xref>). The former was used to measure the difference between the predicted DSC and the observed ground-truth DSC derived from the manual segmentation. The latter assessed whether the segmentations were classified into good (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM12"><mml:mo>&#x2265;</mml:mo></mml:math></inline-formula>0.7) or poor quality (&#x003C;0.7) to demonstrate the practical usage of the DSC prediction. In the evaluation, false positives occurred when the predicted DSC was <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM13"><mml:mo>&#x2265;</mml:mo></mml:math></inline-formula>0.7 but the real accuracy was &#x003C;0.7, while false negatives arose when the predicted DSC was &#x003C;0.7 despite the real accuracy being <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM14"><mml:mo>&#x2265;</mml:mo></mml:math></inline-formula>0.7. The threshold of 0.7 ensured a balance between sensitivity and specificity in the classification of good and bad quality segmentations. As evidenced in our prior works on aortic (<xref ref-type="bibr" rid="B19">19</xref>) and myocardial segmentation (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B40">40</xref>), this threshold provided a standard measure of segmentation quality across different studies. 
In this context, a DSC of 0.7 implied that 70&#x0025; of the segmentation correctly overlapped with the ground truth, which was considered to be an acceptable level of accuracy for our applications. A Wilcoxon signed-rank test was conducted using Python to determine if there was a statistically significant difference between the segmentation results obtained on LGE data and VNE data, paired when possible, and within pathology groups. <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM15"><mml:mi>P</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>0.05</mml:mn></mml:math></inline-formula> was considered significant. This analysis helped to ascertain the robustness of the model when segmenting both types of images and the potential benefits of incorporating VNE data into the training process.</p>
<sec id="s2e1"><label>2.5.1.</label><title>Comparative analysis</title>
<p>A comparative study was conducted to investigate the pipeline&#x2019;s main components performance and the impact of incorporating VNE data. Firstly, the performance of the deepest, top-performing employed U-Net (depth of 6 levels) and the QCD segmentation framework were assessed to highlight the benefit of a higher segmentation accuracy with a quality predictive capacity. Secondly, transversely, each experiment involved training with LGE data, VNE data, and both combined, to thoroughly evaluate the data augmentation capability of the GAN-generated VNE data. Thirdly, each experiment was also tested on LGE data, VNE data, and both combined, to exhibit the robustness of the proposed method. The segmentation accuracy and the quality prediction accuracy were assessed in all experiments, to compare the differences. Lastly, the extensive data augmentation techniques, proposed in the nnU-Net framework (<xref ref-type="bibr" rid="B41">41</xref>) were also implemented, to assess the added benefit of VNE data in the pipeline.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3.</label><title>Results</title>
<sec id="s3a"><label>3.1.</label><title>Segmentation and prediction accuracy</title>
<p>The scatter plots (<xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>) reflect the parity between the ground-truth DSC and the predicted DSC for the resultant framework output and for every candidate model output in the test set, being able to accurately predict from underperforming models to high-performing models. The QCD framework successfully and rapidly segmented the LV myocardium on LGE and VNE images. The QCD framework demonstrated robust segmentation performance on both LGE and VNE test datasets, with similar mean DSC (LGE: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM16"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.075</mml:mn></mml:math></inline-formula>; VNE: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM17"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.071</mml:mn></mml:math></inline-formula>; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM18"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mi>s</mml:mi></mml:math></inline-formula>). The QCD framework also exhibited robust segmentation performance across the main pathologies (hypertrophic cardiomyopathy: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM19"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.069</mml:mn></mml:math></inline-formula>; MI: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM20"><mml:mn>0.844</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.085</mml:mn></mml:math></inline-formula>; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM21"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mi>s</mml:mi></mml:math></inline-formula>). 
The mean absolute error (MAE) for the predicted DSC was low at <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM22"><mml:mn>0.043</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.043</mml:mn></mml:math></inline-formula>, demonstrating the accuracy of the quality control-driven strategy in predicting the segmentation quality. Moreover, using the DSC threshold of 0.7, the binary classification accuracy was high at 0.951, further emphasising the practical usefulness of the proposed ensemble framework in clinical settings. <xref ref-type="fig" rid="F5">Figures&#x00A0;5</xref> and <xref ref-type="fig" rid="F6">6</xref> show representative test cases of the QCD framework on LGE and VNE images, respectively, for true positive, true negative, false positive and false negative cases.</p>
<fig id="F4" position="float"><label>Figure 4</label>
<caption><p>Scatter plots of the observed ground-truth Dice Similarity Coefficient (DSC) (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM23"><mml:mi>x</mml:mi></mml:math></inline-formula>-axis) versus the predicted DSC (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM24"><mml:mi>y</mml:mi></mml:math></inline-formula>-axis) for myocardial contours in (<bold>A</bold>,<bold>B</bold>) late gadolinium enhancement (LGE) and (<bold>C</bold>,<bold>D</bold>) virtual native enhancement (VNE) images for the optimal candidates (<bold>A</bold>,<bold>C</bold>) and for all single (SCS) and combined segmentation (CCS) models (<bold>B</bold>,<bold>D</bold>). The shown overall binary classification accuracy is measured as the proportion of true results (true positive (TP) or true negative (TN)&#x2014;light blue background), in a population of both true and false results (false positive (FP) or false negative (FN)&#x2014;grey background), with a binary threshold of DSC <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM25"><mml:mo>&#x2265;</mml:mo></mml:math></inline-formula>0.7.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g004.tif"/>
</fig>
<fig id="F5" position="float"><label>Figure 5</label>
<caption><p>Examples of true positive (93.9&#x0025;), true negative (1.9&#x0025;), false positive (2.3&#x0025;) and false negative (1.9&#x0025;) for predicted quality-controlled (QC) segmentations in late gadolinium enhanced (LGE) images. The left ventricular myocardium is manually segmented in red and automatically segmented in green, from different single (SCS) and combined candidate segmentation (CCS) models. The corresponding observed ground-truth (GT) Dice Similarity Coefficient (DSC) and predicted DSC are provided at the bottom.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g005.tif"/>
</fig>
<fig id="F6" position="float"><label>Figure 6</label>
<caption><p>Examples of true positive (94.2&#x0025;), true negative (0.3&#x0025;), false positive (5.2&#x0025;) and false negative (0.3&#x0025;) for predicted quality-controlled (QC) segmentations in virtual native enhancement (VNE) images. The left ventricular myocardium is manually segmented in red and automatically segmented in green, from different single (SCS) and combined candidate segmentation (CCS) models. The corresponding observed ground-truth (GT) Dice Similarity Coefficient (DSC) and predicted DSC are provided at the bottom.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fcvm-10-1213290-g006.tif"/>
</fig>
</sec>
<sec id="s3b"><label>3.2.</label><title>Comparative analysis</title>
<p>The comparative analysis (<xref ref-type="table" rid="T1">Table&#x00A0;1</xref>) highlights the contribution of the GAN-generated VNE as data augmentation and the QCD segmentation framework as an automated quality control mechanism. Firstly, the individual performance of the deepest U-Net was on par with the ensemble performance of the QCD segmentation framework, which was also able to estimate the quality of the resultant segmentation. When analysing the results using all datasets for training and testing, the deepest U-Net achieved a segmentation accuracy of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM26"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.070</mml:mn></mml:math></inline-formula>, similar to the segmentation accuracy of the QCD segmentation framework. Secondly, the performance of the deepest U-Net, trained on only LGE data, completely generalised to the VNE test set, with a DSC of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM27"><mml:mn>0.836</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.082</mml:mn></mml:math></inline-formula> and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM28"><mml:mn>0.838</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.075</mml:mn></mml:math></inline-formula> for the LGE and VNE test sets, respectively. Similarly, the deepest U-Net, trained on only VNE data, with 30&#x0025; less data, also generalised to the LGE test set, with a DSC of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM29"><mml:mn>0.791</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.119</mml:mn></mml:math></inline-formula> and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM30"><mml:mn>0.824</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.084</mml:mn></mml:math></inline-formula> for the LGE and VNE test sets, respectively. 
These findings, similar to the ones with the ensemble framework, effectively support the resemblance of VNE to LGE images, and validate the data augmentation approach for virtually yielding the same performance as its counterpart.</p>
<table-wrap id="T1" position="float"><label>Table 1</label>
<caption><p>Comparative analysis of individual models (U-Net with depth of 6 levels) and quality control-driven (QCD) segmentation framework.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="center" rowspan="2" colspan="2">Models and training data</th>
<th valign="top" align="center" colspan="9">Test data</th>
</tr>
<tr>
<th valign="top" align="center" colspan="3">LGE (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM31"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>309</mml:mn></mml:math></inline-formula>)</th>
<th valign="top" align="center" colspan="3">VNE (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM32"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>309</mml:mn></mml:math></inline-formula>)</th>
<th valign="top" align="center" colspan="3">LGE + VNE (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM33"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>618</mml:mn></mml:math></inline-formula>)</th>
</tr>
<tr>
<th valign="top" align="left">Models</th>
<th valign="top" align="center">Training set</th>
<th valign="top" align="center">DSC</th>
<th valign="top" align="center">MAE</th>
<th valign="top" align="center">Acc.</th>
<th valign="top" align="center">DSC</th>
<th valign="top" align="center">MAE</th>
<th valign="top" align="center">Acc.</th>
<th valign="top" align="center">DSC</th>
<th valign="top" align="center">MAE</th>
<th valign="top" align="center">Acc.</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">U-Net</td>
<td valign="top" align="center">LGE</td>
<td valign="top" align="center">0.836</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">0.838</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">0.837</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
</tr>
<tr>
<td valign="top" align="center">U-Net</td>
<td valign="top" align="center">VNE</td>
<td valign="top" align="center">0.791</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">0.824</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">0.807</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
</tr>
<tr>
<td valign="top" align="center">U-Net</td>
<td valign="top" align="center">LGE + VNE</td>
<td valign="top" align="center">0.844</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center"><bold>0.846</bold></td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">0.845</td>
<td valign="top" align="center">&#x2014;</td>
<td valign="top" align="center">&#x2014;</td>
</tr>
<tr>
<td valign="top" align="center">QCD</td>
<td valign="top" align="center">LGE</td>
<td valign="top" align="center">0.835</td>
<td valign="top" align="center"><bold>0.042</bold></td>
<td valign="top" align="center"><bold>0.971</bold></td>
<td valign="top" align="center">0.838</td>
<td valign="top" align="center">0.046</td>
<td valign="top" align="center">0.922</td>
<td valign="top" align="center">0.837</td>
<td valign="top" align="center">0.044</td>
<td valign="top" align="center">0.947</td>
</tr>
<tr>
<td valign="top" align="center">QCD</td>
<td valign="top" align="center">VNE</td>
<td valign="top" align="center">0.799</td>
<td valign="top" align="center">0.057</td>
<td valign="top" align="center">0.922</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center"><bold>0.041</bold></td>
<td valign="top" align="center"><bold>0.958</bold></td>
<td valign="top" align="center">0.816</td>
<td valign="top" align="center">0.049</td>
<td valign="top" align="center">0.940</td>
</tr>
<tr>
<td valign="top" align="center"><bold>QCD</bold></td>
<td valign="top" align="center"><bold>LGE + VNE</bold></td>
<td valign="top" align="center"><bold>0.845</bold></td>
<td valign="top" align="center">0.042</td>
<td valign="top" align="center">0.958</td>
<td valign="top" align="center">0.845</td>
<td valign="top" align="center">0.043</td>
<td valign="top" align="center">0.945</td>
<td valign="top" align="center"><bold>0.845</bold></td>
<td valign="top" align="center"><bold>0.043</bold></td>
<td valign="top" align="center"><bold>0.951</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="table-fn1"><p>The models were trained on late gadolinium enhancement (LGE; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM34"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>4,092</mml:mn></mml:math></inline-formula>) and/or virtual native enhancement (VNE; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM35"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>2,917</mml:mn></mml:math></inline-formula>) data and tested on LGE (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM36"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>309</mml:mn></mml:math></inline-formula>) and/or VNE (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM37"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>309</mml:mn></mml:math></inline-formula>) data, evaluated by their segmentation performance with the mean Dice Similarity Coefficient (DSC) and the quality predictive capacity with the mean absolute error (MAE) and the binary classification accuracy (Acc.). The best results are highlighted in bold.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Thirdly, including the GAN-generated data consistently improved the performance in both experiments of the individual model and the QCD segmentation framework, in every experiment. For instance, the QCD segmentation framework trained on only LGE data yielded a mean DSC of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM38"><mml:mn>0.835</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.082</mml:mn></mml:math></inline-formula> and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM39"><mml:mn>0.838</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.080</mml:mn></mml:math></inline-formula> for the LGE and VNE test sets, respectively; whereas the QCD segmentation framework trained on both LGE and VNE data yielded a higher segmentation performance of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM40"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.075</mml:mn></mml:math></inline-formula> and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM41"><mml:mn>0.845</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.071</mml:mn></mml:math></inline-formula>, indicating the benefits of including VNE data in the training process for the segmentation accuracy. The quality predictive capacity also improved when trained and tested in both datasets. Lastly, the individual segmentation performance of the model trained with only LGE data including extensive data augmentation (<xref ref-type="bibr" rid="B41">41</xref>) (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM42"><mml:mn>0.846</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.072</mml:mn></mml:math></inline-formula>) was also surpassed by the same framework when the VNE data were added (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM43"><mml:mn>0.851</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.068</mml:mn></mml:math></inline-formula>). 
Overall, no significant differences were observed when comparing LGE and VNE test sets between the experiments.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4.</label><title>Discussion</title>
<p>In this study, we demonstrated that the proposed framework, leveraging GAN-generated VNE data and incorporating an automated quality control mechanism, significantly improved the accuracy and reliability of LGE segmentation. The comparative analysis demonstrated the benefits of using VNE data, with generally better image quality and more consistency (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>), in the training process and the effectiveness of the ensemble framework in enhancing segmentation performance. Moreover, the proposed framework revealed robustness in both LGE and VNE data. This represents an accountable pipeline for automated segmentation in the gold-standard and emerging contrast-agent-free modalities, paving the way for faster and more reliable diagnosis of myocardial damage.</p>
<p>Data scarcity or lack of access remains a persistent challenge in developing robust and reliable deep learning models for medical image segmentation, particularly in the context of LGE segmentation. This limitation stems from the high costs and ethical considerations associated with acquiring, labelling, and sharing large-scale annotated datasets, which often result in insufficient representation of diverse and rare pathological cases (<xref ref-type="bibr" rid="B9">9</xref>). As a consequence, models may underperform or fail to generalise well to unseen cases (<xref ref-type="bibr" rid="B10">10</xref>), hampering their clinical utility. The application of generating VNE images (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>) assured a data augmentation technique with more reliable image contrast. This significantly improves over prior methods of using synthetic LGE images (<xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B43">43</xref>), which had not been clinically-validated and were not designed for displaying LGE lesion signals. Addressing data scarcity through the generation of VNE images, as demonstrated in this study, can alleviate this issue by augmenting the already-available LGE data, or potentially substituting the data, leading to a more robust and reliable model that can better handle the complexity of clinical cases and ultimately improve patient care.</p>
<p>The presented work focused on the integration of an automated quality assurance framework, which was developed with a traditional encoder-decoder U-Net architecture (<xref ref-type="bibr" rid="B30">30</xref>) with different depths. The quality control-driven strategy provided reliable quality predictions, crucial for clinical decision-making. The adapted regression-based quality prediction scheme enables further exploitation of the diversity of different candidates, with a deterministic approach of assessing the agreement between candidates, proven to be more effective than the emerging Monte Carlo-based quality assurance scheme (<xref ref-type="bibr" rid="B40">40</xref>). Newer network architectures and advanced pre-processing schemes could be incorporated to increase such diversity, ranging from existing LGE segmentation approaches (<xref ref-type="bibr" rid="B44">44</xref>) to spatial transformation-based pre-processing (<xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B46">46</xref>). Future work will cover dataset extension, different candidate models, and scar burden evaluation.</p>
<p>The clinical implications of the proposed QCD ensemble framework are substantial, as it introduces an automated quality control mechanism for the first time in automated LGE segmentation, improving both accuracy and reliability. Moreover, the quality control-driven strategy allows for the identification and refinement of suboptimal segmentations, ensuring the system&#x2019;s efficiency and trustworthiness, paving the way for increased adoption in clinical workflows. This enhancement streamlines the diagnostic process, reduces contouring variability, and bolsters clinician confidence in automated segmentation results, potentially leading to easier scar burden quantification on a routine basis, better-informed treatment decisions and improved patient outcomes.</p>
<p>This work has some limitations. The main goal has been the fundamental task of the myocardial delineation, thus avoiding engaging the generalisation into the known difficulties of scar tissue quantification (<xref ref-type="bibr" rid="B47">47</xref>). In particular the diffuse, less structured and scattered lesions require lower segmentation thresholds, and the targets are subject to many methodological choices and biases in ground truth data (<xref ref-type="bibr" rid="B48">48</xref>, <xref ref-type="bibr" rid="B49">49</xref>). While our model was evaluated on an extensive international database, enabling potential generalisation to various conditions requiring LGEs, the findings have been primarily concentrated on patients with hypertrophic cardiomyopathy and MI, suggesting the need for future validation across a wider spectrum of pathologies. Lastly, our selection of the DSC threshold of 0.7 for discerning between acceptable and unacceptable segmentations, despite being effective in our prior work (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B40">40</xref>), might not address all the geometric properties of cardiac structures (<xref ref-type="bibr" rid="B10">10</xref>). We expect that proposed methods would generalise well to any single particular threshold or application, yet further research is needed to explore the available choices for the clinical routine use beyond the scope of this work.</p>
</sec>
<sec id="s5" sec-type="conclusions"><label>5.</label><title>Conclusion</title>
<p>In conclusion, our study presents a novel approach for automated LGE segmentation that overcomes the challenges of limited training data and lack of quality control, for clinical applications. By leveraging the power of GAN-generated VNE images and incorporating an automated quality control mechanism, we demonstrate the potential for improved automated segmentation performance and reliability. This framework could be seamlessly integrated into clinical studies, providing an efficient, quality-controlled and reliable tool for clinicians in diagnosing and managing patients with myocardial damage.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The data analysed in this study is subject to the following licenses/restrictions: HCMR, OCMR and OxAMI studies can be approached independently for data sharing upon reasonable request. De-identified clinical imaging data can be shared with interested parties, subject to ethical approval, consent, data-protection governance, data sharing agreements and funding available. Requests to access these datasets should be directed to HCMR, <ext-link ext-link-type="uri" xlink:href="https://hcmregistry.org">https://hcmregistry.org</ext-link>; OCMR, <ext-link ext-link-type="uri" xlink:href="https://www.rdm.ox.ac.uk">https://www.rdm.ox.ac.uk</ext-link>; OxAMI, <ext-link ext-link-type="uri" xlink:href="https://oxami.org.uk">https://oxami.org.uk</ext-link>.</p>
</sec>
<sec id="s7" sec-type="ethics-statement"><title>Ethics statement</title>
<p>Ethical approval was not required for the studies involving humans because the data used were retrospectively obtained from subjects of previously ethically-approved research studies. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s8" sec-type="author-contributions"><title>Author contributions</title>
<p>RG designed the study and drafted the manuscript. DI and QZ curated the database. RG and DI conducted the experiments. EH and IP assisted in the design of the algorithm. MB, YL and I&#x00307;A performed manual segmentation. WW, RK, CK, SN, HCMR and OxAMI study investigators acquired the original MRI datasets. VF, QZ and SP provided research guidance and conceived the study. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s9" sec-type="funding-information"><title>Funding</title>
<p>RG acknowledges support for his DPhil studies from the Clarendon Fund, the Balliol College and the Radcliffe Department of Medicine, University of Oxford. DI is funded by Artificio Inc. and supported by the Research Experience for Peruvian Undergraduates (REPU) Program. IP, MB, VF, and SP acknowledge support from the National Institute for Health Research (NIHR) Oxford Biomedical Research Centre at The Oxford University Hospitals NHS Foundation Trust. VF acknowledges support from the BHF (CH/16/1/32013). MB is supported by a British Heart Foundation (BHF) Clinical Research Training Fellowship (FS/19/65/34692). MB, VF, QZ and SP acknowledge support from the Oxford BHF Centre of Research Excellence (RE/18/3/34214). VF, QZ and SP acknowledge the John Fell Oxford University Press Research Fund. The authors thank Hypertrophic Cardiomyopathy Registry (HCMR) Investigators for the imaging and available ground truth data, which was collected under BHF (project grant PPG/15/71/31731), the National Heart, Lung, and Blood Institute (grant U01HL117006-01A1). The authors also thank the Oxford Acute Myocardial Infarction (OxAMI) study, supported by the British Heart Foundation (CH/16/1/32013) and BHF Centre of Research Excellence, Oxford (grant RG/13/1/30181).</p>
</sec>
<sec id="s10" sec-type="COI-statement"><title>Conflict of interest</title>
<p>VF, QZ, and SP have authorship rights for patent WO2021/044153 (&#x201C;Enhancement of Medical Images&#x201D;; granted March 11, 2021). EH, IP, VF, QZ and SP have authorship rights for patent WO/2020/161481 (&#x201C;Method and Apparatus for Quality Prediction&#x201D;; granted August 13, 2020). SP has patent authorship rights for US patent US20120078084A1 (&#x201C;Systems and Methods for Shortened Look Locker Inversion Recovery [Sh-MOLLI] Cardiac Gated Mapping of T1&#x201D;; granted March 15, 2016). DI was employed by Artificio Inc.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s111" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fcvm.2023.1213290/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fcvm.2023.1213290/full#supplementary-material</ext-link>.</p>
<supplementary-material id="SD1" content-type="local-data">
<media mimetype="application" mime-subtype="pdf" xlink:href="Datasheet1.pdf"/>
</supplementary-material>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>RJ</given-names></name><name><surname>Wu</surname><given-names>E</given-names></name><name><surname>Rafael</surname><given-names>A</given-names></name><name><surname>Chen</surname><given-names>EL</given-names></name><name><surname>Parker</surname><given-names>MA</given-names></name><name><surname>Simonetti</surname><given-names>O</given-names></name></person-group>, et al. <article-title>The use of contrast-enhanced magnetic resonance imaging to identify reversible myocardial dysfunction</article-title>. <source>N Engl J Med</source>. (<year>2000</year>) <volume>343</volume>:<fpage>1445</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1056/NEJM200011163432003</pub-id><pub-id pub-id-type="pmid">11078769</pub-id></citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kelle</surname><given-names>S</given-names></name><name><surname>Roes</surname><given-names>SD</given-names></name><name><surname>Klein</surname><given-names>C</given-names></name><name><surname>Kokocinski</surname><given-names>T</given-names></name><name><surname>de Roos</surname><given-names>A</given-names></name><name><surname>Fleck</surname><given-names>E</given-names></name></person-group>, et al. <article-title>Prognostic value of myocardial infarct size and contractile reserve using magnetic resonance imaging</article-title>. <source>J Am Coll Cardiol</source>. (<year>2009</year>) <volume>54</volume>:<fpage>1770</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.jacc.2009.07.027</pub-id><pub-id pub-id-type="pmid">19874990</pub-id></citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roes</surname><given-names>SD</given-names></name><name><surname>Kelle</surname><given-names>S</given-names></name><name><surname>Kaandorp</surname><given-names>TA</given-names></name><name><surname>Kokocinski</surname><given-names>T</given-names></name><name><surname>Poldermans</surname><given-names>D</given-names></name><name><surname>Lamb</surname><given-names>HJ</given-names></name></person-group>, et al. <article-title>Comparison of myocardial infarct size assessed with contrast-enhanced magnetic resonance imaging, left ventricular function and volumes to predict mortality in patients with healed myocardial infarction</article-title>. <source>Am J Cardiol</source>. (<year>2007</year>) <volume>100</volume>:<fpage>930</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.amjcard.2007.04.029</pub-id><pub-id pub-id-type="pmid">17826372</pub-id></citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Weng</surname><given-names>Z</given-names></name><name><surname>Yao</surname><given-names>J</given-names></name><name><surname>Chan</surname><given-names>RH</given-names></name><name><surname>He</surname><given-names>J</given-names></name><name><surname>Yang</surname><given-names>X</given-names></name><name><surname>Zhou</surname><given-names>Y</given-names></name></person-group>, et al. <article-title>Prognostic value of LGE-CMR in HCM: a meta-analysis</article-title>. <source>JACC: Cardiovasc Imaging</source>. (<year>2016</year>) <volume>9</volume>:<fpage>1392</fpage>&#x2013;<lpage>402</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2016.02.031</pub-id><pub-id pub-id-type="pmid">27450876</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname><given-names>A</given-names></name><name><surname>Chen</surname><given-names>W</given-names></name><name><surname>Patel</surname><given-names>HN</given-names></name><name><surname>Alvi</surname><given-names>N</given-names></name><name><surname>Kawaji</surname><given-names>K</given-names></name><name><surname>Besser</surname><given-names>SA</given-names></name></person-group>, et al. <article-title>Impact of wideband late gadolinium enhancement cardiac magnetic resonance imaging on device-related artifacts in different implantable cardioverter-defibrillator types</article-title>. <source>J Magn Reson Imaging</source>. (<year>2021</year>) <volume>54</volume>:<fpage>1257</fpage>&#x2013;<lpage>65</lpage>. <pub-id pub-id-type="doi">10.1002/jmri.27608</pub-id><pub-id pub-id-type="pmid">33742522</pub-id></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Karim</surname><given-names>R</given-names></name><name><surname>Bhagirath</surname><given-names>P</given-names></name><name><surname>Claus</surname><given-names>P</given-names></name><name><surname>James Housden</surname><given-names>R</given-names></name><name><surname>Chen</surname><given-names>Z</given-names></name><name><surname>Karimaghaloo</surname><given-names>Z</given-names></name></person-group>, et al. <article-title>Evaluation of state-of-the-art segmentation algorithms for left ventricle infarct from late gadolinium enhancement MR images</article-title>. <source>Med Image Anal</source>. (<year>2016</year>) <volume>30</volume>:<fpage>95</fpage>&#x2013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2016.01.004</pub-id><pub-id pub-id-type="pmid">26891066</pub-id></citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhuang</surname><given-names>X</given-names></name><name><surname>Xu</surname><given-names>J</given-names></name><name><surname>Luo</surname><given-names>X</given-names></name><name><surname>Chen</surname><given-names>C</given-names></name><name><surname>Ouyang</surname><given-names>C</given-names></name><name><surname>Rueckert</surname><given-names>D</given-names></name></person-group>, et al. <article-title>Cardiac segmentation on late gadolinium enhancement MRI: a benchmark study from multi-sequence cardiac MR segmentation challenge</article-title>. <source>Med Image Anal</source>. (<year>2022</year>) <volume>81</volume>:<fpage>102528</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2022.102528</pub-id><pub-id pub-id-type="pmid">35834896</pub-id></citation></ref>
<ref id="B8"><label>8.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Minaee</surname><given-names>S</given-names></name><name><surname>Boykov</surname><given-names>Y</given-names></name><name><surname>Porikli</surname><given-names>F</given-names></name><name><surname>Plaza</surname><given-names>A</given-names></name><name><surname>Kehtarnavaz</surname><given-names>N</given-names></name><name><surname>Terzopoulos</surname><given-names>D</given-names></name></person-group>. <article-title>Image segmentation using deep learning: a survey</article-title>. <source>IEEE Trans Pattern Anal Mach Intell</source>. (<year>2022</year>) <volume>44</volume>:<fpage>3523</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2021.3059968</pub-id><pub-id pub-id-type="pmid">33596172</pub-id></citation></ref>
<ref id="B9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Willemink</surname><given-names>MJ</given-names></name><name><surname>Koszek</surname><given-names>WA</given-names></name><name><surname>Hardell</surname><given-names>C</given-names></name><name><surname>Wu</surname><given-names>J</given-names></name><name><surname>Fleischmann</surname><given-names>D</given-names></name><name><surname>Harvey</surname><given-names>H</given-names></name></person-group>, et al. <article-title>Preparing medical imaging data for machine learning</article-title>. <source>Radiology</source>. (<year>2020</year>) <volume>295</volume>:<fpage>4</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020192224</pub-id><pub-id pub-id-type="pmid">32068507</pub-id></citation></ref>
<ref id="B10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bernard</surname><given-names>O</given-names></name><name><surname>Lalande</surname><given-names>A</given-names></name><name><surname>Zotti</surname><given-names>C</given-names></name><name><surname>Cervenansky</surname><given-names>F</given-names></name><name><surname>Yang</surname><given-names>X</given-names></name><name><surname>Heng</surname><given-names>PA</given-names></name></person-group>, et al. <article-title>Deep learning techniques for automatic MRI cardiac multi-structures segmentation and diagnosis: is the problem solved?</article-title> <source>IEEE Trans Med Imaging</source>. (<year>2018</year>) <volume>37</volume>:<fpage>2514</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2018.2837502</pub-id><pub-id pub-id-type="pmid">29994302</pub-id></citation></ref>
<ref id="B11"><label>11.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Galati</surname><given-names>F</given-names></name><name><surname>Ourselin</surname><given-names>S</given-names></name><name><surname>Zuluaga</surname><given-names>MA</given-names></name></person-group>. <article-title>From accuracy to reliability and robustness in cardiac magnetic resonance image segmentation: a review</article-title>. <source>Appl Sci</source>. (<year>2022</year>) <volume>12</volume>:<fpage>3936</fpage>. <pub-id pub-id-type="doi">10.3390/app12083936</pub-id></citation></ref>
<ref id="B12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tajbakhsh</surname><given-names>N</given-names></name><name><surname>Jeyaseelan</surname><given-names>L</given-names></name><name><surname>Li</surname><given-names>Q</given-names></name><name><surname>Chiang</surname><given-names>JN</given-names></name><name><surname>Wu</surname><given-names>Z</given-names></name><name><surname>Ding</surname><given-names>X</given-names></name></person-group>. <article-title>Embracing imperfect datasets: a review of deep learning solutions for medical image segmentation</article-title>. <source>Med Image Anal</source>. (<year>2020</year>) <volume>63</volume>:<fpage>101693</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101693</pub-id><pub-id pub-id-type="pmid">32289663</pub-id></citation></ref>
<ref id="B13"><label>13.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garcea</surname><given-names>F</given-names></name><name><surname>Serra</surname><given-names>A</given-names></name><name><surname>Lamberti</surname><given-names>F</given-names></name><name><surname>Morra</surname><given-names>L</given-names></name></person-group>. <article-title>Data augmentation for medical imaging: a systematic literature review</article-title>. <source>Comput Biol Med</source>. (<year>2023</year>) <volume>152</volume>:<fpage>106391</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106391</pub-id><pub-id pub-id-type="pmid">36549032</pub-id></citation></ref>
<ref id="B14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Al Khalil</surname><given-names>Y</given-names></name><name><surname>Amirrajab</surname><given-names>S</given-names></name><name><surname>Lorenz</surname><given-names>C</given-names></name><name><surname>Weese</surname><given-names>J</given-names></name><name><surname>Pluim</surname><given-names>J</given-names></name><name><surname>Breeuwer</surname><given-names>M</given-names></name></person-group>. <article-title>On the usability of synthetic data for improving the robustness of deep learning-based segmentation of cardiac magnetic resonance images</article-title>. <source>Med Image Anal</source>. (<year>2023</year>) <volume>84</volume>:<fpage>102688</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2022.102688</pub-id><pub-id pub-id-type="pmid">36493702</pub-id></citation></ref>
<ref id="B15"><label>15.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>RJ</given-names></name><name><surname>Lu</surname><given-names>MY</given-names></name><name><surname>Chen</surname><given-names>TY</given-names></name><name><surname>Williamson</surname><given-names>DFK</given-names></name><name><surname>Mahmood</surname><given-names>F</given-names></name></person-group>. <article-title>Synthetic data in machine learning for medicine and healthcare</article-title>. <source>Nat Biomed Eng</source>. (<year>2021</year>) <volume>5</volume>:<fpage>493</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1038/s41551-021-00751-8</pub-id><pub-id pub-id-type="pmid">34131324</pub-id></citation></ref>
<ref id="B16"><label>16.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alb&#x00E0;</surname><given-names>X</given-names></name><name><surname>Lekadir</surname><given-names>K</given-names></name><name><surname>Perea&#x00F1;ez</surname><given-names>M</given-names></name><name><surname>Medrano-Gracia</surname><given-names>P</given-names></name><name><surname>Young</surname><given-names>AA</given-names></name><name><surname>Frangi</surname><given-names>AF</given-names></name></person-group>. <article-title>Automatic initialization and quality control of large-scale cardiac MRI segmentations</article-title>. <source>Med Image Anal</source>. (<year>2018</year>) <volume>43</volume>:<fpage>129</fpage>&#x2013;<lpage>41</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.10.001</pub-id></citation></ref>
<ref id="B17"><label>17.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sander</surname><given-names>J</given-names></name><name><surname>de Vos</surname><given-names>BD</given-names></name><name><surname>I&#x0161;gum</surname><given-names>I</given-names></name></person-group>. <article-title>Automatic segmentation with detection of local segmentation failures in cardiac MRI</article-title>. <source>Sci Rep</source>. (<year>2020</year>) <volume>10</volume>:<fpage>21769</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-77733-4</pub-id><pub-id pub-id-type="pmid">33303782</pub-id></citation></ref>
<ref id="B18"><label>18.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Valindria</surname><given-names>VV</given-names></name><name><surname>Lavdas</surname><given-names>I</given-names></name><name><surname>Bai</surname><given-names>W</given-names></name><name><surname>Kamnitsas</surname><given-names>K</given-names></name><name><surname>Aboagye</surname><given-names>EO</given-names></name><name><surname>Rockall</surname><given-names>AG</given-names></name></person-group>, et al. <article-title>Reverse classification accuracy: predicting segmentation performance in the absence of ground truth</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2017</year>) <volume>36</volume>:<fpage>1597</fpage>&#x2013;<lpage>606</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2017.2665165</pub-id><pub-id pub-id-type="pmid">28436849</pub-id></citation></ref>
<ref id="B19"><label>19.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Hann</surname><given-names>E</given-names></name><name><surname>Biasiolli</surname><given-names>L</given-names></name><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Werys</surname><given-names>K</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name></person-group>, et al. <comment>Quality control-driven image segmentation towards reliable automatic image analysis in large-scale cardiovascular magnetic resonance aortic cine imaging. In: Shen D, Liu T, Peters TM, Staib LH, Essert C, Zhou S, et al., editors. <italic>Medical Image Computing and Computer Assisted Intervention &#x2013; MICCAI 2019</italic>. Cham: Springer International Publishing (2019). p. 750&#x2013;8. Available from: https://doi.org/10.1007/978-3-030-32245-8&#x005F;83</comment>.</citation></ref>
<ref id="B20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hann</surname><given-names>E</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Barut&#x00E7;u</surname><given-names>A</given-names></name><name><surname>Neubauer</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Deep neural network ensemble for on-the-fly quality control-driven segmentation of cardiac MRI T1 mapping</article-title>. <source>Med Image Anal</source>. (<year>2021</year>) <volume>71</volume>:<fpage>102029</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2021.102029</pub-id><pub-id pub-id-type="pmid">33831594</pub-id></citation></ref>
<ref id="B21"><label>21.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ruijsink</surname><given-names>B</given-names></name><name><surname>Puyol-Ant&#x00F3;n</surname><given-names>E</given-names></name><name><surname>Oksuz</surname><given-names>I</given-names></name><name><surname>Sinclair</surname><given-names>M</given-names></name><name><surname>Bai</surname><given-names>W</given-names></name><name><surname>Schnabel</surname><given-names>JA</given-names></name></person-group>, et al. <article-title>Fully automated, quality-controlled cardiac analysis from CMR</article-title>. <source>JACC: Cardiovasc Imaging</source>. (<year>2020</year>) <volume>13</volume>:<fpage>684</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2019.05.030</pub-id><pub-id pub-id-type="pmid">31326477</pub-id></citation></ref>
<ref id="B22"><label>22.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Burrage</surname><given-names>MK</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Shanmuganathan</surname><given-names>M</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Nikolaidou</surname><given-names>C</given-names></name></person-group>, et al. <article-title>Toward replacing late gadolinium enhancement with artificial intelligence virtual native enhancement for gadolinium-free cardiovascular magnetic resonance tissue characterization in hypertrophic cardiomyopathy</article-title>. <source>Circulation</source>. (<year>2021</year>) <volume>144</volume>:<fpage>589</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1161/CIRCULATIONAHA.121.054432</pub-id><pub-id pub-id-type="pmid">34229451</pub-id></citation></ref>
<ref id="B23"><label>23.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Burrage</surname><given-names>MK</given-names></name><name><surname>Shanmuganathan</surname><given-names>M</given-names></name><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Thomas</surname><given-names>KE</given-names></name></person-group>, et al. <article-title>Artificial intelligence for contrast-free MRI: scar assessment in myocardial infarction using deep learning&#x2013;based virtual native enhancement</article-title>. <source>Circulation</source>. (<year>2022</year>) <volume>146</volume>:<fpage>1492</fpage>&#x2013;<lpage>503</lpage>. <pub-id pub-id-type="doi">10.1161/CIRCULATIONAHA.122.060137</pub-id><pub-id pub-id-type="pmid">36124774</pub-id></citation></ref>
<ref id="B24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kramer</surname><given-names>CM</given-names></name><name><surname>Appelbaum</surname><given-names>E</given-names></name><name><surname>Desai</surname><given-names>MY</given-names></name><name><surname>Desvigne-Nickens</surname><given-names>P</given-names></name><name><surname>DiMarco</surname><given-names>JP</given-names></name><name><surname>Friedrich</surname><given-names>MG</given-names></name></person-group>, et al. <article-title>Hypertrophic cardiomyopathy registry: the rationale and design of an international, observational study of hypertrophic cardiomyopathy</article-title>. <source>Am Heart J</source>. (<year>2015</year>) <volume>170</volume>:<fpage>223</fpage>&#x2013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.1016/j.ahj.2015.05.013</pub-id><pub-id pub-id-type="pmid">26299218</pub-id></citation></ref>
<ref id="B25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scarsini</surname><given-names>R</given-names></name><name><surname>Shanmuganathan</surname><given-names>M</given-names></name><name><surname>De Maria</surname><given-names>GL</given-names></name><name><surname>Borlotti</surname><given-names>A</given-names></name><name><surname>Kotronias</surname><given-names>RA</given-names></name><name><surname>Burrage</surname><given-names>MK</given-names></name></person-group>, et al. <article-title>Coronary microvascular dysfunction assessed by pressure wire and CMR after STEMI predicts long-term outcomes</article-title>. <source>JACC: Cardiovasc Imaging</source>. (<year>2021</year>) <volume>14</volume>:<fpage>1948</fpage>&#x2013;<lpage>59</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2021.02.023</pub-id><pub-id pub-id-type="pmid">33865789</pub-id></citation></ref>
<ref id="B26"><label>26.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Piechnik</surname><given-names>SK</given-names></name><name><surname>Ferreira</surname><given-names>VM</given-names></name><name><surname>Dall&#x2019;Armellina</surname><given-names>E</given-names></name><name><surname>Cochlin</surname><given-names>LE</given-names></name><name><surname>Greiser</surname><given-names>A</given-names></name><name><surname>Neubauer</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Shortened modified look-locker inversion recovery (ShMOLLI) for clinical myocardial T1-mapping at 1.5 and 3 T within a 9 heartbeat breathhold</article-title>. <source>J Cardiovasc Magn Reson</source>. (<year>2010</year>) <volume>12</volume>:<fpage>69</fpage>. <pub-id pub-id-type="doi">10.1186/1532-429X-12-69</pub-id><pub-id pub-id-type="pmid">21092095</pub-id></citation></ref>
<ref id="B27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Werys</surname><given-names>K</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Biasiolli</surname><given-names>L</given-names></name><name><surname>Ntusi</surname><given-names>NA</given-names></name><name><surname>Desai</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Quality assurance of quantitative cardiac T1-mapping in multicenter clinical trials&#x2014;a T1 phantom program from the hypertrophic cardiomyopathy registry (HCMR) study</article-title>. <source>Int J Cardiol</source>. (<year>2021</year>) <volume>330</volume>:<fpage>251</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijcard.2021.01.026</pub-id><pub-id pub-id-type="pmid">33535074</pub-id></citation></ref>
<ref id="B28"><label>28.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Carapella</surname><given-names>V</given-names></name><name><surname>Puchta</surname><given-names>H</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Marini</surname><given-names>C</given-names></name><name><surname>Werys</surname><given-names>K</given-names></name><name><surname>Neubauer</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Standardized image post-processing of cardiovascular magnetic resonance T1-mapping reduces variability and improves accuracy and consistency in myocardial tissue characterization</article-title>. <source>Int J Cardiol</source>. (<year>2020</year>) <volume>298</volume>:<fpage>128</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijcard.2019.08.058</pub-id><pub-id pub-id-type="pmid">31500864</pub-id></citation></ref>
<ref id="B29"><label>29.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Papie&#x017C;</surname><given-names>BW</given-names></name><name><surname>Werys</surname><given-names>K</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name></person-group>, et al. <article-title>MOCOnet: robust motion correction of cardiovascular magnetic resonance T1 mapping using convolutional neural networks</article-title>. <source>Front Cardiovasc Med</source>. (<year>2021</year>) <volume>8</volume>:<fpage>1</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.3389/fcvm.2021.768245</pub-id></citation></ref>
<ref id="B30"><label>30.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ronneberger</surname><given-names>O</given-names></name><name><surname>Fischer</surname><given-names>P</given-names></name><name><surname>Brox</surname><given-names>T</given-names></name></person-group>. <comment>U-Net: convolutional networks for biomedical image segmentation. In: Navab N, Hornegger J, Wells WM, Frangi AF, editors. <italic>Medical Image Computing and Computer-Assisted Intervention &#x2013; MICCAI 2015</italic>. Cham: Springer International Publishing (2015). p. 234&#x2013;41. Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-319-24574-4_28">https://doi.org/10.1007/978-3-319-24574-4&#x005F;28</ext-link></comment>.</citation></ref>
<ref id="B31"><label>31.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Isola</surname><given-names>P</given-names></name><name><surname>Zhu</surname><given-names>JY</given-names></name><name><surname>Zhou</surname><given-names>T</given-names></name><name><surname>Efros</surname><given-names>AA</given-names></name></person-group>. <article-title>Image-to-image translation with conditional adversarial networks</article-title>. <source>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source>. <publisher-loc>Honolulu, HI, USA</publisher-loc>: <publisher-name>IEEE</publisher-name> (2017). p. 5967&#x2013;76. <comment>Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/CVPR.2017.632">https://doi.org/10.1109/CVPR.2017.632</ext-link></comment>.</citation></ref>
<ref id="B32"><label>32.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Simonyan</surname><given-names>K</given-names></name><name><surname>Zisserman</surname><given-names>A</given-names></name></person-group>. <comment>Very deep convolutional networks for large-scale image recognition. <italic>3rd International Conference on Learning Representations (ICLR 2015)</italic>. Computational and Biological Learning Society (2015)</comment></citation></ref>
<ref id="B33"><label>33.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Johnson</surname><given-names>J</given-names></name><name><surname>Alahi</surname><given-names>A</given-names></name><name><surname>Fei-Fei</surname><given-names>L</given-names></name></person-group>. <comment>Perceptual losses for real-time style transfer and super-resolution. In: Leibe B, Matas J, Sebe N, Welling M, editors. <italic>Computer Vision &#x2013; ECCV 2016</italic>. Cham: Springer International Publishing (2016). p. 694&#x2013;711. Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-319-46475-6_43">https://doi.org/10.1007/978-3-319-46475-6&#x005F;43</ext-link></comment>.</citation></ref>
<ref id="B34"><label>34.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname><given-names>X</given-names></name><name><surname>Aldridge</surname><given-names>B</given-names></name><name><surname>Fisher</surname><given-names>R</given-names></name><name><surname>Rees</surname><given-names>J</given-names></name></person-group>. <article-title>Estimating the ground truth from multiple individual segmentations incorporating prior pattern analysis with application to skin lesion segmentation</article-title>. <source>2011 IEEE International Symposium on Biomedical Imaging: From Nano to Macro</source>. <publisher-loc>Chicago, IL, USA</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2011</year>). p. <fpage>1438</fpage>&#x2013;<lpage>41</lpage>. <comment>Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/ISBI.2011.5872670">https://doi.org/10.1109/ISBI.2011.5872670</ext-link></comment>.</citation></ref>
<ref id="B35"><label>35.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Srivastava</surname><given-names>N</given-names></name><name><surname>Hinton</surname><given-names>G</given-names></name><name><surname>Krizhevsky</surname><given-names>A</given-names></name><name><surname>Sutskever</surname><given-names>I</given-names></name><name><surname>Salakhutdinov</surname><given-names>R</given-names></name></person-group>. <article-title>Dropout: a simple way to prevent neural networks from overfitting</article-title>. <source>J Mach Learn Res</source>. (<year>2014</year>) <volume>15</volume>:<fpage>1929</fpage>&#x2013;<lpage>58</lpage>. <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/10.5555/2627435.2670313">https://dl.acm.org/doi/10.5555/2627435.2670313</ext-link></citation></ref>
<ref id="B36"><label>36.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Park</surname><given-names>SH</given-names></name><name><surname>Han</surname><given-names>K</given-names></name></person-group>. <article-title>Methodologic guide for evaluating clinical performance and effect of artificial intelligence technology for medical diagnosis and prediction</article-title>. <source>Radiology</source>. (<year>2018</year>) <volume>286</volume>:<fpage>800</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2017171920</pub-id><pub-id pub-id-type="pmid">29309734</pub-id></citation></ref>
<ref id="B37"><label>37.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kingma</surname><given-names>DP</given-names></name><name><surname>Ba</surname><given-names>J</given-names></name></person-group>. <article-title>Adam: a method for stochastic optimization</article-title>. <source>3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings</source>. <publisher-loc>San Diego, CA, USA</publisher-loc>: <publisher-name>Computational and Biological Learning Society</publisher-name> (<year>2015</year>).</citation></ref>
<ref id="B38"><label>38.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Abadi</surname><given-names>M</given-names></name><name><surname>Barham</surname><given-names>P</given-names></name><name><surname>Chen</surname><given-names>J</given-names></name><name><surname>Chen</surname><given-names>Z</given-names></name><name><surname>Davis</surname><given-names>A</given-names></name><name><surname>Dean</surname><given-names>J</given-names></name></person-group>, et al. <article-title>Tensorflow: a system for large-scale machine learning</article-title>. <source>Proceedings of the 12th USENIX Symposium on Operating Systems Design and Implementation, OSDI 2016</source>. <publisher-loc>Savannah, GA, USA</publisher-loc>: <publisher-name>USENIX Association</publisher-name> (<year>2016</year>). p. <fpage>265</fpage>&#x2013;<lpage>83</lpage>.</citation></ref>
<ref id="B39"><label>39.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Robinson</surname><given-names>R</given-names></name><name><surname>Valindria</surname><given-names>VV</given-names></name><name><surname>Bai</surname><given-names>W</given-names></name><name><surname>Oktay</surname><given-names>O</given-names></name><name><surname>Kainz</surname><given-names>B</given-names></name><name><surname>Suzuki</surname><given-names>H</given-names></name></person-group>, et al. <article-title>Automated quality control in image segmentation: application to the UK Biobank cardiovascular magnetic resonance imaging study</article-title>. <source>J Cardiovasc Magn Reson</source>. (<year>2019</year>) <volume>21</volume>:<fpage>18</fpage>. <pub-id pub-id-type="doi">10.1186/s12968-019-0523-x</pub-id><pub-id pub-id-type="pmid">30866968</pub-id></citation></ref>
<ref id="B40"><label>40.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Hann</surname><given-names>E</given-names></name><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Ferreira</surname><given-names>VM</given-names></name><name><surname>Piechnik</surname><given-names>SK</given-names></name></person-group>. <comment>Ensemble of deep convolutional neural networks with Monte Carlo dropout sampling for automated image segmentation quality control and robust deep learning using small datasets. In: Papie&#x017C; BW, Yaqub M, Jiao J, Namburete AIL, Noble JA, editors. <italic>Medical Image Understanding and Analysis</italic>. Cham: Springer International Publishing (2021). p. 280&#x2013;93. Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-030-80432-9_22">https://doi.org/10.1007/978-3-030-80432-9_22</ext-link></comment>.</citation></ref>
<ref id="B41"><label>41.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isensee</surname><given-names>F</given-names></name><name><surname>Jaeger</surname><given-names>PF</given-names></name><name><surname>Kohl</surname><given-names>SAA</given-names></name><name><surname>Petersen</surname><given-names>J</given-names></name><name><surname>Maier-Hein</surname><given-names>KH</given-names></name></person-group>. <article-title>nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation</article-title>. <source>Nat Methods</source>. (<year>2021</year>) <volume>18</volume>:<fpage>203</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-020-01008-z</pub-id><pub-id pub-id-type="pmid">33288961</pub-id></citation></ref>
<ref id="B42"><label>42.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>C</given-names></name><name><surname>Ouyang</surname><given-names>C</given-names></name><name><surname>Tarroni</surname><given-names>G</given-names></name><name><surname>Schlemper</surname><given-names>J</given-names></name><name><surname>Qiu</surname><given-names>H</given-names></name><name><surname>Bai</surname><given-names>W</given-names></name></person-group>, et al. <comment>Unsupervised multi-modal style transfer for cardiac MR segmentation. <italic>Statistical Atlases and Computational Models of the Heart. Multi-Sequence CMR Segmentation, CRT-EPiggy and LV Full Quantification Challenges</italic>. Cham: Springer International Publishing (2020). p. 209&#x2013;19. Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-030-39074-7_22">https://doi.org/10.1007/978-3-030-39074-7_22</ext-link></comment>.</citation></ref>
<ref id="B43"><label>43.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Campello</surname><given-names>VM</given-names></name><name><surname>Mart&#x00ED;n-Isla</surname><given-names>C</given-names></name><name><surname>Izquierdo</surname><given-names>C</given-names></name><name><surname>Petersen</surname><given-names>SE</given-names></name><name><surname>Ballester</surname><given-names>MAG</given-names></name><name><surname>Lekadir</surname><given-names>K</given-names></name></person-group>, et al. <comment>Combining multi-sequence and synthetic images for improved segmentation of late gadolinium enhancement cardiac MRI. <italic>Statistical Atlases and Computational Models of the Heart. Multi-Sequence CMR Segmentation, CRT-EPiggy and LV Full Quantification Challenges</italic>. Cham: Springer International Publishing (2020). p. 290&#x2013;9. Available from: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-030-39074-7_31">https://doi.org/10.1007/978-3-030-39074-7_31</ext-link></comment>.</citation></ref>
<ref id="B44"><label>44.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname><given-names>Y</given-names></name><name><surname>Tang</surname><given-names>Z</given-names></name><name><surname>Li</surname><given-names>B</given-names></name><name><surname>Firmin</surname><given-names>D</given-names></name><name><surname>Yang</surname><given-names>G</given-names></name></person-group>. <article-title>Recent advances in fibrosis and scar segmentation from cardiac MRI: a state-of-the-art review and future perspectives</article-title>. <source>Front Physiol</source>. (<year>2021</year>) <volume>12</volume>:<fpage>1</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.3389/fphys.2021.709230</pub-id></citation></ref>
<ref id="B45"><label>45.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Seemann</surname><given-names>F</given-names></name><name><surname>Lamy</surname><given-names>J</given-names></name><name><surname>Arvidsson</surname><given-names>PM</given-names></name><name><surname>Heiberg</surname><given-names>E</given-names></name><name><surname>Murray</surname><given-names>V</given-names></name></person-group>, et al. <article-title>Automated left atrial time-resolved segmentation in MRI long-axis cine images using active contours</article-title>. <source>BMC Med Imaging</source>. (<year>2021</year>) <volume>21</volume>:<fpage>101</fpage>. <pub-id pub-id-type="doi">10.1186/s12880-021-00630-3</pub-id><pub-id pub-id-type="pmid">34147081</pub-id></citation></ref>
<ref id="B46"><label>46.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davies</surname><given-names>RH</given-names></name><name><surname>Augusto</surname><given-names>JB</given-names></name><name><surname>Bhuva</surname><given-names>A</given-names></name><name><surname>Xue</surname><given-names>H</given-names></name><name><surname>Treibel</surname><given-names>TA</given-names></name><name><surname>Ye</surname><given-names>Y</given-names></name></person-group>, et al. <article-title>Precision measurement of cardiac structure and function in cardiovascular magnetic resonance using machine learning</article-title>. <source>J Cardiovasc Magn Reson</source>. (<year>2022</year>) <volume>24</volume>:<fpage>16</fpage>. <pub-id pub-id-type="doi">10.1186/s12968-022-00846-4</pub-id><pub-id pub-id-type="pmid">35272664</pub-id></citation></ref>
<ref id="B47"><label>47.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Heiberg</surname><given-names>E</given-names></name><name><surname>Engblom</surname><given-names>H</given-names></name><name><surname>Carlsson</surname><given-names>M</given-names></name><name><surname>Erlinge</surname><given-names>D</given-names></name><name><surname>Atar</surname><given-names>D</given-names></name><name><surname>Aletras</surname><given-names>AH</given-names></name></person-group>, et al. <article-title>Infarct quantification with cardiovascular magnetic resonance using &#x201C;standard deviation from remote&#x201D; is unreliable: validation in multi-centre multi-vendor data</article-title>. <source>J Cardiovasc Magn Reson</source>. (<year>2022</year>) <volume>24</volume>:<fpage>53</fpage>. <pub-id pub-id-type="doi">10.1186/s12968-022-00888-8</pub-id><pub-id pub-id-type="pmid">36336693</pub-id></citation></ref>
<ref id="B48"><label>48.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mikami</surname><given-names>Y</given-names></name><name><surname>Kolman</surname><given-names>L</given-names></name><name><surname>Joncas</surname><given-names>SX</given-names></name><name><surname>Stirrat</surname><given-names>J</given-names></name><name><surname>Scholl</surname><given-names>D</given-names></name><name><surname>Rajchl</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Accuracy and reproducibility of semi-automated late gadolinium enhancement quantification techniques in patients with hypertrophic cardiomyopathy</article-title>. <source>J Cardiovasc Magn Reson</source>. (<year>2014</year>) <volume>16</volume>:<fpage>85</fpage>. <pub-id pub-id-type="doi">10.1186/s12968-014-0085-x</pub-id><pub-id pub-id-type="pmid">25315701</pub-id></citation></ref>
<ref id="B49"><label>49.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Flett</surname><given-names>AS</given-names></name><name><surname>Hasleton</surname><given-names>J</given-names></name><name><surname>Cook</surname><given-names>C</given-names></name><name><surname>Hausenloy</surname><given-names>D</given-names></name><name><surname>Quarta</surname><given-names>G</given-names></name><name><surname>Ariti</surname><given-names>C</given-names></name></person-group>, et al. <article-title>Evaluation of techniques for the quantification of myocardial scar of differing etiology using cardiac magnetic resonance</article-title>. <source>JACC: Cardiovasc Imaging</source>. (<year>2011</year>) <volume>4</volume>:<fpage>150</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcmg.2010.11.015</pub-id><pub-id pub-id-type="pmid">21329899</pub-id></citation></ref></ref-list>
</back>
</article>