<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="editorial" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cardiovasc. Med.</journal-id>
<journal-title>Frontiers in Cardiovascular Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cardiovasc. Med.</abbrev-journal-title>
<issn pub-type="epub">2297-055X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcvm.2023.1307812</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Cardiovascular Medicine</subject>
<subj-group>
<subject>Editorial</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Editorial: Generative adversarial networks in cardiovascular research</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Zhang</surname><given-names>Qiang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/1822921/overview"/><role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/><role content-type="https://credit.niso.org/contributor-roles/project-administration/"/></contrib>
<contrib contrib-type="author"><name><surname>Cukur</surname><given-names>Tolga</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/268667/overview" /><role content-type="https://credit.niso.org/contributor-roles/project-administration/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
<contrib contrib-type="author"><name><surname>Greenspan</surname><given-names>Hayit</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref><role content-type="https://credit.niso.org/contributor-roles/project-administration/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
<contrib contrib-type="author"><name><surname>Yang</surname><given-names>Guang</given-names></name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
<xref ref-type="aff" rid="aff10"><sup>10</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/401153/overview" /><role content-type="https://credit.niso.org/contributor-roles/project-administration/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><institution>Division of Cardiovascular Medicine, Radcliffe Department of Medicine, University of Oxford</institution>, <addr-line>Oxford</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff2"><label><sup>2</sup></label><institution>Big Data Institute, University of Oxford</institution>, <addr-line>Oxford</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff3"><label><sup>3</sup></label><institution>Department of Electrical and Electronics Engineering, Bilkent University</institution>, <addr-line>Ankara</addr-line>, <country>T&#x00FC;rkiye</country></aff>
<aff id="aff4"><label><sup>4</sup></label><institution>National Magnetic Resonance Research Center (UMRAM), Bilkent University</institution>, <addr-line>Ankara</addr-line>, <country>T&#x00FC;rkiye</country></aff>
<aff id="aff5"><label><sup>5</sup></label><institution>Department of Biomedical Engineering, Tel Aviv University</institution>, <addr-line>Tel Aviv</addr-line>, <country>Israel</country></aff>
<aff id="aff6"><label><sup>6</sup></label><institution>Department of Radiology, Icahn School of Medicine, Mount Sinai</institution>, <addr-line>New York, NY</addr-line>, <country>United States</country></aff>
<aff id="aff7"><label><sup>7</sup></label><institution>Bioengineering Department and Imperial-X, Imperial College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff8"><label><sup>8</sup></label><institution>National Heart and Lung Institute, Imperial College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff9"><label><sup>9</sup></label><institution>Cardiovascular Research Centre, Royal Brompton Hospital</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff10"><label><sup>10</sup></label><institution>School of Biomedical Engineering &#x0026; Imaging Sciences, King&#x2019;s College London</institution>, <addr-line>London</addr-line>, <country>United Kingdom</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited and Reviewed by:</bold> Xiang Li, Harvard Medical School, United States</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Qiang Zhang <email>qiang.zhang@cardiov.ox.ac.uk</email></corresp>
</author-notes>
<pub-date pub-type="epub"><day>23</day><month>10</month><year>2023</year></pub-date>
<pub-date pub-type="collection"><year>2023</year></pub-date>
<volume>10</volume><elocation-id>1307812</elocation-id>
<history>
<date date-type="received"><day>05</day><month>10</month><year>2023</year></date>
<date date-type="accepted"><day>13</day><month>10</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Zhang, Cukur, Greenspan and Yang.</copyright-statement>
<copyright-year>2023</copyright-year><copyright-holder>Zhang, Cukur, Greenspan and Yang</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<kwd-group>
<kwd>deep generative models</kwd>
<kwd>generative adversarial networks (GAN)</kwd>
<kwd>echocardiography (Echo)</kwd>
<kwd>cardiovascular magnetic resonance</kwd>
<kwd>segmentation (Image processing)</kwd>
</kwd-group>
<contract-num rid="cn001">RE/18/3/34214</contract-num>
<contract-num rid="cn002">&#x00A0;</contract-num>
<contract-num rid="cn003">&#x00A0;</contract-num>
<contract-num rid="cn004">TG/18/5/34111, PG/16/78/32402</contract-num>
<contract-num rid="cn005">101005122</contract-num>
<contract-num rid="cn006">952172</contract-num>
<contract-num rid="cn007">MC/PC/21013</contract-num>
<contract-num rid="cn008">IEC\NSFC\211235</contract-num>
<contract-num rid="cn009">&#x00A0;</contract-num>
<contract-num rid="cn010">&#x00A0;</contract-num>
<contract-num rid="cn011">&#x00A0;</contract-num>
<contract-num rid="cn012">&#x00A0;</contract-num>
<contract-num rid="cn013">MR/V023799/1</contract-num>
<contract-sponsor id="cn001">Oxford BHF Centre of Research Excellence</contract-sponsor>
<contract-sponsor id="cn002">Turkish Academy of Sciences</contract-sponsor>
<contract-sponsor id="cn003">Science Academy</contract-sponsor>
<contract-sponsor id="cn004">BHF</contract-sponsor>
<contract-sponsor id="cn005">ERC IMI</contract-sponsor>
<contract-sponsor id="cn006">H2020</contract-sponsor>
<contract-sponsor id="cn007">MRC</contract-sponsor>
<contract-sponsor id="cn008">Royal Society</contract-sponsor>
<contract-sponsor id="cn009">NVIDIA Academic Hardware Grant Program</contract-sponsor>
<contract-sponsor id="cn010">SABER project</contract-sponsor>
<contract-sponsor id="cn011">Boehringer Ingelheim Ltd.</contract-sponsor>
<contract-sponsor id="cn012">Wellcome Leap Dynamic Resilience</contract-sponsor>
<contract-sponsor id="cn013">UKRI Future Leaders Fellowship</contract-sponsor>
<counts>
<fig-count count="0"/>
<table-count count="0"/><equation-count count="0"/><ref-count count="7"/><page-count count="0"/><word-count count="0"/></counts><custom-meta-wrap><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Cardiovascular Imaging</meta-value></custom-meta></custom-meta-wrap>
</article-meta>
</front>
<body>
<p><bold>Editorial on the Research Topic</bold> <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/research-topics/45072/generative-adversarial-networks-in-cardiovascular-research">Generative adversarial networks in cardiovascular research</ext-link></p>
<p>Deep generative models are a family of neural networks capable of learning the data distribution from a large set of training samples and then generating realistic new data samples. They are among the most exciting technical breakthroughs in deep learning in recent years. A popular example is Generative adversarial networks (GAN) (<xref ref-type="bibr" rid="B1">1</xref>), which leverage a game-theoretic interplay between a generator and adversarial discriminator for an implicit characterization of the data distribution. In cardiovascular medicine, GANs are increasingly adopted in a wide range of applications for analysing cardiovascular MRI, echocardiography, electrocardiography and patient characteristics. This Research Topic has collected articles on the application of deep GAN models to left atrial appendage selection for surgical occlusion (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1153053">Zhu et al.</ext-link>), function analysis in coronary artery stenosis (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1155969">Yong et al.</ext-link>), late gadolinium enhancement scar assessment (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1213290">Gonzales et al.</ext-link>), and strain analysis (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2022.1067760">Deng et al.</ext-link>), using echocardiography and cardiac MRI.</p>
<p>In <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1153053">Zhu et al.</ext-link>, an adversarial-based latent space alignment framework has been proposed for left atrial appendage (LAA) segmentation in transesophageal echocardiography (TEE). LAA segmentation and quantification are crucial in guiding the surgical procedure for the treatment of LAA-associated ischaemic strokes. However, it is challenging on TEE due to TEE image artefacts, noise and highly variable LAA structure. To address this challenge, the authors encoded the prior knowledge of LAA shapes in a latent feature space and adopted generative adversarial learning to align the automated segmentation with the prior knowledge in the latent space, therefore constraining the segmentation results. The approach was validated on 1,783 TEE images and achieved superior performance with a Dice Similarity Coefficient (DSC) of 0.83. This work demonstrated the effectiveness of GANs in enhancing deep-learning models for challenging image modalities.</p>
<p><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1155969">Yong et al.</ext-link> combined a GAN approach with intravascular ultrasound (IVUS) for non-invasive function analysis of coronary artery stenosis. The current clinical standard for coronary artery function evaluation is invasive fractional flow reserve (FFR). However, IVUS, a procedure routinely used for morphological assessment, has the potential to provide function evaluation simultaneously. To assess this approach, 92 patients who received both IVUS and FFR assessments were retrospectively identified. The authors employed a SegAN (<xref ref-type="bibr" rid="B2">2</xref>) to automatically segment the arterial lumen contours from IVUS images, which achieved a high DSC of 0.95 and 0.97 for lumen and media&#x2013;adventitia border delineation. This allows accurate calculation of IVUS-FFR in good agreement with invasive FFR (<italic>r</italic>&#x2009;&#x003D;&#x2009;0.94), and a high diagnosis accuracy of 90.7&#x0025;. Powered by deep generative models, IVUS demonstrated the feasibility of achieving comparable diagnostic performance with invasive FFR, with significantly lower computation time.</p>
<p><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2023.1213290">Gonzales et al.</ext-link> validated an approach of augmenting training samples with GAN to improve the accuracy of segmentation of cardiac MRI late gadolinium enhancement (LGE)&#x2014;the imaging standard for myocardial scar assessment. The study leveraged GAN-generated virtual native enhancement (VNE) (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>)&#x2014;a new, gadolinium-free modality that resembles LGE&#x2014;to expand the training set. A dataset comprising 4,716 LGE images (from 1,363 patients with hypertrophic cardiomyopathy and myocardial infarction) was retrospectively collated. LGE data were augmented with a GAN-based generator to produce VNE images. The results demonstrated that incorporating GAN-generated VNE data into the training process consistently led to enhanced segmentation performance: the models trained on only LGE yielded a DSC of 0.835, 0.838 for LGE and VNE segmentation; whereas the models trained on both LGE and VNE yielded a higher DSC of 0.845, 0.845. Additionally, the individual segmentation performance of the model trained with only LGE data, including extensive data augmentation (<xref ref-type="bibr" rid="B5">5</xref>) (0.846) was also surpassed by the same framework when the VNE data were added (0.851). This work showed data augmentation using generative models as an effective approach to improving deep learning training, especially in the scenario of limited training data.</p>
<p>Additionally, in <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcvm.2022.1067760">Deng et al.</ext-link>, a deep learning approach has been developed for automated strain analysis on echocardiography. Strain analysis using echocardiography has great potential to offer rapid heart function assessment in routine clinical workflow. However, it requires myocardial segmentation, which is challenging in echocardiography. The authors developed a 3D U-Net and an optical-flow network to segment the LV myocardium, track the motion, and calculate the longitudinal strain. The AI-based echocardiography interpretation demonstrated a good agreement (Spearman correlation of 0.9) with the traditional semi-automatic speckle tracking echocardiography (STE), with no significant bias (mean bias &#x2212;1.2&#x2009;&#x00B1;&#x2009;1.5&#x0025;), whilst much faster (15&#x2005;s vs. 5&#x2013;10&#x2005;min). Further development of generative models to learn the prior and distribution of a representative, real in-vivo data bank may help to translate this echocardiography technique into clinical practice.</p>
<p>In summary, these articles have highlighted the significant potential of deep generative models in revolutionising cardiac imaging, particularly in addressing intricate tasks and various image modalities. Looking ahead, we foresee a surge in upcoming research focused on fine-tuning and utilising deep generative models for a broader range of applications. These may involve reducing doses, rectifying missing modalities, augmenting data, refining image reconstruction (<xref ref-type="bibr" rid="B6">6</xref>), precise segmentation (<xref ref-type="bibr" rid="B7">7</xref>), accurate tracking of anatomical features, and dependable classification within the field of cardiovascular medicine.</p>
</body>
<back>
<sec id="s1" sec-type="author-contributions"><title>Author contributions</title>
<p>QZ: Writing &#x2013; original draft, Project administration. TC: Project administration, Writing &#x2013; review &#x0026; editing. HG: Project administration, Writing &#x2013; review &#x0026; editing. GY: Project administration, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="s2" sec-type="funding-information"><title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article.</p>
<p>QZ acknowledges the Oxford BHF Centre of Research Excellence grant RE/18/3/34214. TC was supported in part by a GEBIP 2015 fellowship by the Turkish Academy of Sciences and by a BAGEP 2017 fellowship by the Science Academy. GY was supported in part by the BHF (TG/18/5/34111, PG/16/78/32402), ERC IMI (101005122), the H2020 (952172), the MRC (MC/PC/21013), the Royal Society (IEC\NSFC\211235), the NVIDIA Academic Hardware Grant Program, the SABER project supported by Boehringer Ingelheim Ltd., Wellcome Leap Dynamic Resilience, and the UKRI Future Leaders Fellowship (MR/V023799/1).</p>
</sec>
<sec id="s3" sec-type="COI-statement"><title>Conflict of interest</title>
<p>QZ has authorship rights for patent WO2021/044153: &#x201C;Enhancement of Medical Images&#x201D;.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="s4" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goodfellow</surname><given-names>I</given-names></name><name><surname>Pouget-Abadie</surname><given-names>J</given-names></name><name><surname>Mirza</surname><given-names>M</given-names></name><name><surname>Xu</surname><given-names>B</given-names></name><name><surname>Warde-Farley</surname><given-names>D</given-names></name><name><surname>Ozair</surname><given-names>S</given-names></name><etal/></person-group> <article-title>Generative adversarial networks</article-title>. <source>Commun ACM</source>. (<year>2020</year>) <volume>63</volume>(<issue>11</issue>):<fpage>139</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1145/3422622</pub-id></citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname><given-names>Y</given-names></name><name><surname>Xu</surname><given-names>T</given-names></name><name><surname>Zhang</surname><given-names>H</given-names></name><name><surname>Long</surname><given-names>LR</given-names></name><name><surname>Huang</surname><given-names>X</given-names></name></person-group>. <article-title>Segan: adversarial network with multi-scale L<sub>1</sub> loss for medical image segmentation</article-title>. <source>Neuroinformatics</source>. (<year>2018</year>) <volume>16</volume>:<fpage>383</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1007/s12021-018-9377-x</pub-id><pub-id pub-id-type="pmid">29725916</pub-id></citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Burrage</surname><given-names>MK</given-names></name><name><surname>Shanmuganathan</surname><given-names>M</given-names></name><name><surname>Gonzales</surname><given-names>RA</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Thomas</surname><given-names>KE</given-names></name><etal/></person-group> <article-title>Artificial intelligence for contrast-free MRI: scar assessment in myocardial infarction using deep learning-based virtual native enhancement</article-title>. <source>Circulation</source>. (<year>2022</year>) <volume>146</volume>(<issue>20</issue>):<fpage>1492</fpage>&#x2013;<lpage>503</lpage>. <pub-id pub-id-type="doi">10.1161/CIRCULATIONAHA.122.060137</pub-id><pub-id pub-id-type="pmid">36124774</pub-id></citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q</given-names></name><name><surname>Burrage</surname><given-names>MK</given-names></name><name><surname>Lukaschuk</surname><given-names>E</given-names></name><name><surname>Shanmuganathan</surname><given-names>M</given-names></name><name><surname>Popescu</surname><given-names>IA</given-names></name><name><surname>Nikolaidou</surname><given-names>C</given-names></name><etal/></person-group> <article-title>Toward replacing late gadolinium enhancement with artificial intelligence virtual native enhancement for gadolinium-free cardiovascular magnetic resonance tissue characterization in hypertrophic cardiomyopathy</article-title>. <source>Circulation</source>. (<year>2021</year>) <volume>144</volume>(<issue>8</issue>):<fpage>589</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1161/CIRCULATIONAHA.121.054432</pub-id><pub-id pub-id-type="pmid">34229451</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isensee</surname><given-names>F</given-names></name><name><surname>Jaeger</surname><given-names>PF</given-names></name><name><surname>Kohl</surname><given-names>SA</given-names></name><name><surname>Petersen</surname><given-names>J</given-names></name><name><surname>Maier-Hein</surname><given-names>KH</given-names></name></person-group>. <article-title>nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation</article-title>. <source>Nat Methods</source>. (<year>2021</year>) <volume>18</volume>(<issue>2</issue>):<fpage>203</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-020-01008-z</pub-id><pub-id pub-id-type="pmid">33288961</pub-id></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>G</given-names></name><name><surname>Yu</surname><given-names>S</given-names></name><name><surname>Dong</surname><given-names>H</given-names></name><name><surname>Slabaugh</surname><given-names>G</given-names></name><name><surname>Dragotti</surname><given-names>PL</given-names></name><name><surname>Ye</surname><given-names>X</given-names></name><etal/></person-group> <article-title>DAGAN: deep de-aliasing generative adversarial networks for fast compressed sensing MRI reconstruction</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2018</year>) <volume>37</volume>(<issue>6</issue>):<fpage>1310</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2017.2785879</pub-id><pub-id pub-id-type="pmid">29870361</pub-id></citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname><given-names>Y</given-names></name><name><surname>Tang</surname><given-names>Z</given-names></name><name><surname>Li</surname><given-names>B</given-names></name><name><surname>Firmin</surname><given-names>D</given-names></name><name><surname>Yang</surname><given-names>G</given-names></name></person-group>. <article-title>Recent advances in fibrosis and scar segmentation from cardiac MRI: a state-of-the-art review and future perspectives</article-title>. <source>Front Physiol</source>. (<year>2021</year>) <volume>12</volume>:<fpage>709230</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2021.709230</pub-id><pub-id pub-id-type="pmid">34413789</pub-id></citation></ref></ref-list>
</back>
</article>