<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Physiol.</journal-id>
<journal-title>Frontiers in Physiology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Physiol.</abbrev-journal-title>
<issn pub-type="epub">1664-042X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fphys.2022.757159</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Physiology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Fast Characterization of Inducible Regions of Atrial Fibrillation Models With Multi-Fidelity Gaussian Process Classification</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Gander</surname> <given-names>Lia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1462017/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Pezzuto</surname> <given-names>Simone</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/240206/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Gharaviri</surname> <given-names>Ali</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/823916/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Krause</surname> <given-names>Rolf</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Perdikaris</surname> <given-names>Paris</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1152161/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Sahli Costabal</surname> <given-names>Francisco</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/842319/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Center for Computational Medicine in Cardiology, Euler Institute, Universit&#x000E0; della Svizzera italiana</institution>, <addr-line>Lugano</addr-line>, <country>Switzerland</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Mechanical Engineering and Applied Mechanics, University of Pennsylvania</institution>, <addr-line>Philadelphia, PA</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Mechanical and Metallurgical Engineering, School of Engineering, Pontificia Universidad Cat&#x000F3;lica de Chile</institution>, <addr-line>Santiago</addr-line>, <country>Chile</country></aff>
<aff id="aff4"><sup>4</sup><institution>Institute for Biological and Medical Engineering, Schools of Engineering, Medicine and Biological Sciences, Pontificia Universidad Cat&#x000F3;lica de Chile</institution>, <addr-line>Santiago</addr-line>, <country>Chile</country></aff>
<aff id="aff5"><sup>5</sup><institution>Millennium Nucleus for Cardiovascular Magnetic Resonance</institution>, <addr-line>Santiago</addr-line>, <country>Chile</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Rafael Sebastian, University of Valencia, Spain</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Axel Loewe, Karlsruhe Institute of Technology (KIT), Germany; Richard H. Clayton, The University of Sheffield, United Kingdom; Dana H. Brooks, Northeastern University, United States; Pras Pathmanathan, United States Food and Drug Administration, United States</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Francisco Sahli Costabal <email>fsc&#x00040;ing.puc.cl</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Computational Physiology and Medicine, a section of the journal Frontiers in Physiology</p></fn></author-notes>
<pub-date pub-type="epub">
<day>07</day>
<month>03</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>13</volume>
<elocation-id>757159</elocation-id>
<history>
<date date-type="received">
<day>11</day>
<month>08</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>31</day>
<month>01</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2022 Gander, Pezzuto, Gharaviri, Krause, Perdikaris and Sahli Costabal.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Gander, Pezzuto, Gharaviri, Krause, Perdikaris and Sahli Costabal</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract>
<p>Computational models of atrial fibrillation have successfully been used to predict optimal ablation sites. A critical step to assess the effect of an ablation pattern is to pace the model from different, potentially random, locations to determine whether arrhythmias can be induced in the atria. In this work, we propose to use multi-fidelity Gaussian process classification on Riemannian manifolds to efficiently determine the regions in the atria where arrhythmias are inducible. We build a probabilistic classifier that operates directly on the atrial surface. We take advantage of lower resolution models to explore the atrial surface and combine seamlessly with high-resolution models to identify regions of inducibility. We test our methodology in 9 different cases, with different levels of fibrosis and ablation treatments, totalling 1,800 high resolution and 900 low resolution simulations of atrial fibrillation. When trained with 40 samples, our multi-fidelity classifier that combines low and high resolution models, shows a balanced accuracy that is, on average, 5.7% higher than a nearest neighbor classifier. We hope that this new technique will allow faster and more precise clinical applications of computational models for atrial fibrillation. All data and code accompanying this manuscript will be made publicly available at: <ext-link ext-link-type="uri" xlink:href="https://github.com/fsahli/AtrialMFclass">https://github.com/fsahli/AtrialMFclass</ext-link>.</p></abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>cardiac electrophysiology</kwd>
<kwd>atrial fibrillation</kwd>
<kwd>Gaussian processes</kwd>
<kwd>Riemannian manifolds</kwd>
<kwd>active learning</kwd>
</kwd-group>
<counts>
<fig-count count="8"/>
<table-count count="1"/>
<equation-count count="26"/>
<ref-count count="50"/>
<page-count count="16"/>
<word-count count="10158"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1. Introduction</title>
<p>Atrial Fibrillation (AF) is the most common cardiac arrhythmia and a significant contributor to morbidity and mortality (Virani et al., <xref ref-type="bibr" rid="B48">2021</xref>). AF is characterized by a chaotic electrical activity of the atria and perpetuated by multiple re-entrant wavelets propagating in the atrial tissue. It has been shown in several studies that in patients in the early stages of AF (paroxysmal AF), the chaotic activity is originated mainly from the pulmonary veins (PVs) (Haissaguerre et al., <xref ref-type="bibr" rid="B20">1998</xref>; Chen et al., <xref ref-type="bibr" rid="B5">1999</xref>). Thus, PV isolation (PVI) is the cornerstone of AF treatment at this point (Kawai et al., <xref ref-type="bibr" rid="B24">2019</xref>). Here, ablation lines around the PVs are created to electrically isolate them. However, in patients with a persistent form of AF, PVI efficacy remains sub-optimal (Verma et al., <xref ref-type="bibr" rid="B47">2015</xref>; Kawai et al., <xref ref-type="bibr" rid="B24">2019</xref>). The detriment in the effect of this treatment in persistent AF patients is caused mainly by the shift of electrical abnormalities in the PVs to other locations and higher degrees of structural remodelling (Boyle et al., <xref ref-type="bibr" rid="B4">2019</xref>; Kawai et al., <xref ref-type="bibr" rid="B24">2019</xref>). Targeting arrhythmic substrates in persistent AF patients, in addition to PVI, could not demonstrate any benefit, as these treatment approaches do not incorporate strategies to find optimal ablation targets according to the AF mechanism (Verma et al., <xref ref-type="bibr" rid="B47">2015</xref>). Furthermore, the high inter-individual variability in fibrosis distributions (McDowell et al., <xref ref-type="bibr" rid="B29">2015</xref>; Boyle et al., <xref ref-type="bibr" rid="B4">2019</xref>) and sources maintaining AF indicates an urgent need for patient-specific approaches.</p>
<p>Simulations, conducted in computational atrial models, have recently been used to develop mechanistic insights into the perpetuation and ablation of persistent AF patients with atrial fibrosis (McDowell et al., <xref ref-type="bibr" rid="B29">2015</xref>; Boyle et al., <xref ref-type="bibr" rid="B4">2019</xref>; Loewe et al., <xref ref-type="bibr" rid="B28">2019</xref>; Roney et al., <xref ref-type="bibr" rid="B43">2019</xref>). A common approach to investigate AF is to stimulate a high fidelity model from different pacing sites and observe whether this arrhythmia was induced or not (Boyle et al., <xref ref-type="bibr" rid="B4">2019</xref>). With these simulations, it is possible to create an inducibility map that shows the regions of the atria where AF will manifest if stimulated (Potse et al., <xref ref-type="bibr" rid="B39">2018</xref>). Moreover, this map can be reduced into one metric, the inducibility, which corresponds to the fraction of the tissue where AF can be induced. This quantity is useful to compare different ablation treatments, as the most efficient intervention will be the one that reaches the lowest inducibility with the lowest amount of ablation (Gharaviri et al., <xref ref-type="bibr" rid="B16">2021a</xref>,<xref ref-type="bibr" rid="B17">b</xref>).</p>
<p>Inducibility maps are computationally expensive to compute with high fidelity models. The complete exploration of all the potential sites that could trigger an arrhythmia is currently unfeasible (Loewe et al., <xref ref-type="bibr" rid="B28">2019</xref>). For this reason, a number of alternatives have been proposed. A viable option is to design a pacing protocol that maximizes the chance of inducing AF (Azzolin et al., <xref ref-type="bibr" rid="B1">2021</xref>). Alternatively, the computational cost per simulation could be reduced by a faster implementation of the AF model, e.g., based on GPGPU (Kaboudian et al., <xref ref-type="bibr" rid="B22">2019</xref>). Additionally, low fidelity models provide an approximation that could be based on simplified physics, e.g., eikonal models (Fu et al., <xref ref-type="bibr" rid="B14">2013</xref>; Quaglino et al., <xref ref-type="bibr" rid="B40">2018</xref>), reduced-order modeling (Fresca et al., <xref ref-type="bibr" rid="B13">2020</xref>; Pagani and Manzoni, <xref ref-type="bibr" rid="B32">2021</xref>) or simply on a coarser discretization (Quaglino et al., <xref ref-type="bibr" rid="B41">2019</xref>; Dhamala et al., <xref ref-type="bibr" rid="B12">2020</xref>).</p>
<p>Low fidelity models alone are faster, but potentially imprecise in reproducing the high fidelity inducibility map. However, a certain degree of statistical correlation between high- and low fidelity maps is to be expected. Multi-fidelity approaches can exploit this inter-model correlation to improve the accuracy of the estimators for a fixed total cost or, equivalently, to reduce the total cost of estimation for a targeted accuracy (Perdikaris et al., <xref ref-type="bibr" rid="B33">2016</xref>; Quaglino et al., <xref ref-type="bibr" rid="B40">2018</xref>, <xref ref-type="bibr" rid="B41">2019</xref>; Sahli Costabal et al., <xref ref-type="bibr" rid="B44">2019</xref>). This is achieved by offsetting most of the computational burden to the low fidelity model. Moreover, the overall computational cost could also be further reduced by carefully selecting the training points. To this end, Bayesian decision making strategies, commonly referred to as active learning (Cohn et al., <xref ref-type="bibr" rid="B6">1996</xref>), can provide a principled way for judiciously selecting new observations towards improving classification accuracy. The process consists in adding points iteratively in the locations where the uncertainty is greater (Kapoor et al., <xref ref-type="bibr" rid="B23">2007</xref>; Gramacy and Polson, <xref ref-type="bibr" rid="B19">2017</xref>; Sahli Costabal et al., <xref ref-type="bibr" rid="B45">2020</xref>; Zaman et al., <xref ref-type="bibr" rid="B50">2021</xref>).</p>
<p>The problem of creating an inducibility map can be seen as a classification problem, from a machine learning perspective. The labels, in this case, are the occurrence or absence of AF when we pace the model from a specific site, which corresponds to the input. Although this may seem a trivial task, for which many tools are available, it is not straightforward when the classification domain is a Riemannian manifold, such as the atrial surface. In this case, points that may be close in the Euclidean space might be apart in the manifold due to its topology. There has been recent attention in the machine learning community on formulating effective Gaussian process (GP) models for supervised learning on Riemannian manifolds (Coveney et al., <xref ref-type="bibr" rid="B9">2019</xref>; Borovitskiy et al., <xref ref-type="bibr" rid="B2">2020</xref>). GPs tend to perform well when the amount of data available is limited, and, due to their Bayesian nature, they provide built-in uncertainty in the predictions. However, current approaches, also adopted in the cardiac modeling community, have focused on the regression case (Coveney et al., <xref ref-type="bibr" rid="B9">2019</xref>; Coveney S. et al., <xref ref-type="bibr" rid="B10">2020</xref>). Performing classification with Gaussian processes is a challenging task, as there is no closed-form expression for the likelihood, and it requires different types of approximations to perform statistical inference (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>).</p>
<p>In this work, we develop GP classifiers that can operate on manifolds, such as the atrial surface (see <xref ref-type="fig" rid="F1">Figure 1</xref>). We extend this tool to seamlessly combine different levels of data fidelity by creating a multi-fidelity GP classifier. In the specific context of AF, we aim to develop a method that allows us to comprehensively determine atrial regions, for a specific structural remodeling pattern, that, if stimulated could successfully initiate AF, creating an inducibility map <italic>in-silico</italic>. In particular, our low fidelity model is based on a coarser spatial discretization of the atrial geometry and on a larger time step in the solution of the electrophysiology equations. The inducibility map is reconstructed using a multi-fidelity GP classifier, resulting in a function on the atrial surface taking boolean values, depending on whether AF is or is not inducible when pacing from a given location. We will demonstrate that this approach is more efficient and accurate than other classifiers, and even single-fidelity methods, for cases with and without ablation treatments and for different fibrosis patterns.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Overview of the methodology. We predict the regions where AF can be induced using a multi-fidelity Gaussian process classifier. We use the Laplacian eigenfunctions of the atrial geometry to efficiently construct a Gaussian process covariance function that operates directly on the manifold surface. We pace different sites in computational models of AF with low and high resolution to create a dataset to train our classifier. In the end, we obtain an inducibility map that can be used to assess treatments.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0001.tif"/>
</fig>
<p>The manuscript is organized as follows. In Section 2 we present the AF model and the classification method, obtained by extending the classic GP classification on manifolds. We also present the multi-fidelity approach, as well as the active learning scheme employed to sequentially acquire new information. Section 3 is devoted to the numerical experiments. Specifically, we propose a numerical assessment of the classifiers, including nine case studies involving the characterization of inducibility regions of atrial models. The discussion in Section 4 concludes the manuscript.</p></sec>
<sec sec-type="methods" id="s2">
<title>2. Methods</title>
<sec>
<title>2.1. Atrial Modeling</title>
<p>In this work, we use previously developed highly detailed human atrial model of atrial fibrillation (AF) (Potse et al., <xref ref-type="bibr" rid="B39">2018</xref>; Gharaviri et al., <xref ref-type="bibr" rid="B15">2020</xref>). We briefly summarize here the relevant aspects of the model. The anatomy, including heart and torso, is based on MRI data. Several key features (bundles, fibers) are based on histological studies and added manually. The atrial wall is 3-dimensional with variable thickness.</p>
<p>In the numerical experiments for this study, we consider different combinations of fibrosis patterns and ablation lines, for a total of 9 scenarios. Firstly, we consider three fibrosis patterns (<xref ref-type="fig" rid="F2">Figure 2</xref>), one case with moderate fibrosis, corresponding to 50% of fibrotic tissue, and two cases with severe fibrosis, corresponding to 70% of fibrotic tissue. We consider endomysial fibrosis, which is modeled by formally imposing zero cross-fiber intracellular conductivity in fibrotic regions. Secondly, we implement two standard-of-care ablation strategies, pulmonary vein isolation (PVI) and PVI with roof lines (PVI&#x0002B;BOX), see <xref ref-type="fig" rid="F2">Figure 2D</xref>. Ablation lines are non-conductive tissue.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>AF models. Fibrosis distribution in 3 different scenarios: moderate fibrosis [<bold>(A)</bold>, 50% fibrotic tissue], and severe fibrosis (70%) in two different patterns <bold>(B), (C)</bold>. <bold>(D)</bold> shows PVI and BOX ablation lines.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0002.tif"/>
</fig>
<p>The electrical activity is modeled with the monodomain system (Colli Franzone et al., <xref ref-type="bibr" rid="B7">2014</xref>), which reads as follows
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003C7;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>&#x02202;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mtext>ion</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#x02207;</mml:mo><mml:mo>&#x000B7;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x02207;</mml:mo><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>&#x000D7;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi>T</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x02202;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>g</mml:mtext></mml:mstyle><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>&#x000D7;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi>T</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x02207;</mml:mo><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:mstyle 
mathvariant="bold"><mml:mtext>n</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:mi>&#x02202;</mml:mi><mml:mo>&#x003A9;</mml:mo><mml:mo>&#x000D7;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi>T</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where <italic>V</italic><sub>m</sub>(<bold>x</bold>, <italic>t</italic>) is the transmembrane potential, <bold>w</bold>(<bold>x</bold>, <italic>t</italic>) is a vector of ion gating and concentration variables, &#x003A9; is a domain describing the active myocardium, <italic>C</italic><sub>m</sub> &#x0003D; 1 &#x003BC;F cm<sup>&#x02212;2</sup> is the membrane capacitance, &#x003C7; &#x0003D; 800 cm<sup>&#x02212;1</sup> is the membrane surface-to-volume ratio, <italic>I</italic><sub>stim</sub> is the current stimulus, <bold>G</bold><sub>m</sub>(<bold>x</bold>) is the monodomain conductivity tensor, and <italic>I</italic><sub>ion</sub> and <bold>g</bold> describe the ionic model. In particular, we consider the Courtemanche-Ramirez-Nattel model (Courtemanche et al., <xref ref-type="bibr" rid="B8">1998</xref>) adapted to an AF phenotype, with minor adaptations to guarantee numerical stability when evaluating the gating parameters for certain values of <italic>V</italic><sub>m</sub> (Potse, <xref ref-type="bibr" rid="B37">2019</xref>). The initial condition (<italic>V</italic><sub>0</sub>, <bold>w</bold><sub>0</sub>) corresponds to the resting state.</p>
<p>The conductivity tensor <bold>G</bold><sub>m</sub> is defined as <inline-formula><mml:math id="M2"><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>i</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>i</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>e</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">G</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>e</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:math></inline-formula>, where <bold>G</bold><sub>i</sub> and <bold>G</bold><sub>e</sub> are, respectively, the intra- and extra-cellular conductivity tensors, both assumed transversely isotropic with respect to the local fiber direction. The intracellular longitudinal and cross conductivity are, respectively, set 3 and 0.3 mS cm<sup>&#x02212;1</sup>, while the extracellular conductivities are 3 and 1.2 mS cm<sup>&#x02212;1</sup>, respectively. The resulting conduction velocity in the fiber direction is 55.6 cm s<sup>&#x02212;1</sup>. In the Bachmann&#x00027;s bundle, faster conduction is obtained with a longitudinal intracellular conductivity of 9 mS cm<sup>&#x02212;1</sup>. 
Finally, the region between the superior and inferior vena cava is assumed isotropic, with all conductivities set to 1.5 mS cm<sup>&#x02212;1</sup>.</p>
<p>The numerical solution of Equation (1) is based on a second-order finite difference scheme for the spatial discretization, and a fully explicit first-order Euler scheme for time stepping (Potse et al., <xref ref-type="bibr" rid="B38">2006</xref>). The Rush-Larsen scheme is adopted to update the gating variables. The computational domain is discretized using a uniform mesh with hexahedral elements of side <italic>h</italic>.</p>
<p>For the high fidelity simulations, we consider a fine mesh with <italic>h</italic> &#x0003D; 0.2 mm and a time step of &#x00394;<italic>t</italic> &#x0003D; 0.01 ms. For the low fidelity simulations, we double the discretization parameters, with <italic>h</italic> &#x0003D; 0.4 mm and &#x00394;<italic>t</italic> &#x0003D; 0.02 ms. The coarsening of the grid is performed by employing a majority rule to determine the tissue type and fiber orientation of the coarse hexahedral elements from the eight sub-elements of the fine mesh. Moreover, the coarse model assumes a reduced surface-to-volume ratio &#x003C7; &#x0003D; 450 cm<sup>&#x02212;1</sup> to balance out the expected reduction in conduction velocity due to a coarser space discretization (Pezzuto et al., <xref ref-type="bibr" rid="B34">2016</xref>).</p>
<p>All simulations are performed with the Propag-5 software (Potse et al., <xref ref-type="bibr" rid="B38">2006</xref>; Krause et al., <xref ref-type="bibr" rid="B26">2012</xref>) on the Swiss National Supercomputing Centre (CSCS). For one simulation with <italic>T</italic> &#x0003D; 4 s, the compute time of the high fidelity model is 1 h40 min with 8 nodes, whereas the compute time of the low fidelity model is 14 min with 4 nodes. This means that the low fidelity model is approximately 16 times faster than the high fidelity model.</p></sec>
<sec>
<title>2.2. Pacing Protocol for Atrial Fibrillation</title>
<p>The stimulation protocol, encoded in the function <italic>I</italic><sub>stim</sub>(<bold>x</bold>, <italic>t</italic>), is defined by a point <bold>x</bold><sub>stim</sub> &#x02208; &#x003A9; and a vector of distinct times <inline-formula><mml:math id="M3"><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>&#x003C4;</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>stim</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>stim</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:mrow></mml:msubsup></mml:math></inline-formula> through the expression
<disp-formula id="E2"><label>(2)</label><mml:math id="M4"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd columnalign="right"><mml:msub><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>&#x003C4;</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd columnalign="left"><mml:msub><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mtext>max</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000D7;</mml:mo><mml:mstyle 
displaystyle="true"><mml:msubsup><mml:mrow><mml:mo>&#x022C3;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mtext>stim</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:msubsup></mml:mstyle><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mo>&#x00394;</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mtext>otherwise</mml:mtext><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where <italic>B</italic><sub><italic>r</italic></sub>(<bold>x</bold><sub>stim</sub>) &#x0003D; {<bold>x</bold> &#x02208; &#x003A9;:<bold>x</bold><sub>stim</sub> &#x02264; <bold>x</bold> &#x02264; <bold>x</bold><sub>stim</sub> &#x0002B; <italic>r</italic>} is an <italic>r</italic>-neighborhood (the &#x02264; is meant component-wise) of the stimulation site, and &#x00394;&#x003C4; &#x0003E; 0 is the stimulus duration. In this study, the vector <italic><bold>&#x003C4;</bold></italic><sub>stim</sub> is fixed as in <xref ref-type="fig" rid="F3">Figure 3</xref> (middle panel), which consists of a series of <italic>N</italic><sub>stim</sub> &#x0003D; 14 stimuli with decreasing temporal distance, whereas <bold>x</bold><sub>stim</sub> varies for each simulation. Each stimulus lasts &#x00394;&#x003C4; &#x0003D; 4 ms, and has a strength <italic>I</italic><sub>max</sub> &#x0003D; 800 &#x003BC;A cm<sup>&#x02212;2</sup> with a fixed radius <italic>r</italic> &#x0003D; 0.8 cm, which is enough to maximize the chance that the tissue correctly captures it (Potse et al., <xref ref-type="bibr" rid="B39">2018</xref>). The induction of AF is not successful when (<italic>V</italic><sub>m</sub>, <bold>w</bold>) asymptotically approaches the resting state after the delivery of the last stimulus. Otherwise, if a self-sustained activity is still present at the end of the simulation, the induction of AF is successful. The idea is summarized in <xref ref-type="fig" rid="F3">Figure 3</xref>. For the sake of simplicity, in this work there is no distinction between a true AF episode and an atrial flutter, which could be understood as a periodic solution of the monodomain system.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Inducibility of AF in the computer model. Two simulations with different pacing sites (grey stars) and inducibility outputs. The middle plot illustrates the pacing protocol. Top: transmembrane potential resulting from a successful induction of AF and corresponding lead II ECG recording. The stimulation results in a self-sustained activity. Bottom: transmembrane potential resulting from an unsuccessful induction of AF and corresponding lead II ECG recording. The stimulation results in a vanishing wave.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0003.tif"/>
</fig>
<p>The objective of this work is to learn the set <inline-formula><mml:math id="M6"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow><mml:mo>&#x02282;</mml:mo><mml:mo>&#x003A9;</mml:mo></mml:math></inline-formula>, such that if <inline-formula><mml:math id="M7"><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext>stim</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow></mml:math></inline-formula> a sustained episode of AF is observed. In particular, we are interested in approximating the indicator function of <inline-formula><mml:math id="M8"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow></mml:math></inline-formula>, denoted by <bold>F</bold>:&#x003A9; &#x02192; {0, 1} such that <inline-formula><mml:math id="M9"><mml:msup><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">F</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow></mml:math></inline-formula>. The overall inducibility, which reflects the fraction of the tissue where AF can be initiated, follows immediately from the definition of <inline-formula><mml:math id="M10"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow></mml:math></inline-formula> as
<disp-formula id="E4"><mml:math id="M11"><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">I</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">A</mml:mi></mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>|</mml:mo></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>|</mml:mo></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:msub><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003A9;</mml:mo></mml:mrow></mml:msub></mml:mstyle><mml:mi>F</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#x000A0;d</mml:mtext><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
Interestingly, the formula generalizes to the case of non-uniformly distributed ectopic foci. Let &#x003C1;(<bold>x</bold>) be the probability density function of the distribution of foci, then the inducibility can be obtained as
<disp-formula id="E5"><mml:math id="M12"><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">I</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:msub><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003A9;</mml:mo></mml:mrow></mml:msub></mml:mstyle><mml:mi>F</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>&#x003C1;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#x000A0;d</mml:mtext><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
In this way, for instance, it is possible to account for a higher density of ectopic activity around the pulmonary veins and fibrotic regions. In this work, we will only consider a uniform distribution of foci, equivalent to select &#x003C1;(<bold>x</bold>) &#x0003D; |&#x003A9;|<sup>&#x02212;1</sup>.</p></sec>
<sec>
<title>2.3. Classification With Gaussian Processes</title>
<p>Next, we present the proposed methodology for learning the inducibility function <bold>F</bold> from a limited set of simulations. We start by assuming that we have a data-set of <italic>N</italic> input/output pairs <inline-formula><mml:math id="M13"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula>, where <bold>x</bold><sub><italic>i</italic></sub> &#x02208; &#x003A9; and <italic>y</italic><sub><italic>i</italic></sub> &#x02208; {0, 1}. Since the atrial wall is thin, we constrain the points to belong to a mid-wall smooth atrial surface <inline-formula><mml:math id="M14"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">S</mml:mi></mml:mrow><mml:mo>&#x02282;</mml:mo><mml:mo>&#x003A9;</mml:mo></mml:math></inline-formula>. We remark however that there is no loss of generality in the following presentation, as the methodology applies to the volumetric domain &#x003A9; in the same manner. Moreover, since <italic>y</italic><sub><italic>i</italic></sub> takes only binary values, we also restrict the scope of this work to binary classification. We also note that it is straightforward to extend this framework to the multi-class classification setting.</p>
<p>The classical formulation of Gaussian process classification defines an intermediate variable which is computed from a latent function <italic>f</italic>(<bold>x</bold>) (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>). Throughout this article, we will assume standardized data-sets and work with zero-mean Gaussian process priors of the form <inline-formula><mml:math id="M15"><mml:mi>f</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">G</mml:mi><mml:mi mathvariant="-tex-caligraphic">P</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mn>0</mml:mn></mml:mstyle><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle class="text"><mml:mtext mathvariant="bold">x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>;</mml:mo><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>. Here, <italic>k</italic>(&#x000B7;, &#x000B7;; &#x003B8;) is a covariance kernel function, which depends on a set of parameters &#x003B8;. We adopt a fully Bayesian treatment and prescribe prior distributions over these parameters, which we will specify later (Neal, <xref ref-type="bibr" rid="B30">1999</xref>). To obtain class probability predictions we pass the Gaussian process output <italic>f</italic> through a non-linear warping function &#x003C3;:&#x0211D; &#x02192; [0, 1], such that the output is constrained to [0, 1], rendering meaningful class probabilities. 
We define the conditional class probability as &#x003C0;(<bold>x</bold>) &#x0003D; &#x02119;[<italic>y</italic> &#x0003D; 1|<bold>x</bold>] &#x0003D; &#x003C3;(<italic>f</italic>(<bold>x</bold>)). A common choice for &#x003C3;(<italic>f</italic>) is the logistic sigmoid function &#x003C3;(<italic>f</italic>) &#x0003D; (1 &#x0002B; exp(&#x02212;<italic>f</italic>))<sup>&#x02212;1</sup>, which we will use throughout this work. We assume that the class labels are distributed according to a Bernoulli likelihood with probability &#x003C3;(<italic>f</italic>) (Nickisch and Rasmussen, <xref ref-type="bibr" rid="B31">2008</xref>).</p></sec>
<sec>
<title>2.4. Gaussian Process Priors on Manifolds</title>
<p>A crucial step in building a Gaussian process classifier is the choice of the kernel function. A popular choice is the Mat&#x000E9;rn kernel, which explicitly allows one to encode smoothness assumptions for the latent functions <italic>f</italic>(<bold>x</bold>) (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>). In a Euclidean space setting, the kernel function has the form (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>):
<disp-formula id="E6"><label>(3)</label><mml:math id="M16"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>&#x00393;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BD;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msqrt><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msqrt><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mstyle 
mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>K</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msqrt><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msqrt><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where &#x00393; is the gamma function, and <italic>K</italic><sub>&#x003BD;</sub> is the modified Bessel function of the second kind. The parameter &#x003B7; controls the overall variance of the Gaussian process, the parameter &#x02113; controls the spatial correlation length-scale, and &#x003BD; controls the regularity of the latent functions <italic>f</italic>(<bold>x</bold>) (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>). When &#x003BD; &#x02192; &#x0221E;, we recover the popular squared exponential kernel, also known as radial basis function, that yields a prior over smooth functions with infinitely many continuous derivatives.</p>
<p>The form presented in Equation (3) is not suitable to be used on manifolds, such as the atrial surface. A naive approach is to replace the Euclidean distance between points with the geodesic distance on the manifold surface. Even though this approach may work for some cases, there is no guarantee that the resulting covariance matrix between input points will be positive semi-definite (Pezzuto et al., <xref ref-type="bibr" rid="B35">2019</xref>; Borovitskiy et al., <xref ref-type="bibr" rid="B2">2020</xref>), a key requirement for a kernel function. As a matter of fact, the choice of the kernel is problematic in this case. For instance, the Mat&#x000E9;rn family does not yield positive definite kernels even on the sphere, except for a few exceptional choices of the parameters (Gneiting, <xref ref-type="bibr" rid="B18">2013</xref>). Here, we follow an alternative approach, implicitly based on the solution of the following stochastic partial differential equation (SPDE) (Whittle, <xref ref-type="bibr" rid="B49">1963</xref>; Lindgren et al., <xref ref-type="bibr" rid="B27">2011</xref>):
<disp-formula id="E7"><label>(4)</label><mml:math id="M17"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003BA;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mstyle mathvariant="bold"><mml:mtext>I</mml:mtext></mml:mstyle><mml:mo>-</mml:mo><mml:mo>&#x00394;</mml:mo></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x003BD;</mml:mi><mml:mo>/</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x0002B;</mml:mo><mml:mi>d</mml:mi><mml:mo>/</mml:mo><mml:mn>4</mml:mn></mml:mrow></mml:msup><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">W</mml:mi></mml:mrow><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle mathvariant="bold"><mml:mtext>n</mml:mtext></mml:mstyle><mml:mo>&#x000B7;</mml:mo><mml:mo>&#x02207;</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003BA;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mstyle mathvariant="bold"><mml:mtext>I</mml:mtext></mml:mstyle><mml:mo>-</mml:mo><mml:mo>&#x00394;</mml:mo></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msup><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mstyle class="math"><mml:mstyle class="text"><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:mi>&#x02202;</mml:mi><mml:mo>&#x003A9;</mml:mo></mml:mstyle><mml:mtext>,</mml:mtext><mml:mstyle 
class="math"><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:mo>&#x0230A;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#x003BD;</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mn>4</mml:mn></mml:mrow></mml:mfrac></mml:mrow><mml:mo>&#x0230B;</mml:mo></mml:mrow></mml:mstyle></mml:mstyle><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where &#x02212;&#x00394; is the Laplace-Beltrami operator on the <italic>d</italic>-dimensional manifold, and <inline-formula><mml:math id="M18"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">W</mml:mi></mml:mrow></mml:math></inline-formula> is the spatial Gaussian white noise on &#x003A9;. When &#x003A9; &#x0003D; &#x0211D;<sup><italic>d</italic></sup>, the solution of the fractional SPDE is a Mat&#x000E9;rn random field with <inline-formula><mml:math id="M19"><mml:mi>&#x003BA;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msqrt><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x003BD;</mml:mi></mml:mrow></mml:msqrt></mml:mrow><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow></mml:mfrac></mml:math></inline-formula> (Lindgren et al., <xref ref-type="bibr" rid="B27">2011</xref>). However, compared to Equation (3), the SPDE in Equation (4) trivially generalizes to manifolds with no loss of positive definiteness of the correlation kernel, thanks to the properties of the pseudo-differential operator (Borovitskiy et al., <xref ref-type="bibr" rid="B2">2020</xref>). The correlation function can be explicitly written as follows. Let <inline-formula><mml:math id="M20"><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> be the eigenvalue/eigenfunction pairs of the Laplace-Beltrami operator with pure Neumann boundary conditions, that is
<disp-formula id="E8"><label>(5)</label><mml:math id="M21"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>-</mml:mo><mml:mo>&#x00394;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:mo>&#x003A9;</mml:mo><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>-</mml:mo><mml:mstyle mathvariant="bold"><mml:mtext>n</mml:mtext></mml:mstyle><mml:mo>&#x000B7;</mml:mo><mml:mo>&#x02207;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:mi>&#x02202;</mml:mi><mml:mo>&#x003A9;</mml:mo><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
for all <italic>i</italic> &#x02208; &#x02115;. Then, we can represent Mat&#x000E9;rn-like kernels on manifolds as (Coveney et al., <xref ref-type="bibr" rid="B9">2019</xref>; Borovitskiy et al., <xref ref-type="bibr" rid="B2">2020</xref>):
<disp-formula id="E9"><label>(6)</label><mml:math id="M22"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>;</mml:mo><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003BD;</mml:mi><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where <italic>C</italic> is a normalizing constant. This eigen-decomposition also enables a direct solution of the SPDE, providing the following representation of the Gaussian process prior:
<disp-formula id="E10"><label>(7)</label><mml:math id="M23"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>f</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02248;</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mi>&#x003BD;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mn>4</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x02003;&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">N</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
In practice, the eigen-decomposition is truncated to a number <italic>N</italic><sub>eig</sub> of pairs.</p>
<p>In this work, we discretize the manifold <inline-formula><mml:math id="M24"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">S</mml:mi></mml:mrow><mml:mo>&#x02282;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211D;</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula> using a triangulated mesh and solve Equation (5) using finite element shape functions. As such, we can obtain the stiffness matrix <italic><bold>A</bold></italic> and mass matrix <italic><bold>M</bold></italic>:
<disp-formula id="E11"><label>(8)</label><mml:math id="M25"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>A</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mover class="overset"><mml:mrow><mml:mstyle displaystyle="true"><mml:munder><mml:mrow><mml:mtext>A</mml:mtext></mml:mrow><mml:mrow><mml:mtext>e</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munder></mml:mstyle></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mtext>el</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mover><mml:mstyle displaystyle="true"><mml:msub><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">B</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:mstyle><mml:mo>&#x02207;</mml:mo><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:mo>&#x02207;</mml:mo><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mtext>&#x000A0;</mml:mtext><mml:mi>d</mml:mi><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>M</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mover class="overset"><mml:mrow><mml:mstyle displaystyle="true"><mml:munder><mml:mrow><mml:mtext>A</mml:mtext></mml:mrow><mml:mrow><mml:mtext>e</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munder></mml:mstyle></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mtext>el</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mover><mml:mstyle 
displaystyle="true"><mml:msub><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">B</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:mstyle><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mtext>&#x000A0;</mml:mtext><mml:mi>d</mml:mi><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where A represents the assembly of the local element matrices, and <italic>N</italic> are the finite element shape functions. Then, we solve the eigenvalue problem:
<disp-formula id="E12"><label>(9)</label><mml:math id="M26"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mstyle mathvariant="bold-italic"><mml:mi>A</mml:mi></mml:mstyle><mml:mstyle mathvariant="bold-italic"><mml:mi>v</mml:mi></mml:mstyle><mml:mo>=</mml:mo><mml:mi>&#x003BB;</mml:mi><mml:mstyle mathvariant="bold-italic"><mml:mi>M</mml:mi></mml:mstyle><mml:mstyle mathvariant="bold-italic"><mml:mi>v</mml:mi></mml:mstyle></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
In practice, to compute the kernel in Equation (6) we use a portion of all the resulting eigenpairs, starting from the smallest eigenvalues. We also use the corresponding eigenvectors as the eigenfunctions with <italic>f</italic>(<italic><bold>x</bold></italic><sub>i</sub>) &#x0003D; <italic><bold>v</bold></italic><sub>i</sub>, where <italic>i</italic> is the node index at location <bold>x</bold><sub>i</sub>. Given that the eigenvalue problem is solved only once as a pre-processing step, this methodology provides an efficient way to compute the kernel and the prior on a manifold.</p></sec>
<sec>
<title>2.5. Bayesian Inference</title>
<p>We finalize our Bayesian model description by prescribing the prior distributions for the kernel parameters. We assume the following distributions for the parameters &#x003B8; &#x0003D; {&#x003B7;, &#x02113;},
<disp-formula id="E13"><label>(10)</label><mml:math id="M27"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B7;</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mtext>HalfNormal</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C3;</mml:mi><mml:mo>=</mml:mo><mml:mn>10000</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E14"><label>(11)</label><mml:math id="M28"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x02113;</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mtext>Gamma</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>&#x003B2;</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
The posterior distribution over the model parameters &#x003B8; &#x0003D; {&#x003B7;, &#x02113;} cannot be described analytically, and thus we must resort to approximate inference techniques to calibrate this Bayesian model on the available data. To this end, we use the No-U-Turn sampler (NUTS) (Hoffman and Gelman, <xref ref-type="bibr" rid="B21">2014</xref>), which is a type of Hamiltonian Monte Carlo algorithm, as implemented in NumPyro (Phan et al., <xref ref-type="bibr" rid="B36">2019</xref>). We use one chain, and set the target accept probability to 0.9. The first 500 samples are used to adjust the step size of the sampler, and are later discarded. We use the subsequent 500 samples to statistically estimate the parameters &#x003B8;.</p>
<p>Once we have completed the inference, we can make predictions <italic><bold>y</bold></italic><sup>&#x0002A;</sup> at new locations <italic><bold>x</bold></italic><sup>&#x0002A;</sup> in three steps. First, we compute the predictive posterior distribution of the latent function <inline-formula><mml:math id="M29"><mml:msup><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">N</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x003A3;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>, which by construction follows a multi-variate normal distribution, with a mean <bold>&#x003BC;</bold> and covariance &#x003A3; obtained by conditioning on the available training data (Rasmussen and Williams, <xref ref-type="bibr" rid="B42">2006</xref>):
<disp-formula id="E15"><label>(12)</label><mml:math id="M30"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003BC;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mstyle mathvariant="bold-italic"><mml:mi>f</mml:mi></mml:mstyle></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E16"><label>(13)</label><mml:math id="M31"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x003A3;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where the covariance matrix <italic><bold>K</bold></italic> &#x02208; &#x0211D;<sup><italic>N</italic> &#x000D7; <italic>N</italic></sup> results from evaluating the kernel function <italic>k</italic>(&#x000B7;, &#x000B7;; &#x003B8;) at the locations of the input training data <italic><bold>X</bold></italic> and <italic><bold>f</bold></italic> &#x0003D; <italic>f</italic>(<italic><bold>X</bold></italic>), respectively. We then proceed by sampling &#x003BC;, &#x003A3; using model parameters drawn from the estimated posterior distributions of &#x003B8; and <italic><bold>f</bold></italic>. This will result in a number of random variables <italic><bold>f</bold></italic><sup>&#x0002A;</sup> that are independent and normally distributed, which can be used to compute statistical averages as
<disp-formula id="E17"><label>(14)</label><mml:math id="M32"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">N</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover><mml:mo>,</mml:mo><mml:mover accent="true"><mml:mrow><mml:mo>&#x003A3;</mml:mo></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x02003;&#x000A0;</mml:mtext><mml:mover accent="true"><mml:mrow><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mtext>s</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mtext>s</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x02003;&#x000A0;</mml:mtext><mml:mover accent="true"><mml:mrow><mml:mo>&#x003A3;</mml:mo></mml:mrow><mml:mo>^</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mtext>s</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover 
accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mtext>s</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mo>&#x003A3;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where <italic>N</italic><sub>s</sub> is the number of samples considered for &#x003B8; and <italic><bold>f</bold></italic>. We finally pass <inline-formula><mml:math id="M33"><mml:msup><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:math></inline-formula> through the logistic sigmoid function &#x003C3; to obtain a distribution of class probabilities <italic><bold>y</bold></italic><sup>&#x0002A;</sup>.</p></sec>
<sec>
<title>2.6. Multi-Fidelity Classification With Gaussian Processes</title>
<p>In this work, we will assume that we have 2 information sources of different fidelity. We will call the high fidelity, computationally expensive, and hard to acquire information source with the subscript <italic>H</italic> and the inexpensive, faster to compute, low fidelity source with the subscript <italic>L</italic>. Now, our data set comes from these two sources <inline-formula><mml:math id="M34"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msubsup></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mstyle 
mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="bold-italic"><mml:mi>y</mml:mi></mml:mstyle></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula>. We will postulate two latent functions <italic>f</italic><sub><italic>H</italic></sub> and <italic>f</italic><sub><italic>L</italic></sub>, respectively, that are related through an auto-regressive prior (Kennedy and O&#x00027;Hagan, <xref ref-type="bibr" rid="B25">2000</xref>).
<disp-formula id="E18"><label>(15)</label><mml:math id="M35"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x003C1;</mml:mi><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B4;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
Under this model structure, the high fidelity function is expressed as a combination of the low fidelity function scaled by &#x003C1;, corrected with another latent function &#x003B4;(<italic><bold>x</bold></italic>) that explains the difference between the different levels of fidelity. Following (Kennedy and O&#x00027;Hagan, <xref ref-type="bibr" rid="B25">2000</xref>), we assume Gaussian process priors on these latent functions.
<disp-formula id="E19"><label>(16)</label><mml:math id="M36"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">G</mml:mi><mml:mi mathvariant="-tex-caligraphic">P</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mn>0</mml:mn></mml:mstyle><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>&#x003B8;</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E20"><label>(17)</label><mml:math id="M37"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B4;</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">G</mml:mi><mml:mi mathvariant="-tex-caligraphic">P</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mn>0</mml:mn></mml:mstyle><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>&#x003B8;</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
The vectors <italic><bold>&#x003B8;</bold></italic><sub><italic>L</italic></sub> and <italic><bold>&#x003B8;</bold></italic><sub><italic>H</italic></sub> contain the kernel hyper-parameters of this multi-fidelity Gaussian process model. The choice of the auto-regressive model leads to a joint prior distribution over the latent functions that can be expressed as (Kennedy and O&#x00027;Hagan, <xref ref-type="bibr" rid="B25">2000</xref>).
<disp-formula id="E21"><label>(18)</label><mml:math id="M38"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mstyle mathvariant="bold-italic"><mml:mi>f</mml:mi></mml:mstyle><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>f</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>f</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">N</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:mstyle mathvariant="bold"><mml:mn>0</mml:mn></mml:mstyle></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle mathvariant="bold"><mml:mn>0</mml:mn></mml:mstyle></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle 
mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
with
<disp-formula id="E22"><label>(19)</label><mml:math id="M39"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none none none none none none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x003C1;</mml:mi><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mstyle 
mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>K</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mstyle 
mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
The global covariance matrix <italic><bold>K</bold></italic> of this multi-fidelity Gaussian process model has a block structure corresponding to the different levels of fidelity, where <italic><bold>K</bold></italic><sub><italic>HH</italic></sub> and <italic><bold>K</bold></italic><sub><italic>LL</italic></sub> model the spatial correlation of the data observed in each fidelity level, and <italic><bold>K</bold></italic><sub><italic>LH</italic></sub> models the cross-correlation between the two levels of fidelity. We also have kernel parameters for the different levels of fidelity. We again use the Mat&#x000E9;rn as described in Section 2.4, which results in parameters &#x003B8;<sub><italic>H</italic></sub> &#x0003D; (&#x003B7;<sub><italic>H</italic></sub>, &#x02113;<sub><italic>H</italic></sub>), and &#x003B8;<sub><italic>L</italic></sub> &#x0003D; (&#x003B7;<sub><italic>L</italic></sub>, &#x02113;<sub><italic>L</italic></sub>). For these parameters and the scaling factor &#x003C1;, we consider the following prior distributions
<disp-formula id="E23"><label>(20)</label><mml:math id="M40"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0007E;</mml:mo><mml:mtext>HalfNormal</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C3;</mml:mi><mml:mo>=</mml:mo><mml:mn>10000</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E24"><label>(21)</label><mml:math id="M41"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>&#x02113;</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0007E;</mml:mo><mml:mtext>Gamma</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>&#x003B2;</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E25"><label>(22)</label><mml:math id="M42"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003C1;</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mtext>Normal</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi>&#x003C3;</mml:mi><mml:mo>=</mml:mo><mml:mn>10</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
We can perform inference and prediction for this model in the same way as for the single fidelity classifier, as detailed in Section 2.5. In particular, we can use Equations (12) and (13) with the entire covariance matrix <italic><bold>K</bold></italic> to obtain the conditional mean and covariance of <italic><bold>f</bold></italic><sup>&#x0002A;</sup>.</p></sec>
<sec>
<title>2.7. Active Learning</title>
<p>Here, we take advantage of the uncertainty predictions that are inherent to Gaussian processes and are absent in other types of classifiers, such as nearest neighbor. Specifically, at each active learning iteration, we train the classifier, and select the next point that should be included in our training data-set by solving the following optimization problem (Kapoor et al., <xref ref-type="bibr" rid="B23">2007</xref>; Sahli Costabal et al., <xref ref-type="bibr" rid="B44">2019</xref>):
<disp-formula id="E26"><label>(23)</label><mml:math id="M43"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">arg&#x000A0;min</mml:mo></mml:mrow><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>X</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:munder></mml:mstyle><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mover accent="true"><mml:mrow><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mo class="qopname">^</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:msqrt><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mo>&#x003A3;</mml:mo></mml:mrow><mml:mo class="qopname">^</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msqrt></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
where <italic><bold>X</bold></italic><sub><italic>cand</italic></sub> represents a set of candidate locations that can be acquired. In our case, we use all the nodes in the mesh as candidates, except the ones at the boundaries which have artificially high variance. This active learning criterion presents a good balance between exploitation (sampling near the classification decision boundary) and exploration (discovering new inducible regions). It can be seen as promoting the selection of points that tend to be located near the decision boundary (&#x003C3;(&#x003BC; &#x0003D; 0) &#x0003D; 0.5), or points in regions with high uncertainty (as reflected by the posterior variance &#x003A3;). We keep adding points via this sequential active learning procedure until we have reached the desired number of samples.</p></sec></sec>
<sec id="s3">
<title>3. Numerical Experiments</title>
<sec>
<title>3.1. Numerical Assessment</title>
<p>We first create a synthetic example to test the performance of the proposed classifier. We study the length scale of different random fields that could represent the potential inducibility maps that we want to approximate in this study. In particular, we use a mesh based on the anatomy of the mid-layer described in Section 2.1. Here, we represent the left and right atria with 3,298 nodes and 6,335 triangles. First, we normalize the geometry by the largest standard deviation of one of its coordinates. In this way, we can use the same prior distributions regardless of the particular geometry. Then, we generate Gaussian random fields on the atrial manifold with zero mean and the Mat&#x000E9;rn covariance kernel, as detailed in Equation (6). We use 1,000 eigenpairs to construct a computable kernel function approximation with &#x003BD; &#x0003D; 3/2 and &#x003B7; &#x0003D; 1. We consider different length scales to simulate inducibility regions and assess the performance of the classifier: &#x02113; &#x0003D; {0.2, 0.4, 0.6, 0.8, 1.0}. Finally, we pass the resulting random field through the sigmoid function &#x003C3; to obtain values between zero and one, which we round to the nearest integer to create discrete labels.</p>
<p>Examples of the resulting random fields can be seen in <xref ref-type="fig" rid="F4">Figure 4</xref>, left column. We compare three different classifiers. First, as a baseline benchmark, we create a nearest neighbor classifier. Here, the prediction of an unknown point is based on the label of the closest data point. Since we are working with a manifold, we use the geodesic distance to find the closest point, which we compute using the heat method (Crane et al., <xref ref-type="bibr" rid="B11">2013</xref>). As a data-set, we use a fixed design spread through the manifold surface. To select the locations, we first randomly pick a node in the mesh, and then we add the node that is further away from the initial node using the geodesic distance. Then, we iterate, finding the point that is further away from all the nodes already included in the data-set, until we reach the desired data-set size. The second classifier that we consider is a Gaussian process classifier, as described in the previous sections, that is trained on the same fixed experimental design. The final classifier is also a Gaussian process classifier, which we train with the first 20 samples of the fixed experimental design, and then we apply the proposed active learning procedure. For all the Gaussian process classifiers in these experiments, we set the number of eigenfunctions used to <italic>N</italic><sub>eig</sub> &#x0003D; 1,000.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Numerical assessment of the Gaussian process classifier. We create different random examples with different correlation lengths (first column) and train a nearest neighbor classifier (second column), a Gaussian process classifier trained with the same data-set as the nearest neighbor classifier (third column), and a Gaussian process classifier that adaptively selects the training points through active learning (fourth column). The black bars represent the size of the length scale relative to the atrial geometry.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0004.tif"/>
</fig>
<p>In these examples, we test the performance of the three different classifiers, using between 20 and 100 samples, and 10 different random fields for each of the 5 length scales selected. To take into account potential imbalances of classes in the examples generated, we use the balanced accuracy score. This metric is defined as the arithmetic mean of the sensitivity and specificity as
<disp-formula id="E27"><label>(24)</label><mml:math id="M44"><mml:mtable class="eqnarray" columnalign="right"><mml:mtr><mml:mtd><mml:mtext>balanced accuracy</mml:mtext><mml:mo>=</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mtext>&#x00023; of correctly predicted positives</mml:mtext></mml:mrow><mml:mrow><mml:mtext>&#x00023; of real positives</mml:mtext></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mtext>&#x00023; of correctly predicted negatives</mml:mtext></mml:mrow><mml:mrow><mml:mtext>&#x00023; of real negatives</mml:mtext></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
In contrast to conventional accuracy, this metric will reflect if a classifier is predominately predicting one class due to the higher proportion of samples present in the data-set.</p>
<p>The results of this assessment are summarized in <xref ref-type="fig" rid="F4">Figures 4</xref>, <xref ref-type="fig" rid="F5">5</xref>. We first observe in <xref ref-type="fig" rid="F4">Figure 4</xref>, left column, that the complexity of the classification regions increases as the correlation length scale is reduced. In the same figure, we show the different classifiers trained with 100 samples. It is visually possible to note that the accuracy of the classifiers degrades as the length scale of the ground-truth classification surface is decreased. For the length scale &#x02113; &#x0003D; 0.2, some regions are not captured by the classifiers. We also note that the Gaussian process classification boundaries tend to be smoother than the nearest neighbor classifier. These differences are quantified in <xref ref-type="fig" rid="F5">Figure 5</xref>. We first compare the improvements in accuracy between the nearest neighbor classifier and the Gaussian process classifier with a fixed design in the top row. These two methods are trained with identical data, and we observe that for most cases and number of samples, the Gaussian process classifier is more accurate than the nearest neighbor classifier. The accuracy improvements at 100 samples range on average from 0.8% at &#x02113; &#x0003D; 0.2 to 2.4% at &#x02113; &#x0003D; 0.8. Then, we compare the nearest neighbor classifier with the Gaussian process classifier trained with active learning. These two classifiers only share the first 20 points of data. Then the active learning classifier judiciously selects the remaining samples attempting to maximize accuracy. We observe that the accuracy improvements are more pronounced with the active learning for &#x02113; &#x0003D; 0.4 &#x02212; 1.0. The average improvements at 100 samples range from 3.0% at &#x02113; &#x0003D; 0.4 to 6.2% at &#x02113; &#x0003D; 0.8. For &#x02113; &#x0003D; 0.2, we see an average decrease in accuracy of 1.0% at 100 samples. 
In the last row of <xref ref-type="fig" rid="F5">Figure 5</xref>, we see the average accuracies for the three classifiers at 100 samples, reflecting the improvements in accuracy obtained by the Gaussian process classifiers already described. We see that all classifiers tend to decrease their accuracy as the length scale decreases, which coincides with the increased complexity of classification boundaries for lower length scales seen in <xref ref-type="fig" rid="F4">Figure 4</xref>. This detriment in performance becomes more pronounced between &#x02113; &#x0003D; 0.4 and &#x02113; &#x0003D; 0.2. This change corresponds with the average geodesic distance between points in the fixed design data-set, which is equal to 0.39. This metric is shown as a dashed vertical line in the bottom row plot of <xref ref-type="fig" rid="F5">Figure 5</xref>. Classification regions with a characteristic size smaller than this value could be ignored by the classifiers, which is what we observe in the top row of <xref ref-type="fig" rid="F5">Figure 5</xref>. In these cases, the uncertainty estimates used for active learning might be inadequate, leading to a worse performance compared to the longer length scale cases. Overall, we see that Gaussian process classifiers and active learning provide advantages in accuracy when compared to the baseline nearest neighbor classifier.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Accuracy of the numerical assessment. We quantify the improvements in accuracy when using a Gaussian process classifier versus the nearest neighbor classifier (top row) and when using a Gaussian process classifier with active learning, versus the baseline nearest neighbor classifier (middle row) for different length scales. The gray lines show the balanced accuracy improvements of the 10 examples for each length scale and the black line shows the mean improvement. The bottom row shows how the average balanced accuracy changes with length scale when the classifiers are trained with 100 samples. The dashed vertical line represents the average geodesic distance between training points of the fixed design.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0005.tif"/>
</fig></sec>
<sec>
<title>3.2. Characterization of Inducibility Regions</title>
<p>We examine the inducibility of the 9 models described in Section 2.1, specifically 3 different fibrotic patterns and 3 ablation strategies: no ablation, PVI, and PVI&#x0002B;BOX. For each model, we create a training set and test set, both containing 100 samples, using a fixed design, as described in Section 3.1 and shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. We run the model using each of these points as a pacing site and check whether AF was induced or not. For the training set, we also run the low fidelity model, obtaining 100 samples. In total, we run 1,800 high fidelity simulations and 900 low fidelity simulations. We test three different classifiers for both cases: a nearest neighbor classifier described in Section 3.1, a single-fidelity Gaussian process classifier described in Section 2.3, and a multi-fidelity Gaussian process classifier described in Section 2.6 with 100 low fidelity samples. We train the classifiers with different amounts of data from the training set, ranging from 20 to 100 points. For each level of data, we evaluate the performance of the classifier computing the balanced accuracy in the 100 samples of the test set.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Inducibility maps for the three cases. The performance of the classifiers is analyzed for three cases: a case with no ablation <bold>(A)</bold>, a case with PVI ablation <bold>(B)</bold> and a case with PVI&#x0002B;BOX ablation <bold>(C)</bold>. In each panel, the leftmost column shows the training set (top) and the single-fidelity Gaussian process classifier trained with 100 low fidelity samples (bottom). In the remaining panels, we show the nearest neighbor, single-fidelity Gaussian process classifier, and multi-fidelity Gaussian process classifier trained with 50 and 100 high fidelity samples. The ground truth points are also shown in these panels.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0006.tif"/>
</fig>
<p>The results of this numerical experiment are summarized in <xref ref-type="fig" rid="F6">Figures 6</xref>&#x02013;<xref ref-type="fig" rid="F8">8</xref> and <xref ref-type="table" rid="T1">Table 1</xref>. First, we note that training and predicting with the Gaussian process classifiers only takes a negligible fraction of the cost of the high fidelity model, less than 5 min on a laptop. In <xref ref-type="fig" rid="F6">Figure 6</xref>, we show the resulting classifiers trained with the same 50 and 100 high fidelity samples and also the low fidelity classifier trained with 100 samples. We see that the multi-fidelity classifier at 50 and 100 samples shares some features with the low fidelity classifier that are not present in the other two classifiers. Nonetheless, the multi-fidelity classifier is learning from the high fidelity data, as its balanced accuracy increases as the number of samples increases, as seen in <xref ref-type="fig" rid="F8">Figure 8</xref>. We observe that the differences in accuracy tend to collapse as more data is available, showing small differences when 100 samples are provided to the classifiers. The multi-fidelity classifier has the biggest advantage in the small data regime, when it is trained with between 20 and 70 high fidelity samples. Perhaps surprisingly, we see that the low fidelity classifier is always more accurate than the single-fidelity classifiers trained with 20 samples. The cost of training the low fidelity classifier is approximately equivalent to the cost of acquiring 6.25 high fidelity samples, which makes it a cost-effective alternative to estimate the inducibility with limited budget. Along the same line, we compare the accuracies of the different classifiers for the different cases when training with the equivalent cost of 40 high fidelity simulations in <xref ref-type="fig" rid="F7">Figure 7B</xref>. 
This is the number of simulations that has been used in clinical studies to optimize the ablation treatment (Boyle et al., <xref ref-type="bibr" rid="B4">2019</xref>). We observe that by using the multi-fidelity classifier we gain, on average, 5.4% points of accuracy compared to the single-fidelity classifier and 5.7% compared to the nearest neighbor classifier. Only in one case was there a decrease in accuracy when using the multi-fidelity classifier, but of only 0.45% points of accuracy.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Performance of the classifiers. In <bold>(A)</bold>, the agreement between the low fidelity and the high fidelity model is reported as a confusion matrix, as resulting from 1,800 simulations (900 per fidelity). Moreover, each point is colored according to the case of fibrosis. In <bold>(B)</bold>, we compare the balanced accuracy for the nearest neighbor, single-fidelity, and multi-fidelity classifier, for all nine model scenarios and with a fixed budget of 40 high fidelity simulations.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0007.tif"/>
</fig>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>Accuracies for 9 different cases. We show how the balanced accuracy evolves as more samples (from 20 to 100) are available for the multi-fidelity, single-fidelity, and nearest neighbor classifiers. The samples are represented as the cost of running a high fidelity model and the multi-fidelity curve is shifted to the right to account for the cost of 100 low fidelity simulations. The dashed horizontal line represents the accuracy of a Gaussian process classifier trained with 100 low fidelity simulations predicting the high fidelity test set.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fphys-13-757159-g0008.tif"/>
</fig>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Inducibility results from the 1,800 high fidelity simulations and 900 low fidelity simulations.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th/>
<th valign="top" align="center" colspan="3" style="border-bottom: thin solid #000000;"><bold>Inducibility [%]</bold></th>
<th/>
</tr>
<tr>
<th valign="top" align="left"><bold>Fibrosis</bold></th>
<th valign="top" align="left"><bold>Ablation</bold></th>
<th valign="top" align="center"><bold>Test</bold></th>
<th valign="top" align="center"><bold>Train</bold></th>
<th valign="top" align="center"><bold>Low fidelity</bold></th>
<th valign="top" align="center"><bold>Low and high fidelity agreement [%]</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Moderate</td>
<td valign="top" align="left">-</td>
<td valign="top" align="center">60</td>
<td valign="top" align="center">58</td>
<td valign="top" align="center">55</td>
<td valign="top" align="center">77</td>
</tr>
<tr>
<td valign="top" align="left">Moderate</td>
<td valign="top" align="left">PVI</td>
<td valign="top" align="center">52</td>
<td valign="top" align="center">43</td>
<td valign="top" align="center">41</td>
<td valign="top" align="center">76</td>
</tr>
<tr>
<td valign="top" align="left">Moderate</td>
<td valign="top" align="left">PVI&#x0002B;BOX</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">40</td>
<td valign="top" align="center">38</td>
<td valign="top" align="center">78</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 1</td>
<td valign="top" align="left">-</td>
<td valign="top" align="center">62</td>
<td valign="top" align="center">62</td>
<td valign="top" align="center">52</td>
<td valign="top" align="center">82</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 1</td>
<td valign="top" align="left">PVI</td>
<td valign="top" align="center">51</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">42</td>
<td valign="top" align="center">86</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 1</td>
<td valign="top" align="left">PVI&#x0002B;BOX</td>
<td valign="top" align="center">48</td>
<td valign="top" align="center">47</td>
<td valign="top" align="center">36</td>
<td valign="top" align="center">85</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 2</td>
<td valign="top" align="left">-</td>
<td valign="top" align="center">73</td>
<td valign="top" align="center">65</td>
<td valign="top" align="center">57</td>
<td valign="top" align="center">84</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 2</td>
<td valign="top" align="left">PVI</td>
<td valign="top" align="center">60</td>
<td valign="top" align="center">54</td>
<td valign="top" align="center">44</td>
<td valign="top" align="center">84</td>
</tr>
<tr>
<td valign="top" align="left">Severe - case 2</td>
<td valign="top" align="left">PVI&#x0002B;BOX</td>
<td valign="top" align="center">54</td>
<td valign="top" align="center">48</td>
<td valign="top" align="center">38</td>
<td valign="top" align="center">84</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>We show the fraction of simulations where AF was inducible for 3 different fibrotic patterns and for a baseline case and 2 ablation strategies. We also report the fraction of simulations where the low and high fidelity models predicted the same outcome</italic>.</p>
</table-wrap-foot>
</table-wrap>
<p>We analyze the agreement between the low and high fidelity models by looking at training sets for all cases in <xref ref-type="fig" rid="F7">Figure 7A</xref>. Overall, we find the low and high fidelity models agree in 81.7% of the simulations. However, we see that the low fidelity model is biased towards predicting no AF when the high fidelity model is predicting AF. This is confirmed in every case, as can be seen in <xref ref-type="table" rid="T1">Table 1</xref>, where low fidelity inducibility is always lower than the high fidelity inducibility. A possible explanation is that the low fidelity model, being based on a coarser discretization of the atrial model, has fewer fine-grained features (fibrosis, anatomy, wall thickness) that might favor AF. It is also worth noting that we adapted the conduction velocity in the low fidelity model by increasing it to the level of the high fidelity one, a change that is potentially antiarrhythmic but that increased the correlation between the models and hence the overall performance of the multi-fidelity classifier. We also found that in the case of 50% fibrosis, the low fidelity model tends to predict proportionally more occurrence of AF when the high fidelity model is not predicting AF.</p>
<p>Finally, we see in <xref ref-type="table" rid="T1">Table 1</xref> that the ablation strategies applied are decreasing the inducibility in all cases, both for the train, test and low fidelity sets. We see that pulmonary vein isolation has more impact on the inducibility than the subsequent box ablation for all cases, both in the train and the test set.</p></sec></sec>
<sec sec-type="discussion" id="s4">
<title>4. Discussion</title>
<p>In this study, we propose a novel methodology to estimate the AF inducibility regions of a computational model of the human atria. This is achieved by training a Gaussian process classifier that indicates whether a given point on the atria is associated with a sustained AF event, when incrementally pacing from its location. Our classifier is directly trained on the atrial surface, hence it embodies the geometrical and topological properties of the atria, which are known to be key determinants in AF. Gaussian process regression on Riemannian manifolds is not a novel concept, nor is its link to certain types of SPDEs (Lindgren et al., <xref ref-type="bibr" rid="B27">2011</xref>). To the best of our knowledge, however, this is the first study proposing a multi-fidelity Gaussian process classifier on manifolds, which extends our previous work on Euclidean spaces (Sahli Costabal et al., <xref ref-type="bibr" rid="B44">2019</xref>). The proposed method is non-intrusive, in the sense that the atrial model is a black-box, with comparable training cost to a nearest neighbor classifier. Moreover, when a low fidelity model is available&#x02014;in our case, obtained by coarsening the computational mesh&#x02014;, the accuracy of the classifier can be sensibly improved with a multi-fidelity approach. Finally, given its structure, the methodology can be easily extended to a multi-class classifier, e.g., with the capability to distinguish AF episodes from atrial flutter.</p>
<p>From a methodological perspective, our results show that the accuracy of the classifier depends on the length scale of the inducibility region. Intuitively, the shorter the length scale is, the more training data is needed. When the length scale is much smaller than the size of the atria, it is more likely to observe an inducibility region composed of disconnected and relatively small components. Moreover, the boundary of the inducibility region becomes less smooth. Interestingly, the length scale has, however, a limited effect on the estimate of the overall inducibility. This is due to the fact that the volume of the inducibility region is only marginally affected by the smoothness of its boundary and the presence of multiple disconnected regions. We attempt to estimate the length scale of the inducibility map by training a single-fidelity classifier with both the high fidelity test and train sets. The average length scale of the resulting classifier of the baseline AF model is &#x02113;= 0.28. This is smaller than the average distance between points in the training set, which corresponds to 0.39, and may explain why the balanced accuracies that we obtained were only around 90%. We also observed in the numerical assessment that the efficiency of active learning deteriorates at smaller length scales, for &#x02113; between 0.2 to 0.4, and we decided not to use it for predicting inducibility maps in the experiments in Section 3.2, also to limit the computational cost.</p>
<p>From a computational viewpoint, the proposed multi-fidelity classifier reports the maximum improvements in accuracy in a typical data set of 40 pacing sites. In general, the multi-fidelity classifier was more accurate for a small number of samples (less than 50), while for a larger sample size the difference between single- and multi-fidelity classifiers is less pronounced. When comparing the model without ablation lines and with ablation, both high- and low fidelity models agree on the observed reduced inducibility due to ablation. In the case of ablation, therefore, it is convenient to adopt a multi-fidelity approach or even just the low fidelity classifier, to save computational time. In fact, the biggest advantage of the low fidelity classifier relies on its very limited computational cost, which is only a small fraction of the high fidelity counterpart. This highlights the importance of taking advantage of these inexpensive approximations of the high fidelity model whenever possible. We remark that our low fidelity model does not require a training phase itself, thus there is no additional offline cost.</p>
<p>Finally, from a modeling perspective, our results on the inducibility of AF are in agreement with those reported in the literature. Firstly, points in the proximity of fibrotic regions are more likely to induce AF (Kawai et al., <xref ref-type="bibr" rid="B24">2019</xref>). Visually, there is a spatial correlation between the inducibility region (see <xref ref-type="fig" rid="F6">Figure 6</xref>) and the fibrosis distribution (<xref ref-type="fig" rid="F2">Figure 2</xref>). The local inducibility property may therefore reflect the local tissue properties (Boyle et al., <xref ref-type="bibr" rid="B3">2021</xref>). Nonetheless, inducibility may also depend on other factors, such as an abrupt change in the fiber direction, heterogeneity in the ionic parameters, and the presence of anatomical defects or a scar. Hence, pacing sites leading to AF may not necessarily be correlated with the local tissue properties. Secondly, our results show that, with a fixed design, 40 pacing points are sufficient to achieve a good estimate of the inducibility (Boyle et al., <xref ref-type="bibr" rid="B3">2021</xref>), while 20 are probably too few. The multi-fidelity classifier, however, can achieve high accuracy with only 20 samples. Thirdly, the ablation treatment reduced the overall inducibility, essentially because a large inducible region surrounding the pulmonary veins has been isolated from the rest of the tissue, impeding the emergence of AF. Due to the presence of severe fibrosis in the tissue, however, it is still possible to induce AF from several other portions of the atria, mostly unaffected by ablation. Finally, as described above, the inducibility region in both cases shows a small length scale, which can explain why pacing from different but sufficiently close points may lead to discordant results in AF inducibility. In other words, the uncertainty in the outcome is potentially large for some pacing sites.</p>
<p>Our work also presents some limitations. We limited our analysis to a single anatomy, but we tested different fibrosis patterns, in terms of distribution and severity, and two standard-of-care ablation strategies. Therefore, the framework can be applied with no changes to other anatomies and therapies, such as antiarrhythmic drug therapy (Sahli Costabal et al., <xref ref-type="bibr" rid="B46">2018</xref>; Gharaviri et al., <xref ref-type="bibr" rid="B16">2021a</xref>). It is worth mentioning that for this study we ran 1,800 high fidelity simulations and 900 low fidelity ones, for a total cost of roughly 25,000 node-hours on the CSCS supercomputer. We also tested a single pacing protocol with a fixed design. The stimulation protocol is typically tailored to the ionic model and can be tested in a single-cell preparation, but sometimes this is not optimal, especially in the presence of heterogeneity and fibrosis. Optimized protocols (Azzolin et al., <xref ref-type="bibr" rid="B1">2021</xref>) can be easily combined with our approach, since the algorithm does not depend on it. The duration of each simulation, 4 s, is sufficiently long to detect AF events, but it might preclude the discovery of self-terminating episodes of AF, or the translation of an AF event to atrial flutter. These cases are typically very limited in number. The presence of self-terminating AF also depends on the ionic model used, which may not be suitable for long simulations (more than 1 min). Finally, we observed that using active learning can be effective in judiciously selecting new observation sites, albeit with a deteriorating efficiency at smaller length scales. Nonetheless, this limitation motivates future work on exploring new kernel functions and active learning criteria that might be better suited for this task.</p>
<p>From a clinical perspective, there is an increasing application of patient-specific electrophysiology models. Thus, there is a compelling need for reducing the overall time needed to deliver the optimal virtual treatment within the constraints dictated by clinical practice (Azzolin et al., <xref ref-type="bibr" rid="B1">2021</xref>; Boyle et al., <xref ref-type="bibr" rid="B3">2021</xref>; Pagani and Manzoni, <xref ref-type="bibr" rid="B32">2021</xref>). This study shows that the proposed Gaussian process classifier can, in fact, reduce the computational cost while maintaining a comparable or even better accuracy to a single-fidelity approach. Moreover, it does not require intrusive changes to existing implementations and it has a very limited computational overhead, rendering its translation to existing patient-specific solutions feasible and appealing.</p>
<p>Inducibility maps can also offer a novel, yet unexplored, view into AF, possibly unveiling regions susceptible to trigger AF. They could be used to design and test ablation scenarios, e.g., by isolating vulnerable regions. These maps could also be used to validate an AF model, by checking whether the patient-specific model and the real atria agree on the inducibility observed during a procedure.</p>
<p>In summary, our multi-fidelity classifier provides an efficient methodology to evaluate the effect of ablation therapy in patient-specific models of AF. We envision that this tool will accelerate the personalization of accurate treatments in the clinical setting.</p></sec>
<sec sec-type="data-availability" id="s5">
<title>Data Availability Statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link ext-link-type="uri" xlink:href="https://github.com/fsahli/AtrialMFclass">https://github.com/fsahli/AtrialMFclass</ext-link>.</p></sec>
<sec id="s6">
<title>Author Contributions</title>
<p>FS and SP: conceived the problem. FS and PP: formulated and implemented the classifier, whereas LG: implemented all necessary steps to perform multi-fidelity simulations on the supercomputer. AG and SP: provided the atrial model, the fibrosis pattern, and the ablation lines. RK and SP: supervised the work of LG. FS, SP, and LG: wrote the manuscript, and all authors reviewed and improved it. All authors contributed to the article and approved the submitted version.</p></sec>
<sec sec-type="funding-information" id="s7">
<title>Funding</title>
<p>This work was supported by the ANID Millennium Science Initiative Program Grant NCN17-129, ANID-FONDECYT Postdoctoral Fellowship 3190355 to FS, the DOE grant DE-SC0019116, AFOSR grant FA9550-20-1-0060, the DOEARPA grant DE-AR0001201 awarded to PP, the Leading House for Latin American Region grant RPG 2117 to SP and FS, the Swiss Heart Foundation grant FF20042, CSCS-Swiss National Supercomputing Centre grant s1074 awarded to SP, and the SNSF grant 197041 to RK. This work was also financially supported by the Theo Rossi di Montelera Foundation, the Metis Foundation Sergio Mantegazza, the Fidinam Foundation, and the Horten Foundation.</p></sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec sec-type="disclaimer" id="s8">
<title>Publisher&#x00027;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
</body>
<back>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Azzolin</surname> <given-names>L.</given-names></name> <name><surname>Schuler</surname> <given-names>S.</given-names></name> <name><surname>D&#x000F6;ssel</surname> <given-names>O.</given-names></name> <name><surname>Loewe</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>A reproducible protocol to assess arrhythmia vulnerability <italic>in silico</italic>: Pacing at the end of the effective refractory period</article-title>. <source>Front. Physiol</source>. <volume>12</volume>, <fpage>420</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2021.656411</pub-id><pub-id pub-id-type="pmid">33868025</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Borovitskiy</surname> <given-names>V.</given-names></name> <name><surname>Terenin</surname> <given-names>A.</given-names></name> <name><surname>Mostowsky</surname> <given-names>P.</given-names></name> <name><surname>Deisenroth</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Mat&#x000E9;rn gaussian processes on riemannian manifolds</article-title>, in <source>Advances in Neural Information Processing Systems</source>, Vol. 33, eds <person-group person-group-type="editor"><name><surname>Larochelle</surname> <given-names>H.</given-names></name> <name><surname>Ranzato</surname> <given-names>M.</given-names></name> <name><surname>Hadsell</surname> <given-names>R.</given-names></name> <name><surname>Balcan</surname> <given-names>M. F.</given-names></name> <name><surname>Lin</surname> <given-names>H.</given-names></name></person-group> (<publisher-name>Curran Associates, Inc.</publisher-name>), <fpage>12426</fpage>&#x02013;<lpage>12437</lpage>.</citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boyle</surname> <given-names>P. M.</given-names></name> <name><surname>Ochs</surname> <given-names>A. R.</given-names></name> <name><surname>Ali</surname> <given-names>R. L.</given-names></name> <name><surname>Paliwal</surname> <given-names>N.</given-names></name> <name><surname>Trayanova</surname> <given-names>N. A.</given-names></name></person-group> (<year>2021</year>). <article-title>Characterizing the arrhythmogenic substrate in personalized models of atrial fibrillation: sensitivity to mesh resolution and pacing protocol in af models</article-title>. <source>EP Europace</source> <volume>23</volume>, <fpage>i3</fpage>&#x02013;<lpage>i11</lpage>. <pub-id pub-id-type="doi">10.1093/europace/euaa385</pub-id><pub-id pub-id-type="pmid">33751074</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boyle</surname> <given-names>P. M.</given-names></name> <name><surname>Zghaib</surname> <given-names>T.</given-names></name> <name><surname>Zahid</surname> <given-names>S.</given-names></name> <name><surname>Ali</surname> <given-names>R. L.</given-names></name> <name><surname>Deng</surname> <given-names>D.</given-names></name> <name><surname>Franceschi</surname> <given-names>W. H.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Computationally guided personalized targeted ablation of persistent atrial fibrillation</article-title>. <source>Nat. Biomed. Eng.</source> <volume>3</volume>, <fpage>870</fpage>&#x02013;<lpage>879</lpage>. <pub-id pub-id-type="doi">10.1038/s41551-019-0437-9</pub-id><pub-id pub-id-type="pmid">31427780</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>S.-A.</given-names></name> <name><surname>Hsieh</surname> <given-names>M.-H.</given-names></name> <name><surname>Tai</surname> <given-names>C.-T.</given-names></name> <name><surname>Tsai</surname> <given-names>C.-F.</given-names></name> <name><surname>Prakash</surname> <given-names>V. S.</given-names></name> <name><surname>Yu</surname> <given-names>W.-C.</given-names></name> <etal/></person-group>. (<year>1999</year>). <article-title>Initiation of atrial fibrillation by ectopic beats originating from the pulmonary veins</article-title>. <source>Circulation</source> <volume>100</volume>, <fpage>1879</fpage>&#x02013;<lpage>1886</lpage>. <pub-id pub-id-type="pmid">10545432</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cohn</surname> <given-names>D. A.</given-names></name> <name><surname>Ghahramani</surname> <given-names>Z.</given-names></name> <name><surname>Jordan</surname> <given-names>M. I.</given-names></name></person-group> (<year>1996</year>). <article-title>Active learning with statistical models</article-title>. <source>J. Artif. Intell. Res.</source> <volume>4</volume>, <fpage>129</fpage>&#x02013;<lpage>145</lpage>.</citation></ref>
<ref id="B7">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Colli Franzone</surname> <given-names>P.</given-names></name> <name><surname>Pavarino</surname> <given-names>L. F.</given-names></name> <name><surname>Scacchi</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <source>Mathematical Cardiac Electrophysiology, Vol. 13</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>.</citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Courtemanche</surname> <given-names>M.</given-names></name> <name><surname>Ramirez</surname> <given-names>R. J.</given-names></name> <name><surname>Nattel</surname> <given-names>S.</given-names></name></person-group> (<year>1998</year>). <article-title>Ionic mechanisms underlying human atrial action potential properties: insights from a mathematical model</article-title>. <source>Am. J. Physiol. Heart Circ. Physiol.</source> <volume>275</volume>, <fpage>H301</fpage>&#x02013;<lpage>H321</lpage>. <pub-id pub-id-type="pmid">9688927</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coveney</surname> <given-names>S.</given-names></name> <name><surname>Corrado</surname> <given-names>C.</given-names></name> <name><surname>Roney</surname> <given-names>C.</given-names></name> <name><surname>Wilkinson</surname> <given-names>R.</given-names></name> <name><surname>Oakley</surname> <given-names>J.</given-names></name> <name><surname>Lindgren</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Probabilistic interpolation of uncertain local activation times on human atrial manifolds</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>67</volume>, <fpage>99</fpage>&#x02013;<lpage>109</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2019.2908486</pub-id><pub-id pub-id-type="pmid">30969911</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coveney</surname> <given-names>S.</given-names></name> <name><surname>Corrado</surname> <given-names>C.</given-names></name> <name><surname>Roney</surname> <given-names>C. H.</given-names></name> <name><surname>O&#x00027;Hare</surname> <given-names>D.</given-names></name> <name><surname>Williams</surname> <given-names>S. E.</given-names></name> <name><surname>O&#x00027;Neill</surname> <given-names>M. D.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Gaussian process manifold interpolation for probabilistic atrial activation maps and uncertain conduction velocity</article-title>. <source>Philosoph. Trans. R. Soc. A Math. Phys. Eng. Sci.</source> <volume>378</volume>:<fpage>20190345</fpage>. <pub-id pub-id-type="doi">10.1098/rsta.2019.0345</pub-id><pub-id pub-id-type="pmid">32448072</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Crane</surname> <given-names>K.</given-names></name> <name><surname>Weischedel</surname> <given-names>C.</given-names></name> <name><surname>Wardetzky</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Geodesics in heat: a new approach to computing distance based on heat flow</article-title>. <source>ACM Trans. Graph.</source> <volume>32</volume>, <fpage>10</fpage>. <pub-id pub-id-type="doi">10.1145/2516971.2516977</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dhamala</surname> <given-names>J.</given-names></name> <name><surname>Bajracharya</surname> <given-names>P.</given-names></name> <name><surname>Arevalo</surname> <given-names>H. J.</given-names></name> <name><surname>Sapp</surname> <given-names>J. L.</given-names></name> <name><surname>Hor&#x000E1;cek</surname> <given-names>B. M.</given-names></name> <name><surname>Wu</surname> <given-names>K. C.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Embedding high-dimensional bayesian optimization via generative modeling: parameter personalization of cardiac electrophysiological models</article-title>. <source>Med. Image Anal.</source> <volume>62</volume>:<fpage>101670</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2020.101670</pub-id><pub-id pub-id-type="pmid">32171168</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fresca</surname> <given-names>S.</given-names></name> <name><surname>Manzoni</surname> <given-names>A.</given-names></name> <name><surname>Ded&#x000E8;</surname> <given-names>L.</given-names></name> <name><surname>Quarteroni</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Deep learning-based reduced order models in cardiac electrophysiology</article-title>. <source>PLoS ONE</source> <volume>15</volume>:<fpage>e0239416</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0239416</pub-id><pub-id pub-id-type="pmid">34630131</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fu</surname> <given-names>Z.</given-names></name> <name><surname>Kirby</surname> <given-names>R. M.</given-names></name> <name><surname>Whitaker</surname> <given-names>R. T.</given-names></name></person-group> (<year>2013</year>). <article-title>A fast iterative method for solving the eikonal equation on tetrahedral domains</article-title>. <source>SIAM J. Sci. Comput.</source> <volume>35</volume>, <fpage>C473</fpage>&#x02013;<lpage>C494</lpage>. <pub-id pub-id-type="doi">10.1137/120881956</pub-id><pub-id pub-id-type="pmid">25221418</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gharaviri</surname> <given-names>A.</given-names></name> <name><surname>Bidar</surname> <given-names>E.</given-names></name> <name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Zeemering</surname> <given-names>S.</given-names></name> <name><surname>Verheule</surname> <given-names>S.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name> <name><surname>Maessen</surname> <given-names>J. G.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name> <name><surname>Schotten</surname> <given-names>U.</given-names></name></person-group> (<year>2020</year>). <article-title>Epicardial fibrosis explains increased endo-epicardial dissociation and epicardial breakthroughs in human atrial fibrillation</article-title>. <source>Front. Physiol.</source> <volume>11</volume>:<fpage>68</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2020.00068</pub-id><pub-id pub-id-type="pmid">32153419</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gharaviri</surname> <given-names>A.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Conte</surname> <given-names>G.</given-names></name> <name><surname>Zeemering</surname> <given-names>S.</given-names></name> <name><surname>Sobota</surname> <given-names>V.</given-names></name> <name><surname>Verheule</surname> <given-names>S.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name> <name><surname>Schotten</surname> <given-names>U.</given-names></name></person-group> (<year>2021a</year>). <article-title>Synergistic antiarrhythmic effect of inward rectifier current inhibition and pulmonary vein isolation in a 3d computer model for atrial fibrillation</article-title>. <source>EP Europace</source> <volume>23</volume>, <fpage>i161</fpage>&#x02013;<lpage>i168</lpage>. <pub-id pub-id-type="doi">10.1093/europace/euaa413</pub-id><pub-id pub-id-type="pmid">33751085</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gharaviri</surname> <given-names>A.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Verheule</surname> <given-names>S.</given-names></name> <name><surname>Conte</surname> <given-names>G.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name> <name><surname>Schotten</surname> <given-names>U.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name></person-group> (<year>2021b</year>). <article-title>Left atrial appendage electrical isolation reduces atrial fibrillation recurrences: simulation study</article-title>. <source>Circ. Arrhythmia Electrophysiol.</source> <volume>14</volume>:<fpage>e009230</fpage>. <pub-id pub-id-type="doi">10.1161/CIRCEP.120.009230</pub-id><pub-id pub-id-type="pmid">33356357</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gneiting</surname> <given-names>T.</given-names></name></person-group> (<year>2013</year>). <article-title>Strictly and non-strictly positive definite functions on spheres</article-title>. <source>Bernoulli</source> <volume>19</volume>, <fpage>1327</fpage>&#x02013;<lpage>1349</lpage>.</citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gramacy</surname> <given-names>R. B.</given-names></name> <name><surname>Polson</surname> <given-names>N. G.</given-names></name></person-group> (<year>2011</year>). <article-title>Particle learning of gaussian process models for sequential design and optimization</article-title>. <source>J. Comput. Graph. Stat.</source> <volume>20</volume>, <fpage>102</fpage>&#x02013;<lpage>118</lpage>. <pub-id pub-id-type="doi">10.1198/jcgs.2010.09171</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Haissaguerre</surname> <given-names>M.</given-names></name> <name><surname>Ja&#x000EF;s</surname> <given-names>P.</given-names></name> <name><surname>Shah</surname> <given-names>D. C.</given-names></name> <name><surname>Takahashi</surname> <given-names>A.</given-names></name> <name><surname>Hocini</surname> <given-names>M.</given-names></name> <name><surname>Quiniou</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>1998</year>). <article-title>Spontaneous initiation of atrial fibrillation by ectopic beats originating in the pulmonary veins</article-title>. <source>New England J. Med.</source> <volume>339</volume>, <fpage>659</fpage>&#x02013;<lpage>666</lpage>. <pub-id pub-id-type="pmid">9725923</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoffman</surname> <given-names>M. D.</given-names></name> <name><surname>Gelman</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>The no-u-turn sampler: adaptively setting path lengths in hamiltonian monte carlo</article-title>. <source>J. Mach. Learn. Res.</source> <volume>15</volume>, <fpage>1593</fpage>&#x02013;<lpage>1623</lpage>.</citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kaboudian</surname> <given-names>A.</given-names></name> <name><surname>Cherry</surname> <given-names>E. M.</given-names></name> <name><surname>Fenton</surname> <given-names>F. H.</given-names></name></person-group> (<year>2019</year>). <article-title>Real-time interactive simulations of large-scale systems on personal computers and cell phones: toward patient-specific heart modeling and other applications</article-title>. <source>Sci. Adv.</source> <volume>5</volume>:<fpage>eaav6019</fpage>. <pub-id pub-id-type="doi">10.1126/sciadv.aav6019</pub-id><pub-id pub-id-type="pmid">30944861</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Kapoor</surname> <given-names>A.</given-names></name> <name><surname>Grauman</surname> <given-names>K.</given-names></name> <name><surname>Urtasun</surname> <given-names>R.</given-names></name> <name><surname>Darrell</surname> <given-names>T.</given-names></name></person-group> (<year>2007</year>). <article-title>Active learning with gaussian processes for object categorization</article-title>, in <source>2007 IEEE 11th International Conference on Computer Vision</source> (<publisher-loc>Rio de Janeiro</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>8</lpage>.</citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kawai</surname> <given-names>S.</given-names></name> <name><surname>Mukai</surname> <given-names>Y.</given-names></name> <name><surname>Inoue</surname> <given-names>S.</given-names></name> <name><surname>Yakabe</surname> <given-names>D.</given-names></name> <name><surname>Nagaok</surname> <given-names>K.</given-names></name> <name><surname>Sakamoto</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Non-pulmonary vein triggers of atrial fibrillation are likely to arise from low-voltage areas in the left atrium</article-title>. <source>Sci. Rep.</source> <volume>9</volume>:<fpage>12271</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-48669-1</pub-id><pub-id pub-id-type="pmid">31439861</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kennedy</surname> <given-names>M. C.</given-names></name> <name><surname>O&#x00027;Hagan</surname> <given-names>A.</given-names></name></person-group> (<year>2000</year>). <article-title>Predicting the output from a complex computer code when fast approximations are available</article-title>. <source>Biometrika</source> <volume>87</volume>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1093/BIOMET/87.1.1</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Krause</surname> <given-names>D.</given-names></name> <name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Dickopf</surname> <given-names>T.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name> <name><surname>Prinzen</surname> <given-names>F.</given-names></name></person-group> (<year>2012</year>). <article-title>Hybrid parallelization of a large-scale heart model</article-title>, in <source>Facing the Multicore-Challenge II</source> (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>120</fpage>&#x02013;<lpage>132</lpage>. <pub-id pub-id-type="pmid">15339335</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lindgren</surname> <given-names>F.</given-names></name> <name><surname>Rue</surname> <given-names>H.</given-names></name> <name><surname>Lindstr&#x000F6;m</surname> <given-names>J.</given-names></name></person-group> (<year>2011</year>). <article-title>An explicit link between gaussian fields and gaussian markov random fields: the stochastic partial differential equation approach</article-title>. <source>J. R. Stat. Soc. Series B (Stat. Methodol.)</source> <volume>73</volume>, <fpage>423</fpage>&#x02013;<lpage>498</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-9868.2011.00777.x</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Loewe</surname> <given-names>A.</given-names></name> <name><surname>Poremba</surname> <given-names>E.</given-names></name> <name><surname>Oesterlein</surname> <given-names>T.</given-names></name> <name><surname>Luik</surname> <given-names>A.</given-names></name> <name><surname>Schmitt</surname> <given-names>C.</given-names></name> <name><surname>Seemann</surname> <given-names>G.</given-names></name> <name><surname>D&#x000F6;ssel</surname> <given-names>O.</given-names></name></person-group> (<year>2019</year>). <article-title>Patient-specific identification of atrial flutter vulnerability&#x02013;a computational approach to reveal latent reentry pathways</article-title>. <source>Front. Physiol.</source> <volume>9</volume>, <fpage>1910</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2018.01910</pub-id><pub-id pub-id-type="pmid">30692934</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>McDowell</surname> <given-names>K. S.</given-names></name> <name><surname>Zahid</surname> <given-names>S.</given-names></name> <name><surname>Vadakkumpadan</surname> <given-names>F.</given-names></name> <name><surname>Blauer</surname> <given-names>J.</given-names></name> <name><surname>MacLeod</surname> <given-names>R. S.</given-names></name> <name><surname>Trayanova</surname> <given-names>N. A.</given-names></name></person-group> (<year>2015</year>). <article-title>Virtual electrophysiological study of atrial fibrillation in fibrotic remodeling</article-title>. <source>PLoS ONE</source> <volume>10</volume>:<fpage>e0117110</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0117110</pub-id><pub-id pub-id-type="pmid">27196264</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neal</surname> <given-names>R.</given-names></name></person-group> (<year>1999</year>). <article-title>Regression and classification using gaussian process priors (with discussion)</article-title>. <source>Bayesian Stat.</source> <volume>6</volume>, <fpage>475</fpage>&#x02013;<lpage>501</lpage>.</citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nickisch</surname> <given-names>H.</given-names></name> <name><surname>Rasmussen</surname> <given-names>C. E.</given-names></name></person-group> (<year>2008</year>). <article-title>Approximations for binary Gaussian process classification</article-title>. <source>Mach. Learn. Res.</source> <volume>9</volume>, <fpage>2035</fpage>&#x02013;<lpage>2078</lpage>.</citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pagani</surname> <given-names>S.</given-names></name> <name><surname>Manzoni</surname> <given-names>A.</given-names></name></person-group>. (<year>2021</year>). <article-title>Enabling forward uncertainty quantification and sensitivity analysis in cardiac electrophysiology by reduced order modeling and machine learning</article-title>. <source>Int. J. Numer. Methods Biomed. Eng.</source> <fpage>e3450</fpage>. <pub-id pub-id-type="doi">10.1002/cnm.3450</pub-id><pub-id pub-id-type="pmid">33599106</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Perdikaris</surname> <given-names>P.</given-names></name> <name><surname>Venturi</surname> <given-names>D.</given-names></name> <name><surname>Karniadakis</surname> <given-names>G. E.</given-names></name></person-group> (<year>2016</year>). <article-title>Multifidelity information fusion algorithms for high-dimensional systems and massive data sets</article-title>. <source>SIAM J. Sci. Comput.</source> <volume>38</volume>, <fpage>B521</fpage>&#x02013;<lpage>B538</lpage>. <pub-id pub-id-type="doi">10.1137/15M1055164</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Hake</surname> <given-names>J.</given-names></name> <name><surname>Sundnes</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Space-discretization error analysis and stabilization schemes for conduction velocity in cardiac electrophysiology</article-title>. <source>Int. J. Numer. Methods Biomed. Eng.</source> <volume>32</volume>:<fpage>e02762</fpage>. <pub-id pub-id-type="doi">10.1002/cnm.2762</pub-id><pub-id pub-id-type="pmid">26685879</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Quaglino</surname> <given-names>A.</given-names></name> <name><surname>Potse</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>On sampling spatially-correlated random fields for complex geometries</article-title>, in <source>International Conference on Functional Imaging and Modeling of the Heart</source> (<publisher-loc>Bordeaux</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>103</fpage>&#x02013;<lpage>111</lpage>.</citation></ref>
<ref id="B36">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Phan</surname> <given-names>D.</given-names></name> <name><surname>Pradhan</surname> <given-names>N.</given-names></name> <name><surname>Jankowiak</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Composable effects for flexible and accelerated probabilistic programming in numpyro</article-title>. <source>arXiv preprint</source> arXiv:1912.11554.</citation></ref>
<ref id="B37">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Potse</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Inducibility of atrial fibrillation depends chaotically on ionic model parameters</article-title>, in <source>Computing in Cardiology (CinC)</source> (<publisher-loc>Singapore</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>4</lpage>.</citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Dub&#x000E9;</surname> <given-names>B.</given-names></name> <name><surname>Richer</surname> <given-names>J.</given-names></name> <name><surname>Vinet</surname> <given-names>A.</given-names></name> <name><surname>Gulrajani</surname> <given-names>R. M.</given-names></name></person-group> (<year>2006</year>). <article-title>A comparison of monodomain and bidomain reaction-diffusion models for action potential propagation in the human heart</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>53</volume>, <fpage>2425</fpage>&#x02013;<lpage>2435</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2006.880875</pub-id><pub-id pub-id-type="pmid">17153199</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Potse</surname> <given-names>M.</given-names></name> <name><surname>Gharaviri</surname> <given-names>A.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name> <name><surname>Verheule</surname> <given-names>S.</given-names></name> <name><surname>Schotten</surname> <given-names>U.</given-names></name></person-group> (<year>2018</year>). <article-title>Anatomically-induced fibrillation in a 3d model of the human atria</article-title>, in <source>2018 Computing in Cardiology Conference (CinC)</source>, <volume>Vol. 45</volume> (<publisher-loc>Maastricht</publisher-loc>: <publisher-name>IEEE</publisher-name>).</citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Quaglino</surname> <given-names>A.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Koutsourelakis</surname> <given-names>P.</given-names></name> <name><surname>Auricchio</surname> <given-names>A.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name></person-group> (<year>2018</year>). <article-title>Fast uncertainty quantification of activation sequences in patient-specific cardiac electrophysiology meeting clinical time constraints</article-title>. <source>Int. J. Numer. Methods Biomed. Eng.</source> <volume>34</volume>:<fpage>e2985</fpage>. <pub-id pub-id-type="doi">10.1002/cnm.2985</pub-id><pub-id pub-id-type="pmid">29577657</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Quaglino</surname> <given-names>A.</given-names></name> <name><surname>Pezzuto</surname> <given-names>S.</given-names></name> <name><surname>Krause</surname> <given-names>R.</given-names></name></person-group> (<year>2019</year>). <article-title>High-dimensional and higher-order multifidelity monte carlo estimators</article-title>. <source>J. Comput. Phys.</source> <volume>388</volume>, <fpage>300</fpage>&#x02013;<lpage>315</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcp.2019.03.026</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Rasmussen</surname> <given-names>C. E.</given-names></name> <name><surname>Williams</surname> <given-names>C. K. I.</given-names></name></person-group> (<year>2006</year>). <source>Gaussian Processes for Machine Learning</source>. (<publisher-loc>Cambridge</publisher-loc>: <publisher-name>MIT Press</publisher-name>).</citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roney</surname> <given-names>C. H.</given-names></name> <name><surname>Whitaker</surname> <given-names>J.</given-names></name> <name><surname>Sim</surname> <given-names>I.</given-names></name> <name><surname>O&#x00027;Neill</surname> <given-names>L.</given-names></name> <name><surname>Mukherjee</surname> <given-names>R. K.</given-names></name> <name><surname>Razeghi</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>A technique for measuring anisotropy in atrial conduction to estimate conduction velocity and atrial fibre direction</article-title>. <source>Comput. Biol. Med.</source> <volume>104</volume>, <fpage>278</fpage>&#x02013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2018.10.019</pub-id><pub-id pub-id-type="pmid">30415767</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sahli Costabal</surname> <given-names>F.</given-names></name> <name><surname>Perdikaris</surname> <given-names>P.</given-names></name> <name><surname>Kuhl</surname> <given-names>E.</given-names></name> <name><surname>Hurtado</surname> <given-names>D. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Multi-fidelity classification using gaussian processes: accelerating the prediction of large-scale computational models</article-title>. <source>Comput. Methods Appl. Mech. Eng.</source> <volume>357</volume>:<fpage>112602</fpage>. <pub-id pub-id-type="doi">10.1016/j.cma.2019.112602</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sahli Costabal</surname> <given-names>F.</given-names></name> <name><surname>Yao</surname> <given-names>J.</given-names></name> <name><surname>Kuhl</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Predicting drug-induced arrhythmias by multiscale modeling</article-title>. <source>Int. J. Numer. Methods Biomed. Eng.</source> <volume>34</volume>:<fpage>e2964</fpage>. <pub-id pub-id-type="doi">10.1002/cnm.2964</pub-id><pub-id pub-id-type="pmid">29424967</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sahli Costabal</surname> <given-names>F.</given-names></name> <name><surname>Yao</surname> <given-names>J.</given-names></name> <name><surname>Kuhl</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Predicting drug-induced arrhythmias by multiscale modeling</article-title>. <source>Int. J. Numer. Methods Biomed. Eng.</source> <volume>118</volume>, <fpage>1165</fpage>&#x02013;<lpage>1176</lpage>. <pub-id pub-id-type="doi">10.1002/cnm.2964</pub-id><pub-id pub-id-type="pmid">29424967</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Verma</surname> <given-names>A.</given-names></name> <name><surname>Jiang</surname> <given-names>C.-Y.</given-names></name> <name><surname>Betts</surname> <given-names>T. R.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Deisenhofer</surname> <given-names>I.</given-names></name> <name><surname>Mantovan</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Approaches to catheter ablation for persistent atrial fibrillation</article-title>. <source>New Engl. J. Med.</source> <volume>372</volume>, <fpage>1812</fpage>&#x02013;<lpage>1822</lpage>. <pub-id pub-id-type="doi">10.1056/NEJMoa1408288</pub-id><pub-id pub-id-type="pmid">25946280</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Virani</surname> <given-names>S. S.</given-names></name> <name><surname>Alonso</surname> <given-names>A.</given-names></name> <name><surname>Aparicio</surname> <given-names>H. J.</given-names></name> <name><surname>Benjamin</surname> <given-names>E. J.</given-names></name> <name><surname>Bittencourt</surname> <given-names>M. S.</given-names></name> <name><surname>Callaway</surname> <given-names>C. W.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Heart disease and stroke statistics&#x02013;2021 update: a report from the American Heart Association</article-title>. <source>Circulation</source> <volume>143</volume>, <fpage>e254</fpage>&#x02013;<lpage>e743</lpage>. <pub-id pub-id-type="doi">10.1161/CIR.0000000000000950</pub-id><pub-id pub-id-type="pmid">33501848</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Whittle</surname> <given-names>P.</given-names></name></person-group> (<year>1963</year>). <article-title>Stochastic processes in several dimensions</article-title>. <source>Bull. Int. Stat. Inst.</source> <volume>40</volume>, <fpage>974</fpage>&#x02013;<lpage>994</lpage>.</citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zaman</surname> <given-names>M. S.</given-names></name> <name><surname>Dhamala</surname> <given-names>J.</given-names></name> <name><surname>Bajracharya</surname> <given-names>P.</given-names></name> <name><surname>Sapp</surname> <given-names>J. L.</given-names></name> <name><surname>Hor&#x000E1;&#x0010D;ek</surname> <given-names>B. M.</given-names></name> <name><surname>Wu</surname> <given-names>K. C.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Fast posterior estimation of cardiac electrophysiological model parameters via Bayesian active learning</article-title>. <source>Front. Physiol.</source> <volume>12</volume>:<fpage>740306</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2021.740306</pub-id><pub-id pub-id-type="pmid">34759835</pub-id></citation></ref>
</ref-list>
</back>
</article>