<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2023.1229371</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Neural fingerprinting on MEG time series using MiniRocket</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Kampel</surname>
<given-names>Nikolas</given-names>
</name>
<xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
<xref rid="aff2" ref-type="aff"><sup>2</sup></xref>
<xref rid="aff3" ref-type="aff"><sup>3</sup></xref>
<xref rid="fn0001" ref-type="author-notes"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2325435/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kiefer</surname>
<given-names>Christian M.</given-names>
</name>
<xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
<xref rid="aff4" ref-type="aff"><sup>4</sup></xref>
<xref rid="fn0001" ref-type="author-notes"><sup>&#x2020;</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Shah</surname>
<given-names>N. Jon</given-names>
</name>
<xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
<xref rid="aff5" ref-type="aff"><sup>5</sup></xref>
<xref rid="aff6" ref-type="aff"><sup>6</sup></xref>
<xref rid="aff7" ref-type="aff"><sup>7</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/115242/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Neuner</surname>
<given-names>Irene</given-names>
</name>
<xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
<xref rid="aff3" ref-type="aff"><sup>3</sup></xref>
<xref rid="aff5" ref-type="aff"><sup>5</sup></xref>
<xref rid="aff8" ref-type="aff"><sup>8</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/82986/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Dammers</surname>
<given-names>J&#x00FC;rgen</given-names>
</name>
<xref rid="aff1" ref-type="aff"><sup>1</sup></xref>
<xref rid="aff2" ref-type="aff"><sup>2</sup></xref>
<xref rid="aff3" ref-type="aff"><sup>3</sup></xref>
<xref rid="c001" ref-type="corresp"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/6445/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Institute of Neuroscience and Medicine (INM-4), Forschungszentrum J&#x00FC;lich GmbH</institution>, <addr-line>J&#x00FC;lich</addr-line>, <country>Germany</country></aff>
<aff id="aff2"><sup>2</sup><institution>Faculty of Medicine, RWTH Aachen University</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<aff id="aff3"><sup>3</sup><institution>J&#x00FC;lich Aachen Research Alliance (JARA) &#x2013; CSD &#x2013; Center for Simulation and Data Science</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<aff id="aff4"><sup>4</sup><institution>Faculty of Mathematics, Computer Science and Natural Sciences, RWTH Aachen University</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<aff id="aff5"><sup>5</sup><institution>J&#x00FC;lich Aachen Research Alliance (JARA) &#x2013; BRAIN &#x2013; Translational Medicine</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<aff id="aff6"><sup>6</sup><institution>Institute of Neuroscience and Medicine (INM-11), J&#x00FC;lich Aachen Research Alliance (JARA), Forschungszentrum J&#x00FC;lich GmbH</institution>, <addr-line>J&#x00FC;lich</addr-line>, <country>Germany</country></aff>
<aff id="aff7"><sup>7</sup><institution>Department of Neurology, University Hospital RWTH Aachen</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<aff id="aff8"><sup>8</sup><institution>Department of Psychiatry, Psychotherapy and Psychosomatics, RWTH Aachen University</institution>, <addr-line>Aachen</addr-line>, <country>Germany</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0002"><p>Edited by: Xi-Nian Zuo, Beijing Normal University, China</p></fn>
<fn fn-type="edited-by" id="fn0003"><p>Reviewed by: Ahmadreza Keihani, University of Pittsburgh, United States; Chang Wei Tan, Monash University, Australia</p></fn>
<corresp id="c001">&#x002A;Correspondence: J&#x00FC;rgen Dammers, <email>j.dammers@fz-juelich.de</email></corresp>
<fn fn-type="equal" id="fn0001"><p><sup>&#x2020;</sup>These authors have contributed equally to this work</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>20</day>
<month>09</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>17</volume>
<elocation-id>1229371</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>05</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>04</day>
<month>09</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2023 Kampel, Kiefer, Shah, Neuner and Dammers.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Kampel, Kiefer, Shah, Neuner and Dammers</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Neural fingerprinting is the identification of individuals in a cohort based on neuroimaging recordings of brain activity. In magneto- and electroencephalography (M/EEG), it is common practice to use second-order statistical measures, such as correlation or connectivity matrices, when neural fingerprinting is performed. These measures or features typically require coupling between signal channels and often ignore the individual temporal dynamics. In this study, we show that, following recent advances in multivariate time series classification, such as the development of the RandOm Convolutional KErnel Transformation (ROCKET) classifier, it is possible to perform classification directly on short time segments from MEG resting-state recordings with remarkably high classification accuracies. In a cohort of 124 subjects, it was possible to assign windows of time series of 1&#x2009;s in duration to the correct subject with above 99% accuracy. The achieved accuracies are vastly superior to those of previous methods while simultaneously requiring considerably shorter time segments.</p>
</abstract>
<kwd-group>
<kwd>neural fingerprinting</kwd>
<kwd>resting state</kwd>
<kwd>ROCKET</kwd>
<kwd>time series classification</kwd>
<kwd>magnetoencephalography</kwd>
<kwd>MEG</kwd>
<kwd>machine learning</kwd>
</kwd-group>
<contract-num rid="cn1">368482240/GRK2416</contract-num>
<contract-num rid="cn3">491111487</contract-num>
<contract-sponsor id="cn1">Deutsche Forschungsgemeinschaft (DFG, German Research Foundation)<named-content content-type="fundref-id">10.13039/501100001659</named-content></contract-sponsor>
<contract-sponsor id="cn2">HBP SGA3 &#x2013; Human Brain Project Specific Grant Agreement 3</contract-sponsor>
<contract-sponsor id="cn3">Deutsche Forschungsgemeinschaft (DFG, German Research Foundation)<named-content content-type="fundref-id">10.13039/501100001659</named-content></contract-sponsor>
<counts>
<fig-count count="4"/>
<table-count count="3"/>
<equation-count count="2"/>
<ref-count count="46"/>
<page-count count="10"/>
<word-count count="7719"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1.</label>
<title>Introduction</title>
<p>Historically, neuroscientists have inferred knowledge about the brain from the population level, and commonalities between individuals were used as the foundation for our understanding of the brain (<xref ref-type="bibr" rid="ref43">van Horn et al., 2008</xref>). However, it is now known that individual variations may convey important information, and disregarding them as noise may limit our insight into the brain [see (<xref ref-type="bibr" rid="ref43">van Horn et al., 2008</xref>) for a review]. Placing the individual as the focus of research led to the emergence of the field of neural fingerprinting, i.e., the identification of individuals in a cohort using different neuroimaging modalities such as magnetic resonance imaging (<xref ref-type="bibr" rid="ref44">Wachinger et al., 2015</xref>; <xref ref-type="bibr" rid="ref40">Valizadeh et al., 2018</xref>), functional magnetic resonance imaging (<xref ref-type="bibr" rid="ref32">Miranda-Dominguez et al., 2014</xref>; <xref ref-type="bibr" rid="ref17">Finn et al., 2015</xref>; <xref ref-type="bibr" rid="ref27">Kaufmann et al., 2017</xref>; <xref ref-type="bibr" rid="ref2">Amico and Go&#x00F1;i, 2018</xref>; <xref ref-type="bibr" rid="ref5">Bari et al., 2019</xref>), functional near-infrared spectroscopy (<xref ref-type="bibr" rid="ref10">de Souza Rodrigues et al., 2019</xref>), electroencephalography (<xref ref-type="bibr" rid="ref36">Rocca et al., 2014</xref>; <xref ref-type="bibr" rid="ref19">Fraschini et al., 2015</xref>; <xref ref-type="bibr" rid="ref29">Kong et al., 2019</xref>), and magnetoencephalography (MEG) (<xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>; <xref ref-type="bibr" rid="ref38">Sareen et al., 2021</xref>).</p>
<p>The development of neuroimaging techniques has further led to the possibility of using second-order statistical summaries of brain activity, such as functional connectomes, as the basis for neural fingerprinting (<xref ref-type="bibr" rid="ref38">Sareen et al., 2021</xref>). However, functional connectomes are not necessarily required for neural fingerprinting as neural fingerprinting can be performed directly on the time series from which the connectomes are usually computed. In fact, (multivariate) time series classification ((M)TSC), where unlabeled time series are assigned to one of multiple classes, is an exciting, yet challenging, field of research (<xref ref-type="bibr" rid="ref28">Keogh and Kasetty, 2003</xref>; <xref ref-type="bibr" rid="ref46">Yang and Wu, 2006</xref>). For example, many practical applications have emerged for (M)TSC in fields such as biology, medicine, finance, or engineering (<xref ref-type="bibr" rid="ref28">Keogh and Kasetty, 2003</xref>). Despite these advances, applications have been limited due to the fact that time series classification methods are computationally expensive (<xref ref-type="bibr" rid="ref1">Abanda et al., 2019</xref>).</p>
<p>Recently, a fast approach for time series classification, known as RandOm Convolutional KErnel Transform (ROCKET), has been introduced and requires only a fraction of the computational expense of most existing methods (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). Its new variant, called MiniRocket (MINImally RandOm Convolutional KErnel Transform), introduced by the same group, provides similar or better accuracy but is up to 75 times faster compared to ROCKET on larger datasets (<xref ref-type="bibr" rid="ref12">Dempster et al., 2021</xref>).</p>
<p>Given these capabilities, we sought to reduce the complexity of neural fingerprinting by directly applying the multivariate time series classifier MiniRocket to source time courses from MEG resting-state recordings. Data requirements for training a successful classifier were investigated. Furthermore, it has been suggested that day-to-day variations in the background noise may have a significant impact on the classification results (<xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>). Therefore, we conducted experiments to estimate the effect of background noise by incorporating empty-room recordings (i.e., noise recordings taken without a subject being measured) into the training and testing datasets.</p>
<p>Using MiniRocket, it was possible to differentiate between MEG resting-state recordings from 124 subjects with accuracies exceeding 99.5%. A set of parameters providing a good trade-off between accuracy, speed, and amount of available data was investigated. Based on our findings, the impact of background noise on the classification results for fingerprinting appears to be minimal.</p>
</sec>
<sec sec-type="methods" id="sec2">
<label>2.</label>
<title>Methods</title>
<sec id="sec3">
<label>2.1.</label>
<title>Time series classification</title>
<p>In a similar way to image classification, TSC also requires the input values to be ordered, and it is possible that important information relevant to the classification might be buried in the ordering process (<xref ref-type="bibr" rid="ref4">Bagnall et al., 2017</xref>). Moreover, in the case of a multivariate time series, discriminatory features might even depend on interactions between the individual time series, and special multivariate classifiers are needed to deal with this added complexity (<xref ref-type="bibr" rid="ref37">Ruiz et al., 2021</xref>). While it is generally possible to adapt strictly univariate classifiers to the multivariate case, for example, by using an ensemble of separate univariate classifiers for each of the multivariate dimensions, inter-dimensional dependencies are ignored, and information is inevitably lost (<xref ref-type="bibr" rid="ref37">Ruiz et al., 2021</xref>).</p>
<p>A variety of MTSC methods, which include ensembles of univariate classifiers such as Hierarchical Vote Collective of Transformation-based Ensembles (HIVE-COTE) (<xref ref-type="bibr" rid="ref3">Bagnall et al., 2020</xref>), dedicated multivariate TSC methods such as RandOm Convolutional KErnel Transformation (ROCKET) (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>), MINImally RandOm Convolutional KErnel Transform [MiniRocket, (<xref ref-type="bibr" rid="ref12">Dempster et al., 2021</xref>)] and deep-learning approaches such as InceptionTime (<xref ref-type="bibr" rid="ref26">Ismail Fawaz et al., 2020</xref>), were recently reviewed for their performance on openly available TSC datasets (<xref ref-type="bibr" rid="ref37">Ruiz et al., 2021</xref>). Due to the exceptionally fast training times and state-of-the-art classification accuracy, we elected to use MiniRocket in this paper.</p>
<sec id="sec4">
<label>2.1.1.</label>
<title>Rocket</title>
<p>The basic principle behind ROCKET is to randomly generate a large number of convolutional kernels, which are then applied to the multivariate time series to obtain transformed features. Finally, a linear classifier, such as logistic regression or ridge regression, is trained on the transformed ROCKET features (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). Since the training complexity is linear in both the length of the time series and the number of training samples, ROCKET is an attractive, scalable algorithm for large datasets (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>).</p>
<p>There are five basic parameters that characterize a random convolutional ROCKET kernel: length, <italic>l<sub>k</sub></italic> and dilation, <italic>d</italic>, the individual weights, <italic>w</italic>, a bias term, <italic>b</italic>, and the use of padding (<xref ref-type="bibr" rid="ref25">Ismail Fawaz et al., 2019</xref>; <xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). The convolution, <italic>C</italic>, of the ROCKET kernel with a univariate time series can be computed by performing a sliding dot product operation over time <italic>t</italic> across the entire time series:</p>
<disp-formula id="EQ1"><label>(1)</label><mml:math id="M1"><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>&#x2217;</mml:mo><mml:mi>w</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:munderover><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mi>l</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>d</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Since patterns in the time series congruent with the kernel will result in large values (<xref ref-type="bibr" rid="ref25">Ismail Fawaz et al., 2019</xref>; <xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>), basic patterns or shapes can thus be detected. In ROCKET, global max pooling and the proportion of positive values (ppv) pooling are applied separately to the kernel output, providing two features per kernel. By using ppv pooling, ROCKET weights the prevalence of a feature captured by the kernel output over <italic>n</italic> time samples, <italic>t</italic>.</p>
<disp-formula id="EQ2"><label>(2)</label><mml:math id="M2"><mml:mrow><mml:mi mathvariant="normal">ppv</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x003E;</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>By using different values for the dilation, it is possible to capture patterns at different scales, and it is even possible to capture frequency information with larger dilation values corresponding to smaller frequencies and vice versa (<xref ref-type="bibr" rid="ref47">Yu and Koltun, 2016</xref>).</p>
<p>ROCKET generates the kernel parameters based on several predefined rules. First, the length of a kernel is selected with uniform probability from the set {7, 9, 11}. Then, the weights are sampled from a normal distribution, <italic>w<sub>j</sub></italic>&#x2009;&#x223C;&#x2009;<italic>N</italic>(0,1), and subsequently mean centered, i.e., after all weights have been determined, the mean weight is subtracted. A uniform distribution is used to sample the bias term with <italic>b</italic>&#x2009;&#x223C;&#x2009;<italic>U</italic>(&#x2212;1,1). The dilation is sampled from an exponential scale with <italic>d</italic>&#x2009;=&#x2009;[2<italic><sup>x</sup></italic>] where <italic>x</italic>&#x2009;&#x223C;&#x2009;<italic>U</italic>(0, <italic>A</italic>) and <italic>A</italic>&#x2009;=&#x2009;log<sub>2</sub>((<italic>l</italic><sub>input</sub> &#x2212; 1)/(<italic>l<sub>k</sub></italic> &#x2212; 1)). Finally, a binary decision with equal probability determines whether padding is used, i.e., whether (<italic>l<sub>k</sub></italic> &#x2212; 1)/2 zeros are added to the beginning and the end of the time series (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>).</p>
<p>For multivariate time series, an additional sixth kernel parameter is provided, which determines the particular dimensions a given kernel is applied to (<xref ref-type="bibr" rid="ref37">Ruiz et al., 2021</xref>). The kernels then become matrices with independently generated weights for each dimension, and consequently, the convolution is computed as the sliding dot product between two matrices (<xref ref-type="bibr" rid="ref37">Ruiz et al., 2021</xref>).</p>
<p>The feature that makes ROCKET special, and distinguishes it from earlier methods using (random) convolutional kernels, is the huge number and variety of kernels (10,000 per default) (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). Furthermore, a key contributor to the ability of ROCKET to detect patterns at different scales and frequencies is its effective use of dilation (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). Yet, the potentially most important aspect of ROCKET&#x2019;s success is that ROCKET computes two features for each kernel: the maximum value (similar to global max pooling) and a novel feature called the proportion of positive values, which provides the classifier with information about the prevalence of a given pattern in the time series (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>). Thus, the use of effective features and the combination of a large number of kernels enable ROCKET to distinguish between a multitude of time series patterns for the purpose of classification.</p>
<p>Finally, the ROCKET features are used to train a linear classifier. Logistic regression with stochastic gradient descent was recommended for very large datasets where the number of training examples is significantly higher than the number of features while, for smaller datasets, the authors recommended the use of ridge regression with cross-validation for the regularization parameter (<xref ref-type="bibr" rid="ref11">Dempster et al., 2020</xref>).</p>
</sec>
<sec id="sec5">
<label>2.1.2.</label>
<title>MiniRocket</title>
<p>The major difference between MiniRocket and ROCKET is that it uses a fixed set of convolutional kernels instead of kernels with random hyperparameters. In brief, the kernel length, <italic>l<sub>k</sub></italic>, in MiniRocket is fixed to 9 instead of {7, 9, 11}, and the kernel weights are restricted to either &#x2212;1 or 2 instead of weights drawn from a normal distribution, <italic>N</italic>(0,1). Moreover, MiniRocket uses fixed padding, and the maximum number of dilations per kernel is restricted to 32 (<xref ref-type="bibr" rid="ref12">Dempster et al., 2021</xref>). These features allow the method to minimize the number of hyperparameters per kernel, enabling faster computation. Moreover, MiniRocket computes the kernel weights, <italic>w</italic> and &#x2212;<italic>w</italic>, and the ppv at the same time by using a trick: with the proportion of negative values being pnv&#x2009;=&#x2009;1 &#x2212; ppv, MiniRocket uses the ppv of the inverted kernel without increasing the number of convolutions, thus doubling the number of kernels applied using a single convolution. In addition, several mathematical optimizations are applied [for details, see (<xref ref-type="bibr" rid="ref12">Dempster et al., 2021</xref>)] that make MiniRocket much faster (up to 75 times) compared to ROCKET, while maintaining the same accuracy (<xref ref-type="bibr" rid="ref12">Dempster et al., 2021</xref>).</p>
</sec>
</sec>
<sec id="sec6">
<label>2.2.</label>
<title>The data</title>
<p>MEG recordings from two different sites (United States and Germany) were used for analysis. The first dataset was obtained from the Human Connectome Project (HCP), while the second dataset was provided by the Institute of Neuroscience and Medicine at Forschungszentrum J&#x00FC;lich (FZJ), Germany. MEG data in the two datasets were recorded at various points in time. For each subject, a minimum of two resting-state measurements and at least one empty-room recording were available. The total number of MEG recordings used was 372 from 124 different subjects.</p>
<sec id="sec7">
<label>2.2.1.</label>
<title>Dataset HCP</title>
<p>The Human Connectome Project (HCP) offers open access to a dataset consisting of MEG resting-state recordings and anatomical MR scans for 89 subjects acquired at St. Louis University (<xref ref-type="bibr" rid="ref42">Van Essen et al., 2012</xref>, <xref ref-type="bibr" rid="ref41">2013</xref>; <xref ref-type="bibr" rid="ref30">Larson-Prior et al., 2013</xref>; <xref ref-type="bibr" rid="ref23">Hodge et al., 2016</xref>). From this dataset, we used recordings from 84 subjects, 44% of whom were female, and the mean age was 28.9&#x2009;&#x00B1;&#x2009;3.6&#x2009;years. Between two and three resting-state recordings with durations of approximately 6&#x2009;min were available for each subject. Furthermore, an empty-room measurement of approximately 5&#x2009;min in duration was available for each subject.</p>
<p>All MEG data were acquired using a whole-head MAGNES 3600 system (4D Neuroimaging, San Diego, CA) with 248 magnetometers and 23 reference channels at a sampling rate of 2034&#x2009;Hz. ECG and EOG were acquired along with the MEG signals. At the beginning of each MEG recording session, the subject&#x2019;s head shape, together with the positions of the localizer coils, were digitized for the alignment with the anatomical MR scans, which were recorded as T1-weighted volumes with 0.7&#x2009;mm resolution using a Skyra 3&#x2009;T scanner (Siemens Healthcare GmbH, Erlangen, Germany).</p>
</sec>
<sec id="sec8">
<label>2.2.2.</label>
<title>Dataset FZJ</title>
<p>The FZJ dataset consists of two different MEG resting-state recording sessions. The first one was acquired from 20 male subjects in 2012 and 2013, and the second set was acquired from another set of 20 subjects (55% female) in 2017 and 2018. The mean ages were 26.2&#x2009;&#x00B1;&#x2009;4.3 and 26.6&#x2009;&#x00B1;&#x2009;4.9&#x2009;years, respectively. While the recordings from 2012 and 2013 had a duration of approximately 3&#x2009;min, followed by empty room recordings of about 5&#x2009;min, the recordings from 2017 and 2018 had a duration of 6&#x2009;min, followed by empty room recordings of between 10 and 15&#x2009;min. Similar to the HCP data, a whole-head MAGNES 3600 system with 248 magnetometers and 23 reference channels was used; however, the sampling rate was 1017.25&#x2009;Hz.</p>
<p>Electrocardiography (ECG) and electrooculography (EOG) were recorded using the MAGNES 3600 system along with the MEG measurements. An external BrainAmp ExG system (Brain Products, Gilching, Germany) was used to record ECG and EOG at a sampling rate of 5,000&#x2009;Hz for the later recordings (2017 and 2018). The subjects&#x2019; head shapes were digitized prior to the MEG recording sessions for alignment with the anatomical MR scans, which were recorded using a MAGNETOM 3&#x2009;T scanner (Siemens, Munich, Germany) with MPRAGE (<xref ref-type="bibr" rid="ref33">Mugler and Brookeman, 1990</xref>).</p>
</sec>
</sec>
<sec id="sec9">
<label>2.3.</label>
<title>Data analysis</title>
<p>Python 3.10 was used for data analysis, with the main packages being MNE-Python v1.3.1 (<xref ref-type="bibr" rid="ref20">Gramfort et al., 2013</xref>, <xref ref-type="bibr" rid="ref21">2014</xref>), Scikit-learn v1.2.2 (<xref ref-type="bibr" rid="ref34">Pedregosa et al., 2011</xref>), and sktime v0.17.1 (<xref ref-type="bibr" rid="ref31">L&#x00F6;ning et al., 2019</xref>). The source spaces were constructed from the anatomical MR scans based on an octahedral mesh using FreeSurfer (<xref ref-type="bibr" rid="ref8">Dale et al., 1999</xref>; <xref ref-type="bibr" rid="ref18">Fischl et al., 1999</xref>).</p>
<sec id="sec10">
<label>2.3.1.</label>
<title>Pre-processing</title>
<p>The first step in the pre-processing pipeline was to identify MEG channels with strong artifacts. An in-house machine learning algorithm based on density-based spatial clustering of applications with noise (DBSCAN) (<xref ref-type="bibr" rid="ref16">Ester et al., 1996</xref>), which scans for artifacts both in the time and the frequency domain, was used for this purpose. Channels and time segments with strong artifacts were annotated as &#x2018;bad&#x2019;, and the results of the automated procedure were subsequently verified by visual inspection. Furthermore, all recordings were also visually inspected for segments containing unusually strong artifacts (e.g., muscle artifacts), which were discarded from the analysis. The signals of the annotated bad channels were subsequently replaced by virtual channels using the interpolation method as implemented in (<xref ref-type="bibr" rid="ref20">Gramfort et al., 2013</xref>, <xref ref-type="bibr" rid="ref21">2014</xref>). <xref rid="tab1" ref-type="table">Table 1</xref> summarizes the duration of the MEG recordings used for each dataset and the recording type (resting-state or empty room data).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Median recording times and their ranges for each type of recording after the removal of bad data segments.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Dataset</th>
<th align="center" valign="top">Rec. type</th>
<th align="center" valign="top"><italic>T</italic><sub>median</sub></th>
<th align="center" valign="top"><italic>T</italic><sub>max</sub></th>
<th align="center" valign="top"><italic>T</italic><sub>min</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">FZJ</td>
<td align="center" valign="top">Empty</td>
<td align="center" valign="top">460</td>
<td align="center" valign="top">911</td>
<td align="center" valign="top">271</td>
</tr>
<tr>
<td align="left" valign="top">FZJ</td>
<td align="center" valign="top">rs1</td>
<td align="center" valign="top">220</td>
<td align="center" valign="top">299</td>
<td align="center" valign="top">136</td>
</tr>
<tr>
<td align="left" valign="top">FZJ</td>
<td align="center" valign="top">rs2</td>
<td align="center" valign="top">231</td>
<td align="center" valign="top">298</td>
<td align="center" valign="top">151</td>
</tr>
<tr>
<td align="left" valign="top">HCP</td>
<td align="center" valign="top">Empty</td>
<td align="center" valign="top">275</td>
<td align="center" valign="top">300</td>
<td align="center" valign="top">171</td>
</tr>
<tr>
<td align="left" valign="top">HCP</td>
<td align="center" valign="top">rs1</td>
<td align="center" valign="top">291</td>
<td align="center" valign="top">300</td>
<td align="center" valign="top">243</td>
</tr>
<tr>
<td align="left" valign="top">HCP</td>
<td align="center" valign="top">rs2</td>
<td align="center" valign="top">293</td>
<td align="center" valign="top">300</td>
<td align="center" valign="top">232</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Times (T) in seconds.</p>
</table-wrap-foot>
</table-wrap>
<p>Next, the MEG signals were band-pass filtered from 1 to 200&#x2009;Hz. Environmental and power line noise was removed by subtracting appropriately weighted, band-pass filtered (0.1 to 5&#x2009;Hz) reference signals from the MEG signals, as described in (<xref ref-type="bibr" rid="ref35">Robinson, 1989</xref>). Furthermore, power-line noise (50&#x2009;Hz in Germany and 60&#x2009;Hz in the United States of America) plus harmonics were isolated in the reference channels using anti-notch filters at these frequencies. The weighted signal from the reference channels was then subtracted from the signal channels to reduce power-line noise.</p>
<p>Finally, ECG and EOG artifacts were removed using independent component analysis (ICA) (<xref ref-type="bibr" rid="ref24">Hyv&#x00E4;rinen and Oja, 2000</xref>; <xref ref-type="bibr" rid="ref9">Dammers et al., 2008</xref>). Components containing significant contributions of cardiac or ocular activity were removed prior to source localization (<xref ref-type="bibr" rid="ref24">Hyv&#x00E4;rinen and Oja, 2000</xref>; <xref ref-type="bibr" rid="ref9">Dammers et al., 2008</xref>).</p>
</sec>
<sec id="sec11">
<label>2.3.2.</label>
<title>Source localization and extraction of label time courses</title>
<p>The pre-processed, continuous MEG resting-state signals were projected onto the source space using the minimum-norm estimate (MNE) method (<xref ref-type="bibr" rid="ref22">H&#x00E4;m&#x00E4;l&#x00E4;inen and Ilmoniemi, 1994</xref>). The source spaces were then divided into 68 (34 per hemisphere) anatomical regions (labels) based on the Desikan-Killiany Atlas (<xref ref-type="bibr" rid="ref15">Desikan et al., 2006</xref>). As the frontal pole region is very small in this particular atlas, the number of vertices identified was very small, and no vertices were found in this region for one subject. Therefore, this subject was excluded from the analysis. Following this step, a single representative source time course was extracted for each region as the mean time course of all vertices inside this brain region. Finally, these continuous source time courses were split into time segments of different lengths (hereafter referred to as &#x2018;trials&#x2019;).</p>
<p>The same pre-processing and source localization steps were repeated for the empty-room data, with the data being treated as if it were a subject&#x2019;s recording. The empty-room data, which contain environmental noise only, are recorded directly after the MEG recordings. To further investigate whether day-to-day environmental noise variability causes significant differences, all empty-room recordings were also projected onto the same source space of a randomly selected subject. In this way, the influence of the background noise can be minimized, allowing the classifier to use the recordings for fingerprinting decisions.</p>
</sec>
</sec>
<sec id="sec12">
<label>2.4.</label>
<title>Classification</title>
<p>sktime (version 0.17.1) was used to perform the MiniRocket transformation of the MEG trials, and scikit-learn (version 1.2.2) was used to fit a ridge regression classifier to the transformed features.</p>
<p>To evaluate the classification performance, we compute the accuracy (ACC) as the ratio of the number of correctly classified instances to the total number of instances. In relation to neural fingerprinting, we test how accurately the model detects whether two different datasets from the same subject match. In addition to the ACC, the Precision, the Recall, and the F1-Score are computed.</p>
<p>The Precision refers to the proportion of correctly predicted positive instances out of all the instances predicted as positive by the model and is defined by Precision&#x2009;=&#x2009;TP/(TP&#x2009;+&#x2009;FP), with TP and FP being the True Positive and False Positives, respectively. A high precision value indicates that the model has a low rate of false positives. Recall (a.k.a. Sensitivity) is defined by Recall&#x2009;=&#x2009;TP/(TP&#x2009;+&#x2009;FN), with FN being the False Negatives, and measures the proportion of actual positive instances that are correctly identified by the model. Higher Recall indicates that the model is better at identifying all relevant positive instances in the dataset. The F1-Score is defined by F1-Score&#x2009;=&#x2009;2 &#x002A; (Precision &#x002A; Recall)/(Precision&#x2009;+&#x2009;Recall). Thus, the F1-Score provides a balance between Precision and Recall and ranges from 0 to 1, where 1 represents perfect precision and recall, and 0 indicates poor performance. We report the macro-average F1-Score, Precision, and Recall for each class independently and then take the average across all classes to ensure that the performance of each class (the subject) is given equal importance.</p>
<p>To evaluate the overall performance of the model, we employed a leave-one-out method (LOOM) at the subject level (<xref ref-type="bibr" rid="ref39">Schl&#x00F6;gl and Supp, 2006</xref>). Specifically, each subject was left out of the training and test sets once. This results in a total of 124 mean scores (e.g., accuracy) for each of the two training and test variants, for which the overall mean and standard deviation are computed. In this way, the stability of the model performance and the influence of data from individuals can be evaluated by computing the variance of the performance metrics.</p>
<sec id="sec13">
<label>2.4.1.</label>
<title>Resting-state neural fingerprinting</title>
<p>To investigate the performance of the classifier with respect to identifying a specific subject within the cohort, time series originating from the first resting-state recording (rs1) were used for training, while time series originating from the second resting-state recording (rs2) were used for testing. This order was then reversed to determine a broader estimate of the classifier&#x2019;s performance.</p>
<p>The continuous source time course of each brain region was used for a <italic>z</italic>-scored normalization. A random but fixed subset of trials was sampled from each recording to ensure balanced datasets across subjects. To gauge the variance expected due to the random nature of the method, we repeated the procedure ten times using random selections of trials and kernel initializations. The classifier&#x2019;s dependence on several parameters was tested by means of varying the number of trials used per subject in the training set, the trial duration, and the number of ROCKET kernels used.</p>
</sec>
<sec id="sec14">
<label>2.4.2.</label>
<title>Empty-room noise</title>
<p>To assess the impact of the day-to-day variations in the background noise with respect to the classification performance, we performed a control experiment with identical settings but with no subject in the scanner. These so-called empty room recordings were performed directly after the subject recording and were labeled with the same ID as the subject. In other words, the environmental noise data is used to have a third control condition to evaluate the model. With the empty-room noise data as a third set of recordings (rs1, rs2, empty), we performed the training and the testing of the model for all possible combinations. Each experiment was repeated ten times with a random selection of trials as well as different random kernel initializations. The mean accuracy was computed for each combination.</p>
</sec>
</sec>
</sec>
<sec sec-type="results" id="sec15">
<label>3.</label>
<title>Results</title>
<sec id="sec16">
<label>3.1.</label>
<title>Resting-state neural fingerprinting and its dependency on parameters</title>
<p>The classification of two MEG datasets recorded from the same subjects on the same day revealed remarkably high accuracy scores of about 99% using MiniRocket. The impact of important parameters on the classification accuracy was tested by varying the number of kernels, the number of trials, and the trial duration. While investigating the impact of one parameter, all other parameters were fixed as follows (unless stated otherwise): the number of kernels was set to 3,500, the number of trials to 15, and their duration to 1.5&#x2009;s.</p>
<p><xref rid="fig1" ref-type="fig">Figure 1</xref> shows the dependency of the accuracy scores on the number of kernels used in MiniRocket. The figure shows a sharp increase in accuracy between 100 and 500 kernels, with scores already above 96% for 500 kernels. For the number of kernels ranging from 1,000 to 5,000, there was a relatively marginal increase in accuracy, which only ranged from about 98.9 to 99.5%. All results, including the upper and lower range, can be found in <xref rid="tab2" ref-type="table">Table 2</xref>.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Dependence of the accuracy scores on the number of MiniRocket kernels. The classifier was trained on 15 time segments with a duration of 1.5&#x2009;s per segment. The blue shaded area indicates the upper and lower range of the classification accuracy between the 10 repetitions with a random selection of time segments and a random initialization of kernels.</p>
</caption>
<graphic xlink:href="fnins-17-1229371-g001.tif"/>
</fig>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>MiniRocket accuracy scores dependent on several parameters.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" colspan="4">Dependence on number of kernels (15 training trials, 1.5&#x2009;s duration)</th>
</tr>
<tr>
<th align="left" valign="top">Number of kernels</th>
<th align="center" valign="top">Mean accuracy</th>
<th align="center" valign="top">Min accuracy</th>
<th align="center" valign="top">Max accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">100</td>
<td align="char" valign="top" char=".">81.32</td>
<td align="char" valign="top" char=".">76.16</td>
<td align="char" valign="top" char=".">85.27</td>
</tr>
<tr>
<td align="left" valign="top">200</td>
<td align="char" valign="top" char=".">83.6</td>
<td align="char" valign="top" char=".">79.78</td>
<td align="char" valign="top" char=".">87.8</td>
</tr>
<tr>
<td align="left" valign="top">300</td>
<td align="char" valign="top" char=".">93.33</td>
<td align="char" valign="top" char=".">91.96</td>
<td align="char" valign="top" char=".">94.73</td>
</tr>
<tr>
<td align="left" valign="top">400</td>
<td align="char" valign="top" char=".">96.45</td>
<td align="char" valign="top" char=".">95.51</td>
<td align="char" valign="top" char=".">97.34</td>
</tr>
<tr>
<td align="left" valign="top">500</td>
<td align="char" valign="top" char=".">97.37</td>
<td align="char" valign="top" char=".">96.26</td>
<td align="char" valign="top" char=".">98.01</td>
</tr>
<tr>
<td align="left" valign="top">750</td>
<td align="char" valign="top" char=".">98.32</td>
<td align="char" valign="top" char=".">97.74</td>
<td align="char" valign="top" char=".">98.71</td>
</tr>
<tr>
<td align="left" valign="top">1,000</td>
<td align="char" valign="top" char=".">98.87</td>
<td align="char" valign="top" char=".">98.52</td>
<td align="char" valign="top" char=".">99.06</td>
</tr>
<tr>
<td align="left" valign="top">1,250</td>
<td align="char" valign="top" char=".">99.11</td>
<td align="char" valign="top" char=".">98.79</td>
<td align="char" valign="top" char=".">99.52</td>
</tr>
<tr>
<td align="left" valign="top">1,500</td>
<td align="char" valign="top" char=".">99.3</td>
<td align="char" valign="top" char=".">98.98</td>
<td align="char" valign="top" char=".">99.6</td>
</tr>
<tr>
<td align="left" valign="top">2,000</td>
<td align="char" valign="top" char=".">99.42</td>
<td align="char" valign="top" char=".">99.22</td>
<td align="char" valign="top" char=".">99.6</td>
</tr>
<tr>
<td align="left" valign="top">2,500</td>
<td align="char" valign="top" char=".">99.49</td>
<td align="char" valign="top" char=".">99.3</td>
<td align="char" valign="top" char=".">99.57</td>
</tr>
<tr>
<td align="left" valign="top">3,500</td>
<td align="char" valign="top" char=".">99.51</td>
<td align="char" valign="top" char=".">99.3</td>
<td align="char" valign="top" char=".">99.7</td>
</tr>
<tr>
<td align="left" valign="top">5,000</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.49</td>
<td align="char" valign="top" char=".">99.65</td>
</tr>
<tr>
<td align="left" valign="top">10,000</td>
<td align="char" valign="top" char=".">99.59</td>
<td align="char" valign="top" char=".">99.49</td>
<td align="char" valign="top" char=".">99.65</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" colspan="4">Dependence on number of trials (3,500 kernels, trial duration 1.5&#x2009;s)</th>
</tr>
<tr>
<th align="left" valign="top">Number of trials</th>
<th align="center" valign="top">Mean accuracy</th>
<th align="center" valign="top">Min accuracy</th>
<th align="center" valign="top">Max accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">1</td>
<td align="char" valign="top" char=".">63.63</td>
<td align="char" valign="top" char=".">56.45</td>
<td align="char" valign="top" char=".">70.16</td>
</tr>
<tr>
<td align="left" valign="top">2</td>
<td align="char" valign="top" char=".">90.18</td>
<td align="char" valign="top" char=".">86.29</td>
<td align="char" valign="top" char=".">93.15</td>
</tr>
<tr>
<td align="left" valign="top">3</td>
<td align="char" valign="top" char=".">96.36</td>
<td align="char" valign="top" char=".">94.76</td>
<td align="char" valign="top" char=".">97.58</td>
</tr>
<tr>
<td align="left" valign="top">4</td>
<td align="char" valign="top" char=".">97.97</td>
<td align="char" valign="top" char=".">97.48</td>
<td align="char" valign="top" char=".">98.29</td>
</tr>
<tr>
<td align="left" valign="top">5</td>
<td align="char" valign="top" char=".">98.63</td>
<td align="char" valign="top" char=".">98.31</td>
<td align="char" valign="top" char=".">98.87</td>
</tr>
<tr>
<td align="left" valign="top">6</td>
<td align="char" valign="top" char=".">98.97</td>
<td align="char" valign="top" char=".">98.66</td>
<td align="char" valign="top" char=".">99.26</td>
</tr>
<tr>
<td align="left" valign="top">7</td>
<td align="char" valign="top" char=".">99.11</td>
<td align="char" valign="top" char=".">98.85</td>
<td align="char" valign="top" char=".">99.37</td>
</tr>
<tr>
<td align="left" valign="top">8</td>
<td align="char" valign="top" char=".">99.22</td>
<td align="char" valign="top" char=".">98.94</td>
<td align="char" valign="top" char=".">99.5</td>
</tr>
<tr>
<td align="left" valign="top">9</td>
<td align="char" valign="top" char=".">99.31</td>
<td align="char" valign="top" char=".">99.06</td>
<td align="char" valign="top" char=".">99.55</td>
</tr>
<tr>
<td align="left" valign="top">10</td>
<td align="char" valign="top" char=".">99.35</td>
<td align="char" valign="top" char=".">99.15</td>
<td align="char" valign="top" char=".">99.52</td>
</tr>
<tr>
<td align="left" valign="top">12</td>
<td align="char" valign="top" char=".">99.45</td>
<td align="char" valign="top" char=".">99.13</td>
<td align="char" valign="top" char=".">99.56</td>
</tr>
<tr>
<td align="left" valign="top">15</td>
<td align="char" valign="top" char=".">99.51</td>
<td align="char" valign="top" char=".">99.3</td>
<td align="char" valign="top" char=".">99.7</td>
</tr>
<tr>
<td align="left" valign="top">20</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.54</td>
<td align="char" valign="top" char=".">99.6</td>
</tr>
<tr>
<td align="left" valign="top">25</td>
<td align="char" valign="top" char=".">99.58</td>
<td align="char" valign="top" char=".">99.56</td>
<td align="char" valign="top" char=".">99.61</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" colspan="4">Dependence on trial duration (3,500 kernels, 15 training trials)</th>
</tr>
<tr>
<th align="left" valign="top">Trial duration</th>
<th align="center" valign="top">Mean accuracy</th>
<th align="center" valign="top">Min accuracy</th>
<th align="center" valign="top">Max accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">0.1</td>
<td align="char" valign="top" char=".">29.56</td>
<td align="char" valign="top" char=".">26.02</td>
<td align="char" valign="top" char=".">31.77</td>
</tr>
<tr>
<td align="left" valign="top">0.2</td>
<td align="char" valign="top" char=".">71.15</td>
<td align="char" valign="top" char=".">67.66</td>
<td align="char" valign="top" char=".">73.76</td>
</tr>
<tr>
<td align="left" valign="top">0.3</td>
<td align="char" valign="top" char=".">89.64</td>
<td align="char" valign="top" char=".">88.12</td>
<td align="char" valign="top" char=".">91.64</td>
</tr>
<tr>
<td align="left" valign="top">0.4</td>
<td align="char" valign="top" char=".">95.67</td>
<td align="char" valign="top" char=".">95.22</td>
<td align="char" valign="top" char=".">96.26</td>
</tr>
<tr>
<td align="left" valign="top">0.5</td>
<td align="char" valign="top" char=".">97.88</td>
<td align="char" valign="top" char=".">97.58</td>
<td align="char" valign="top" char=".">98.28</td>
</tr>
<tr>
<td align="left" valign="top">0.6</td>
<td align="char" valign="top" char=".">98.58</td>
<td align="char" valign="top" char=".">98.2</td>
<td align="char" valign="top" char=".">99.03</td>
</tr>
<tr>
<td align="left" valign="top">0.7</td>
<td align="char" valign="top" char=".">99.01</td>
<td align="char" valign="top" char=".">98.79</td>
<td align="char" valign="top" char=".">99.33</td>
</tr>
<tr>
<td align="left" valign="top">0.8</td>
<td align="char" valign="top" char=".">99.2</td>
<td align="char" valign="top" char=".">98.87</td>
<td align="char" valign="top" char=".">99.46</td>
</tr>
<tr>
<td align="left" valign="top">0.9</td>
<td align="char" valign="top" char=".">99.28</td>
<td align="char" valign="top" char=".">99.11</td>
<td align="char" valign="top" char=".">99.52</td>
</tr>
<tr>
<td align="left" valign="top">1.0</td>
<td align="char" valign="top" char=".">99.37</td>
<td align="char" valign="top" char=".">99.09</td>
<td align="char" valign="top" char=".">99.6</td>
</tr>
<tr>
<td align="left" valign="top">1.25</td>
<td align="char" valign="top" char=".">99.5</td>
<td align="char" valign="top" char=".">99.33</td>
<td align="char" valign="top" char=".">99.57</td>
</tr>
<tr>
<td align="left" valign="top">1.5</td>
<td align="char" valign="top" char=".">99.51</td>
<td align="char" valign="top" char=".">99.3</td>
<td align="char" valign="top" char=".">99.7</td>
</tr>
<tr>
<td align="left" valign="top">2.0</td>
<td align="char" valign="top" char=".">99.55</td>
<td align="char" valign="top" char=".">99.33</td>
<td align="char" valign="top" char=".">99.65</td>
</tr>
<tr>
<td align="left" valign="top">2.5</td>
<td align="char" valign="top" char=".">99.58</td>
<td align="char" valign="top" char=".">99.49</td>
<td align="char" valign="top" char=".">99.62</td>
</tr>
<tr>
<td align="left" valign="top">3.0</td>
<td align="char" valign="top" char=".">99.6</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.62</td>
</tr>
<tr>
<td align="left" valign="top">3.5</td>
<td align="char" valign="top" char=".">99.6</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.68</td>
</tr>
<tr>
<td align="left" valign="top">4.0</td>
<td align="char" valign="top" char=".">99.6</td>
<td align="char" valign="top" char=".">99.54</td>
<td align="char" valign="top" char=".">99.68</td>
</tr>
<tr>
<td align="left" valign="top">4.5</td>
<td align="char" valign="top" char=".">99.61</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.7</td>
</tr>
<tr>
<td align="left" valign="top">5.0</td>
<td align="char" valign="top" char=".">99.6</td>
<td align="char" valign="top" char=".">99.57</td>
<td align="char" valign="top" char=".">99.68</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To estimate the impact of the number of time segments used on the classification result, the number of trials was gradually increased until no further change in accuracy was observed. <xref rid="fig2" ref-type="fig">Figure 2</xref> shows the dependence of accuracy scores on the number of training trials. The figure shows that when five or more trials are used, classification accuracies of 98% and above can be achieved. Only a marginal increase in accuracy, ranging from about 99.3 to 99.6%, was achieved from 10 to 30 trials (<xref rid="tab2" ref-type="table">Table 2</xref>).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Dependence of the accuracy scores on the number of training segments. The number of kernels for the MiniRocket classifier was set to 3,500. The duration of the time segments was set to 1.5&#x2009;s. The blue shaded area indicates the upper and lower range of the classification accuracy between the 10 repetitions with a random selection of time segments and a random initialization of kernels.</p>
</caption>
<graphic xlink:href="fnins-17-1229371-g002.tif"/>
</fig>
<p>The dependency of the accuracy scores on the trial duration is shown in <xref rid="fig3" ref-type="fig">Figure 3</xref>. For segment durations ranging from 0.1&#x2009;s to 0.5&#x2009;s, there is a sharp increase in accuracy, while for durations of 1&#x2009;s in length, scores above 99.4% could already be achieved. Only a marginal increase in accuracy, from 99.5 to 99.6%, was achieved for durations ranging from 2.0&#x2009;s to 5.0&#x2009;s. A summary of all results and combinations is shown in <xref rid="tab2" ref-type="table">Table 2</xref>.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Dependence of the accuracy scores on the duration of the time segments. The number of kernels for the MiniRocket classifier was set to 3,500 and trained on 15 time segments. The duration of segments varied from 0.1 to 5&#x2009;s. The blue shaded area indicates the upper and lower range of the classification accuracy between the 10 repetitions with a random selection of time segments and a random initialization of kernels.</p>
</caption>
<graphic xlink:href="fnins-17-1229371-g003.tif"/>
</fig>
<p>The MiniRocket classification accuracy scores obtained through the LOOM method for neural fingerprinting based on resting-state data are as follows: The average accuracy score after cross-validation for training on rs1 and testing on rs2 was 99.15%&#x2009;&#x00B1;&#x2009;0.078%. Similarly, the mean Recall and Precision were found to be 99.15%&#x2009;&#x00B1;&#x2009;0.078% and 98.72%&#x2009;&#x00B1;&#x2009;0.425%, respectively, and the F1-Score was 98.80%&#x2009;&#x00B1;&#x2009;0.124% (<xref rid="tab3" ref-type="table">Table 3</xref>). For training on rs2 and testing on rs1, the average accuracy score was found to be slightly larger with 99.96%&#x2009;&#x00B1;&#x2009;0.036%, as compared to the accuracy of 99.15% for training on rs1 and testing on rs2. This tendency was also observed in the other three metrics (cf. <xref rid="tab3" ref-type="table">Table 3</xref>). Since the probability of obtaining a match for a single subject out of 124 subjects is 1/124, which is about 0.0081, the chance level in our experiment is approximately 0.81%. The difference in accuracy between the two classification tests was found to be significant, but with a change in score around the chance level (0.80&#x2013;1.24%).</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>LOOM-based performance scores for two classification tests with the number of kernels set to 3,500, the number of trials to 15, and their duration to 1.5&#x2009;s.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">rs1&#x2009;&#x2212;&#x2009;rs2</th>
<th align="center" valign="top">rs2&#x2009;&#x2212;&#x2009;rs1</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Accuracy</td>
<td align="char" valign="top" char="&#x00B1;">99.15 &#x00B1; 0.0779</td>
<td align="char" valign="top" char="&#x00B1;">99.96 &#x00B1; 0.0364</td>
</tr>
<tr>
<td align="left" valign="top">Precision</td>
<td align="char" valign="top" char="&#x00B1;">98.72 &#x00B1; 0.4250</td>
<td align="char" valign="top" char="&#x00B1;">99.96 &#x00B1; 0.0338</td>
</tr>
<tr>
<td align="left" valign="top">Recall</td>
<td align="char" valign="top" char="&#x00B1;">99.15 &#x00B1; 0.0779</td>
<td align="char" valign="top" char="&#x00B1;">99.96 &#x00B1; 0.0364</td>
</tr>
<tr>
<td align="left" valign="top">F1-Score</td>
<td align="char" valign="top" char="&#x00B1;">98.80 &#x00B1; 0.1238</td>
<td align="char" valign="top" char="&#x00B1;">99.96 &#x00B1; 0.0364</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec17">
<label>3.2.</label>
<title>Influence of empty-room noise</title>
<p>Ten random trials were sampled per subject and per set with a trial duration of 1.5&#x2009;s and 3,500 kernels. Whenever data originated from the same recordings, the continuous signal for each subject was split into two parts, and the trials for training and testing were sampled from the first and second half of the recording, respectively.</p>
<p><xref rid="fig4" ref-type="fig">Figure 4</xref> shows the dependence of the MiniRocket classifier accuracy scores on different combinations of training and test sets. The results show that accuracies above 99.3% were achieved for all combinations of training and testing on resting-state data (rs1 vs. rs2 and rs2 vs. rs1). For resting-state recordings evaluated against the empty room recordings from the same day, the accuracies were close to the chance level, as depicted in <xref rid="fig4" ref-type="fig">Figure 4</xref>. In the case of empty vs. empty room recordings, the classifier achieved a low accuracy of 7.9%.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Dependence of the accuracy scores of the MiniRocket classifier on different combinations of training and test sets. Since rest1, rest2, and empty were all recorded on the same day, it is possible to isolate the contribution of the daily background noise to the classification outcomes. The number of kernels for the MiniRocket classifier was set to 3,500. For both the training and test set, 10 time segments of 1.5&#x2009;s were sampled from each of the subjects.</p>
</caption>
<graphic xlink:href="fnins-17-1229371-g004.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="discussions" id="sec18">
<label>4.</label>
<title>Discussion</title>
<p>There are many promising applications of multivariate time series classifications (MTSC) in medicine and neuroscience, including in the diagnosis of medical conditions, personalized treatment planning, and the development of brain-computer interfaces (BCIs). With this study, we have shown that it is possible to perform neural fingerprinting directly on MEG time series without performing feature engineering. This is, to the best of our knowledge, the first time that neural fingerprinting has been achieved based on magnetic field changes in single trials of MEG time series recordings without the need for a feature-based analysis. Furthermore, the MiniRocket approach used in the study required fewer data (shorter trials) for successful classification and also improved accuracy. For example, previous MEG publications reached MEG resting-state classification accuracies with trial lengths of 30&#x2009;s in healthy controls of about 94.9&#x2013;96.2% (<xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>), and 94.5&#x2013;98.2% at trial lengths of 8&#x2009;s (<xref ref-type="bibr" rid="ref38">Sareen et al., 2021</xref>). In contrast, MiniRocket analysis with 3,500 kernels achieved a classification accuracy of over 99% when training the model on as little as 15&#x2009;s of data and testing it on 1&#x2009;s time segments. These results demonstrate that substantially fewer data are needed for accurate classification in comparison with previous approaches that use MEG data in combination with connectivity measures (<xref ref-type="bibr" rid="ref14">Demuru et al., 2017</xref>; <xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>; <xref ref-type="bibr" rid="ref38">Sareen et al., 2021</xref>) or data from electroencephalography (EEG) using EEG power spectra (<xref ref-type="bibr" rid="ref29">Kong et al., 2019</xref>; <xref ref-type="bibr" rid="ref13">Demuru and Fraschini, 2020</xref>).</p>
<p>In our parameter investigation, we aimed to explore the minimum input data requirements while maintaining computational efficiency. Our tests on trial duration suggested that a minimum of 0.9&#x2009;s and 15 trials were sufficient to achieve accuracies above 99%. In terms of the number of trials, we found that training a MiniRocket classifier with 3,500 kernels requires at least nine trials of 1.5&#x2009;s length to achieve accuracies above 99%. During our exploration of the number of kernels, we observed that increasing the number of kernels led to improved results in the low data regime, at the expense of computational demand. We were surprised to find that accuracies saturated at a relatively low number of 3,500 kernels using a fixed set of 15 trials of 1.5&#x2009;s duration, resulting in accuracies above 99.3% (<xref rid="tab2" ref-type="table">Table 2</xref>).</p>
<p>Interestingly, we observed a small but significant difference in all metrics when we reversed the order of training and evaluation set using the LOOM method. Specifically, the accuracies were 99.15% when training on rs1 and testing on rs2, whereas they rose to 99.96% when the order was reversed (<xref rid="tab3" ref-type="table">Table 3</xref>). This difference in accuracy of 0.81% is about chance level and may be due to a single subject only. In principle, we did not expect the accuracies to be identical as the two measurements will not be identical in practice. The subject&#x2019;s condition, such as mood and fatigue, is very likely to have an influence on the matching performance. Moreover, another source contributing to this difference may be due to a slight reduction in data quality over long recording sessions, possibly caused by increased subject movement due to fatigue or the execution of tasks before the second resting-state session. These findings raise the possibility that prioritizing training on datasets with higher complexity and diversity could be more crucial than employing the most complex data exclusively at the time of testing. However, in future work, it would be very interesting to investigate the model performance in a cohort of subjects where the temporal distance between rs1 and rs2 is increased by means of hours, days, weeks, and months.</p>
<p>In summary, these results are a proof of concept that subject differentiation can, in principle, be achieved directly from MEG brain recordings as short as 1&#x2009;s to achieve high accuracies of about 99% using MiniRocket. This would greatly simplify current procedures as the technique does not require the selection of the best-performing feature for the classification model &#x2013; as is the case when using functional connectomes (<xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>; <xref ref-type="bibr" rid="ref38">Sareen et al., 2021</xref>), for which the best-performing method needs to be determined. The high classification accuracy and the need for only relatively short segments of single trials data make MiniRocket a promising candidate for BCI research and motivate further research into the application of MiniRocket to MEG recordings.</p>
<sec id="sec19">
<label>4.1.</label>
<title>Limitations</title>
<p>It has been suggested that day-to-day variations in the background noise during the recording may contribute significantly to the classification (<xref ref-type="bibr" rid="ref7">da Silva Castanheira et al., 2021</xref>). We investigated this possibility by training the classifier on the subject&#x2019;s recording and testing on corresponding empty-room data, which were recorded soon after the experiment. While our study shows that training the classifier on empty-room data and applying it to the subject&#x2019;s resting-state data or vice versa did not result in the correct identification of individuals, and accuracies achieved on the cross-over of resting-state measurements and empty-room measurements were approximately at chance level, our findings suggest that the background noise may have a minor influence on the fingerprinting classification results. Notably, our analysis shows that matching empty room signals could be identified with an accuracy of approximately 8%.</p>
<p>To further investigate the classification performance and limitations on neural fingerprinting, we plan to implement a longitudinal study design to investigate the stability and performance of the classifier over time. Moreover, given that the subject is the class to be identified in this approach, we cannot split the data into training and test sets by subjects for the typical generalization purposes, which is a limitation of the method and is similar to a fingerprint analysis in criminal investigations, where a match can only be found if the suspect&#x2019;s fingerprints are already in the database.</p>
</sec>
</sec>
<sec sec-type="data-availability" id="sec20">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="sec21" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the ethical committee of the RWTH Aachen University, Aachen, under the code EK 249/22. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="sec22">
<title>Author contributions</title>
<p>NK and CK contributed equally to the conception and design of the study and to the data analysis. CK wrote the original draft. NS, IN, and JD supervised the study and acquired funding. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec sec-type="funding-information" id="sec23">
<title>Funding</title>
<p>Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) &#x2013; 491111487, by the HBP SGA3 &#x2013; Human Brain Project Specific Grant Agreement 3 (2020-04-01&#x2013;2023-03-31), Helmholtz Metadata Collaboration (HMC), by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) &#x2013; 368482240/GRK2416, and by the Joint Lab &#x201C;Supercomputing and Modeling for the Human Brain&#x201D;.</p>
</sec>
<sec sec-type="COI-statement" id="sec24">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<ack>
<p>The authors would like to thank Claire Rick for proofreading the manuscript. This study is part of the doctoral thesis (Dr. rer. medic.) of Nikolas Kampel at the Medical Faculty of the RWTH Aachen University, Germany.</p>
</ack>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abanda</surname> <given-names>A.</given-names></name> <name><surname>Mori</surname> <given-names>U.</given-names></name> <name><surname>Lozano</surname> <given-names>J. A.</given-names></name></person-group> (<year>2019</year>). <article-title>A review on distance based time series classification</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>33</volume>, <fpage>378</fpage>&#x2013;<lpage>412</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-018-0596-4</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amico</surname> <given-names>E.</given-names></name> <name><surname>Go&#x00F1;i</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>The quest for identifiability in human functional connectomes</article-title>. <source>Sci. Rep.</source> <volume>8</volume>:<fpage>8254</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-018-25089-1</pub-id>, PMID: <pub-id pub-id-type="pmid">29844466</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Bagnall</surname> <given-names>A.</given-names></name> <name><surname>Flynn</surname> <given-names>M.</given-names></name> <name><surname>Large</surname> <given-names>J.</given-names></name> <name><surname>Lines</surname> <given-names>J.</given-names></name> <name><surname>Middlehurst</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>On the usage and performance of the hierarchical vote collective of transformation-based ensembles version 1.0 (HIVE-COTE v1.0)</article-title>. <conf-name>Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) 12588 LNAI</conf-name>, <fpage>3</fpage>&#x2013;<lpage>18</lpage></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bagnall</surname> <given-names>A.</given-names></name> <name><surname>Lines</surname> <given-names>J.</given-names></name> <name><surname>Bostrom</surname> <given-names>A.</given-names></name> <name><surname>Large</surname> <given-names>J.</given-names></name> <name><surname>Keogh</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>The great time series classification bake off: a review and experimental evaluation of recent algorithmic advances</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>31</volume>, <fpage>606</fpage>&#x2013;<lpage>660</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-016-0483-9</pub-id>, PMID: <pub-id pub-id-type="pmid">30930678</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bari</surname> <given-names>S.</given-names></name> <name><surname>Amico</surname> <given-names>E.</given-names></name> <name><surname>Vike</surname> <given-names>N.</given-names></name> <name><surname>Talavage</surname> <given-names>T. M.</given-names></name> <name><surname>Go&#x00F1;i</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Uncovering multi-site identifiability based on resting-state functional connectomes</article-title>. <source>NeuroImage</source> <volume>202</volume>:<fpage>115967</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.06.045</pub-id>, PMID: <pub-id pub-id-type="pmid">31352124</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>da Silva Castanheira</surname> <given-names>J.</given-names></name> <name><surname>Orozco Perez</surname> <given-names>H. D.</given-names></name> <name><surname>Misic</surname> <given-names>B.</given-names></name> <name><surname>Baillet</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Brief segments of neurophysiological activity enable individual differentiation</article-title>. <source>Nat. Commun.</source> <volume>12</volume>:<fpage>5713</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-021-25895-8</pub-id>, PMID: <pub-id pub-id-type="pmid">34588439</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dale</surname> <given-names>A. M.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Sereno</surname> <given-names>M. I.</given-names></name></person-group> (<year>1999</year>). <article-title>Cortical surface-based analysis: I Segmentation and surface reconstruction</article-title>. <source>NeuroImage</source> <volume>9</volume>, <fpage>179</fpage>&#x2013;<lpage>194</lpage>. doi: <pub-id pub-id-type="doi">10.1006/nimg.1998.0395</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dammers</surname> <given-names>J.</given-names></name> <name><surname>Schiek</surname> <given-names>M.</given-names></name> <name><surname>Boers</surname> <given-names>F.</given-names></name> <name><surname>Silex</surname> <given-names>C.</given-names></name> <name><surname>Zvyagintsev</surname> <given-names>M.</given-names></name> <name><surname>Pietrzyk</surname> <given-names>U.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Integration of amplitude and phase statistics for complete artifact removal in independent components of neuromagnetic recordings</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>55</volume>, <fpage>2353</fpage>&#x2013;<lpage>2362</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2008.926677</pub-id>, PMID: <pub-id pub-id-type="pmid">18838360</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>de Souza Rodrigues</surname> <given-names>J.</given-names></name> <name><surname>Ribeiro</surname> <given-names>F. L.</given-names></name> <name><surname>Sato</surname> <given-names>J. R.</given-names></name> <name><surname>Mesquita</surname> <given-names>R. C.</given-names></name> <name><surname>J&#x00FA;nior</surname> <given-names>C. E. B.</given-names></name></person-group> (<year>2019</year>). <article-title>Identifying individuals using fNIRS-based cortical connectomes</article-title>. <source>Biomed. Opt. Express</source> <volume>10</volume>, <fpage>2889</fpage>&#x2013;<lpage>2897</lpage>. doi: <pub-id pub-id-type="doi">10.1364/boe.10.002889</pub-id>, PMID: <pub-id pub-id-type="pmid">31259059</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dempster</surname> <given-names>A.</given-names></name> <name><surname>Petitjean</surname> <given-names>F.</given-names></name> <name><surname>Webb</surname> <given-names>G. I.</given-names></name></person-group> (<year>2020</year>). <article-title>ROCKET: exceptionally fast and accurate time series classification using random convolutional kernels</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>34</volume>, <fpage>1454</fpage>&#x2013;<lpage>1495</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-020-00701-z</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Dempster</surname> <given-names>A.</given-names></name> <name><surname>Schmidt</surname> <given-names>D. F.</given-names></name> <name><surname>Webb</surname> <given-names>G. I.</given-names></name></person-group> (<year>2021</year>). <article-title>MiniRocket: a very fast (almost) deterministic transform for time series classification</article-title>. <conf-name>Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (ACM)</conf-name>, <fpage>248</fpage>&#x2013;<lpage>257</lpage></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Demuru</surname> <given-names>M.</given-names></name> <name><surname>Fraschini</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>EEG fingerprinting: subject-specific signature based on the aperiodic component of power spectrum</article-title>. <source>Comput. Biol. Med.</source> <volume>120</volume>:<fpage>103748</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103748</pub-id>, PMID: <pub-id pub-id-type="pmid">32421651</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Demuru</surname> <given-names>M.</given-names></name> <name><surname>Gouw</surname> <given-names>A. A.</given-names></name> <name><surname>Hillebrand</surname> <given-names>A.</given-names></name> <name><surname>Stam</surname> <given-names>C. J.</given-names></name> <name><surname>van Dijk</surname> <given-names>B. W.</given-names></name> <name><surname>Scheltens</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Functional and effective whole brain connectivity using magnetoencephalography to identify monozygotic twin pairs</article-title>. <source>Sci. Rep.</source> <volume>7</volume>:<fpage>9685</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-017-10235-y</pub-id>, PMID: <pub-id pub-id-type="pmid">28852152</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desikan</surname> <given-names>R. S.</given-names></name> <name><surname>S&#x00E9;gonne</surname> <given-names>F.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Quinn</surname> <given-names>B. T.</given-names></name> <name><surname>Dickerson</surname> <given-names>B. C.</given-names></name> <name><surname>Blacker</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2006</year>). <article-title>An automated labeling system for subdividing the human cerebral cortex on MRI scans into gyral based regions of interest</article-title>. <source>NeuroImage</source> <volume>31</volume>, <fpage>968</fpage>&#x2013;<lpage>980</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2006.01.021</pub-id>, PMID: <pub-id pub-id-type="pmid">16530430</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ester</surname> <given-names>M.</given-names></name> <name><surname>Kriegel</surname> <given-names>H.-P.</given-names></name> <name><surname>Sander</surname> <given-names>J.</given-names></name> <name><surname>Xu</surname> <given-names>X.</given-names></name></person-group> (<year>1996</year>). <article-title>A density-based algorithm for discovering clusters in large spatial databases with noise</article-title>. <source>KDD</source> <volume>96</volume>, <fpage>226</fpage>&#x2013;<lpage>231</lpage>. <ext-link xlink:href="https://dl.acm.org/doi/proceedings/10.5555/3001460" ext-link-type="uri">https://dl.acm.org/doi/proceedings/10.5555/3001460</ext-link></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Finn</surname> <given-names>E. S.</given-names></name> <name><surname>Shen</surname> <given-names>X.</given-names></name> <name><surname>Scheinost</surname> <given-names>D.</given-names></name> <name><surname>Rosenberg</surname> <given-names>M. D.</given-names></name> <name><surname>Huang</surname> <given-names>J.</given-names></name> <name><surname>Chun</surname> <given-names>M. M.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Functional connectome fingerprinting: identifying individuals using patterns of brain connectivity</article-title>. <source>Nat. Neurosci.</source> <volume>18</volume>, <fpage>1664</fpage>&#x2013;<lpage>1671</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.4135</pub-id>, PMID: <pub-id pub-id-type="pmid">26457551</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Sereno</surname> <given-names>M. I.</given-names></name> <name><surname>Tootell</surname> <given-names>R. B. H.</given-names></name> <name><surname>Dale</surname> <given-names>A. M.</given-names></name></person-group> (<year>1999</year>). <article-title>High-resolution inter subject averaging and a coordinate system for the cortical surface</article-title>. <source>Hum. Brain Mapp.</source> <volume>8</volume>, <fpage>272</fpage>&#x2013;<lpage>284</lpage>. doi: <pub-id pub-id-type="doi">10.1002/(SICI)1097-0193(1999)8:4&#x003C;272::AID-HBM10&#x003E;3.0.CO;2-4</pub-id>, PMID: <pub-id pub-id-type="pmid">10619420</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fraschini</surname> <given-names>M.</given-names></name> <name><surname>Hillebrand</surname> <given-names>A.</given-names></name> <name><surname>Demuru</surname> <given-names>M.</given-names></name> <name><surname>Didaci</surname> <given-names>L.</given-names></name> <name><surname>Marcialis</surname> <given-names>G. L.</given-names></name></person-group> (<year>2015</year>). <article-title>An EEG-based biometric system using eigenvector centrality in resting state brain networks</article-title>. <source>IEEE Signal Process Lett.</source> <volume>22</volume>, <fpage>666</fpage>&#x2013;<lpage>670</lpage>. doi: <pub-id pub-id-type="doi">10.1109/LSP.2014.2367091</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gramfort</surname> <given-names>A.</given-names></name> <name><surname>Luessi</surname> <given-names>M.</given-names></name> <name><surname>Larson</surname> <given-names>E.</given-names></name> <name><surname>Engemann</surname> <given-names>D. A.</given-names></name> <name><surname>Strohmeier</surname> <given-names>D.</given-names></name> <name><surname>Brodbeck</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>MEG and EEG data analysis with MNE-python</article-title>. <source>Front. Neurosci.</source> <volume>7</volume>:<fpage>267</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2013.00267</pub-id>, PMID: <pub-id pub-id-type="pmid">24431986</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gramfort</surname> <given-names>A.</given-names></name> <name><surname>Luessi</surname> <given-names>M.</given-names></name> <name><surname>Larson</surname> <given-names>E.</given-names></name> <name><surname>Engemann</surname> <given-names>D. A.</given-names></name> <name><surname>Strohmeier</surname> <given-names>D.</given-names></name> <name><surname>Brodbeck</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>MNE software for processing MEG and EEG data</article-title>. <source>NeuroImage</source> <volume>86</volume>, <fpage>446</fpage>&#x2013;<lpage>460</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.10.027</pub-id>, PMID: <pub-id pub-id-type="pmid">24161808</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>H&#x00E4;m&#x00E4;l&#x00E4;inen</surname> <given-names>M. S.</given-names></name> <name><surname>Ilmoniemi</surname> <given-names>R. J.</given-names></name></person-group> (<year>1994</year>). <article-title>Interpreting magnetic fields of the brain: minimum norm estimates</article-title>. <source>Med. Biol. Eng. Comput.</source> <volume>32</volume>, <fpage>35</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF02512476</pub-id>, PMID: <pub-id pub-id-type="pmid">8182960</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hodge</surname> <given-names>M. R.</given-names></name> <name><surname>Horton</surname> <given-names>W.</given-names></name> <name><surname>Brown</surname> <given-names>T.</given-names></name> <name><surname>Herrick</surname> <given-names>R.</given-names></name> <name><surname>Olsen</surname> <given-names>T.</given-names></name> <name><surname>Hileman</surname> <given-names>M. E.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>ConnectomeDB-sharing human brain connectivity data</article-title>. <source>NeuroImage</source> <volume>124</volume>, <fpage>1102</fpage>&#x2013;<lpage>1107</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.04.046</pub-id>, PMID: <pub-id pub-id-type="pmid">25934470</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hyv&#x00E4;rinen</surname> <given-names>A.</given-names></name> <name><surname>Oja</surname> <given-names>E.</given-names></name></person-group> (<year>2000</year>). <article-title>Independent component analysis: algorithms and applications</article-title>. <source>Neural Networks Off. J. Int. Neur. Netw. Soc.</source> <volume>13</volume>, <fpage>411</fpage>&#x2013;<lpage>430</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0893-6080(00)00026-5</pub-id>, PMID: <pub-id pub-id-type="pmid">10946390</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ismail Fawaz</surname> <given-names>H.</given-names></name> <name><surname>Forestier</surname> <given-names>G.</given-names></name> <name><surname>Weber</surname> <given-names>J.</given-names></name> <name><surname>Idoumghar</surname> <given-names>L.</given-names></name> <name><surname>Muller</surname> <given-names>P.-A.</given-names></name></person-group> (<year>2019</year>). <article-title>Deep learning for time series classification: a review</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>33</volume>, <fpage>917</fpage>&#x2013;<lpage>963</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-019-00619-1</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ismail Fawaz</surname> <given-names>H.</given-names></name> <name><surname>Lucas</surname> <given-names>B.</given-names></name> <name><surname>Forestier</surname> <given-names>G.</given-names></name> <name><surname>Pelletier</surname> <given-names>C.</given-names></name> <name><surname>Schmidt</surname> <given-names>D. F.</given-names></name> <name><surname>Weber</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>InceptionTime: finding AlexNet for time series classification</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>34</volume>, <fpage>1936</fpage>&#x2013;<lpage>1962</lpage>. doi: <pub-id pub-id-type="doi">10.1007/S10618-020-00710-Y</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kaufmann</surname> <given-names>T.</given-names></name> <name><surname>Aln&#x00E6;s</surname> <given-names>D.</given-names></name> <name><surname>Doan</surname> <given-names>N. T.</given-names></name> <name><surname>Brandt</surname> <given-names>C. L.</given-names></name> <name><surname>Andreassen</surname> <given-names>O. A.</given-names></name> <name><surname>Westlye</surname> <given-names>L. T.</given-names></name></person-group> (<year>2017</year>). <article-title>Delayed stabilization and individualization in connectome development are related to psychiatric disorders</article-title>. <source>Nat. Neurosci.</source> <volume>20</volume>, <fpage>513</fpage>&#x2013;<lpage>515</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.4511</pub-id>, PMID: <pub-id pub-id-type="pmid">28218917</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Keogh</surname> <given-names>E.</given-names></name> <name><surname>Kasetty</surname> <given-names>S.</given-names></name></person-group> (<year>2003</year>). <article-title>On the need for time series data Mining benchmarks: a survey and empirical demonstration</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>7</volume>, <fpage>349</fpage>&#x2013;<lpage>371</lpage>. doi: <pub-id pub-id-type="doi">10.1023/A:1024988512476</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kong</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Xu</surname> <given-names>S.</given-names></name> <name><surname>Babiloni</surname> <given-names>F.</given-names></name> <name><surname>Chen</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>EEG fingerprints: phase synchronization of EEG signals as biomarker for subject identification</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>121165</fpage>&#x2013;<lpage>121173</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2931624</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Larson-Prior</surname> <given-names>L. J.</given-names></name> <name><surname>Oostenveld</surname> <given-names>R.</given-names></name> <name><surname>Della Penna</surname> <given-names>S.</given-names></name> <name><surname>Michalareas</surname> <given-names>G.</given-names></name> <name><surname>Prior</surname> <given-names>F.</given-names></name> <name><surname>Babajani-Feremi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Adding dynamics to the human connectome project with MEG</article-title>. <source>NeuroImage</source> <volume>80</volume>, <fpage>190</fpage>&#x2013;<lpage>201</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.05.056</pub-id>, PMID: <pub-id pub-id-type="pmid">23702419</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>L&#x00F6;ning</surname> <given-names>M.</given-names></name> <name><surname>Bagnall</surname> <given-names>A.</given-names></name> <name><surname>Ganesh</surname> <given-names>S.</given-names></name> <name><surname>Kazakov</surname> <given-names>V.</given-names></name> <name><surname>Lines</surname> <given-names>J.</given-names></name> <name><surname>Kir&#x00E1;ly</surname> <given-names>F. J.</given-names></name></person-group> (<year>2019</year>). <article-title>Sktime: a unified Interface for machine learning with time series</article-title>. <conf-name>33rd conference on neural information processing systems</conf-name>, <publisher-loc>Vancouver, Canada</publisher-loc>.</citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miranda-Dominguez</surname> <given-names>O.</given-names></name> <name><surname>Mills</surname> <given-names>B. D.</given-names></name> <name><surname>Carpenter</surname> <given-names>S. D.</given-names></name> <name><surname>Grant</surname> <given-names>K. A.</given-names></name> <name><surname>Kroenke</surname> <given-names>C. D.</given-names></name> <name><surname>Nigg</surname> <given-names>J. T.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Connectotyping: model based fingerprinting of the functional connectome</article-title>. <source>PLoS One</source> <volume>9</volume>:<fpage>e111048</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0111048</pub-id>, PMID: <pub-id pub-id-type="pmid">25386919</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mugler</surname> <given-names>J. P.</given-names></name> <name><surname>Brookeman</surname> <given-names>J. R.</given-names></name></person-group> (<year>1990</year>). <article-title>Three-dimensional magnetization-prepared rapid gradient-echo imaging (3D MP RAGE)</article-title>. <source>Magn. Reson. Med.</source> <volume>15</volume>, <fpage>152</fpage>&#x2013;<lpage>157</lpage>. doi: <pub-id pub-id-type="doi">10.1002/mrm.1910150117</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pedregosa</surname> <given-names>F.</given-names></name> <name><surname>Varoquaux</surname> <given-names>G.</given-names></name> <name><surname>Gramfort</surname> <given-names>A.</given-names></name> <name><surname>Michel</surname> <given-names>V.</given-names></name> <name><surname>Thirion</surname> <given-names>B.</given-names></name> <name><surname>Grisel</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Scikit-learn: machine learning in python</article-title>. <source>J. Mach. Learn. Res.</source> <volume>12</volume>, <fpage>2825</fpage>&#x2013;<lpage>2830</lpage>.</citation></ref>
<ref id="ref35"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Robinson</surname> <given-names>S. E.</given-names></name></person-group> (<year>1989</year>). <article-title>Environmental noise cancellation for biomagnetic measurements</article-title>, <source>Advances in Biomagnetism</source>, (Ed.) <person-group person-group-type="editor"><name><surname>Williamson</surname> <given-names>S.</given-names></name> <etal/></person-group>. <publisher-loc>Boston, MA</publisher-loc>: <publisher-name>Springer US</publisher-name>, <fpage>721</fpage>&#x2013;<lpage>724</lpage></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rocca</surname> <given-names>D. L.</given-names></name> <name><surname>Campisi</surname> <given-names>P.</given-names></name> <name><surname>Vegso</surname> <given-names>B.</given-names></name> <name><surname>Cserti</surname> <given-names>P.</given-names></name> <name><surname>Kozmann</surname> <given-names>G.</given-names></name> <name><surname>Babiloni</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Human brain distinctiveness based on EEG spectral coherence connectivity</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>61</volume>, <fpage>2406</fpage>&#x2013;<lpage>2412</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2014.2317881</pub-id>, PMID: <pub-id pub-id-type="pmid">24759981</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ruiz</surname> <given-names>A. P.</given-names></name> <name><surname>Flynn</surname> <given-names>M.</given-names></name> <name><surname>Large</surname> <given-names>J.</given-names></name> <name><surname>Middlehurst</surname> <given-names>M.</given-names></name> <name><surname>Bagnall</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>The great multivariate time series classification bake off: a review and experimental evaluation of recent algorithmic advances</article-title>. <source>Data Min. Knowl. Disc.</source> <volume>35</volume>, <fpage>401</fpage>&#x2013;<lpage>449</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-020-00727-3</pub-id>, PMID: <pub-id pub-id-type="pmid">33679210</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sareen</surname> <given-names>E.</given-names></name> <name><surname>Zahar</surname> <given-names>S.</given-names></name> <name><surname>Ville</surname> <given-names>D. V. D.</given-names></name> <name><surname>Gupta</surname> <given-names>A.</given-names></name> <name><surname>Griffa</surname> <given-names>A.</given-names></name> <name><surname>Amico</surname> <given-names>E.</given-names></name></person-group> (<year>2021</year>). <article-title>Exploring MEG brain fingerprints: evaluation, pitfalls, and interpretations</article-title>. <source>NeuroImage</source> <volume>240</volume>:<fpage>118331</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.118331</pub-id>, PMID: <pub-id pub-id-type="pmid">34237444</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schl&#x00F6;gl</surname> <given-names>A.</given-names></name> <name><surname>Supp</surname> <given-names>G.</given-names></name></person-group> (<year>2006</year>). <article-title>Analyzing event-related EEG data with multivariate autoregressive parameters</article-title>. <source>Prog. Brain Res.</source> <volume>159</volume>, <fpage>135</fpage>&#x2013;<lpage>147</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0079-6123(06)59009-0</pub-id>, PMID: <pub-id pub-id-type="pmid">17071228</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Valizadeh</surname> <given-names>S. A.</given-names></name> <name><surname>Liem</surname> <given-names>F.</given-names></name> <name><surname>M&#x00E9;rillat</surname> <given-names>S.</given-names></name> <name><surname>H&#x00E4;nggi</surname> <given-names>J.</given-names></name> <name><surname>J&#x00E4;ncke</surname> <given-names>L.</given-names></name></person-group> (<year>2018</year>). <article-title>Identification of individual subjects on the basis of their brain anatomical features</article-title>. <source>Sci. Rep.</source> <volume>8</volume>, <fpage>5611</fpage>&#x2013;<lpage>5619</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-018-23696-6</pub-id>, PMID: <pub-id pub-id-type="pmid">29618790</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Essen</surname> <given-names>D. C.</given-names></name> <name><surname>Smith</surname> <given-names>S. M.</given-names></name> <name><surname>Barch</surname> <given-names>D. M.</given-names></name> <name><surname>Behrens</surname> <given-names>T. E. J.</given-names></name> <name><surname>Yacoub</surname> <given-names>E.</given-names></name> <name><surname>Ugurbil</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>The WU-Minn human connectome project: an overview</article-title>. <source>NeuroImage</source> <volume>80</volume>, <fpage>62</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.05.041</pub-id>, PMID: <pub-id pub-id-type="pmid">23684880</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Essen</surname> <given-names>D. C.</given-names></name> <name><surname>Ugurbil</surname> <given-names>K.</given-names></name> <name><surname>Auerbach</surname> <given-names>E.</given-names></name> <name><surname>Barch</surname> <given-names>D.</given-names></name> <name><surname>Behrens</surname> <given-names>T. E. J.</given-names></name> <name><surname>Bucholz</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>The human connectome project: a data acquisition perspective</article-title>. <source>NeuroImage</source> <volume>62</volume>, <fpage>2222</fpage>&#x2013;<lpage>2231</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.02.018</pub-id>, PMID: <pub-id pub-id-type="pmid">22366334</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Horn</surname> <given-names>J. D.</given-names></name> <name><surname>Grafton</surname> <given-names>S. T.</given-names></name> <name><surname>Miller</surname> <given-names>M. B.</given-names></name></person-group> (<year>2008</year>). <article-title>Individual variability in brain activity: a nuisance or an opportunity?</article-title> <source>Brain Imaging Behav.</source> <volume>2</volume>, <fpage>327</fpage>&#x2013;<lpage>334</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11682-008-9049-9</pub-id>, PMID: <pub-id pub-id-type="pmid">19777073</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wachinger</surname> <given-names>C.</given-names></name> <name><surname>Golland</surname> <given-names>P.</given-names></name> <name><surname>Kremen</surname> <given-names>W.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Reuter</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>BrainPrint: a discriminative characterization of brain morphology</article-title>. <source>NeuroImage</source> <volume>109</volume>, <fpage>232</fpage>&#x2013;<lpage>248</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.01.032</pub-id>, PMID: <pub-id pub-id-type="pmid">25613439</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Welch</surname> <given-names>B. L.</given-names></name></person-group> (<year>1947</year>). <article-title>The generalisation of student&#x2019;s problem when several different population variances are involved</article-title>. <source>Biometrika</source> <volume>34</volume>, <fpage>28</fpage>&#x2013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.1093/biomet/34.1-2.28</pub-id>, PMID: <pub-id pub-id-type="pmid">20287819</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Q.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name></person-group> (<year>2006</year>). <article-title>10 challenging problems in data mining research</article-title>. <source>Int. J. Inf. Technol. Decision Making</source> <volume>5</volume>, <fpage>597</fpage>&#x2013;<lpage>604</lpage>. doi: <pub-id pub-id-type="doi">10.1142/S0219622006002258</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>F.</given-names></name> <name><surname>Koltun</surname> <given-names>V.</given-names></name></person-group> (<year>2016</year>). <article-title>Multi-scale context aggregation by dilated convolutions</article-title>. <conf-name>4th International Conference on Learning Representations</conf-name>.</citation></ref>
</ref-list>
</back>
</article>