<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2022.875851</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A Regional Smoothing Block Sparse Bayesian Learning Method With Temporal Correlation for Channel Selection in P300 Speller</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhao</surname> <given-names>Xueqing</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1672093/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Jin</surname> <given-names>Jing</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/143026/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Xu</surname> <given-names>Ren</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/123020/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Shurui</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/808912/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Sun</surname> <given-names>Hao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Wang</surname> <given-names>Xingyu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/275258/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Cichocki</surname> <given-names>Andrzej</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1345366/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>The Key Laboratory of Smart Manufacturing in Energy Chemical Process, Ministry of Education, East China University of Science and Technology</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Shenzhen Research Institute of East China University of Science and Technology</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>g.tec medical engineering GmbH</institution>, <addr-line>Graz</addr-line>, <country>Austria</country></aff>
<aff id="aff4"><sup>4</sup><institution>Skolkovo Institute of Science and Technology</institution>, <addr-line>Moscow</addr-line>, <country>Russia</country></aff>
<aff id="aff5"><sup>5</sup><institution>Systems Research Institute of Polish Academy of Science</institution>, <addr-line>Warsaw</addr-line>, <country>Poland</country></aff>
<aff id="aff6"><sup>6</sup><institution>Department of Informatics, Nicolaus Copernicus University</institution>, <addr-line>Toru&#x0144;</addr-line>, <country>Poland</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Bj&#x00F6;rn H. Schott, Leibniz Institute for Neurobiology (LG), Germany</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Christoph Reichert, Leibniz Institute for Neurobiology (LG), Germany; Jianjun Meng, Shanghai Jiao Tong University, China</p></fn>
<corresp id="c001">&#x002A;Correspondence: Jing Jin, <email>jinjingat@gmail.com</email></corresp>
<fn fn-type="other" id="fn004"><p>This article was submitted to Brain Health and Clinical Neuroscience, a section of the journal Frontiers in Human Neuroscience</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>10</day>
<month>06</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>16</volume>
<elocation-id>875851</elocation-id>
<history>
<date date-type="received">
<day>14</day>
<month>02</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>18</day>
<month>05</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2022 Zhao, Jin, Xu, Li, Sun, Wang and Cichocki.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Zhao, Jin, Xu, Li, Sun, Wang and Cichocki</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>The P300-based brain&#x2013;computer interfaces (BCIs) enable participants to communicate by decoding the electroencephalography (EEG) signal. Different regions of the brain correspond to various mental activities. Therefore, removing weak task-relevant and noisy channels through channel selection is necessary when decoding a specific type of activity from EEG. It can improve the recognition accuracy and reduce the training time of the subsequent models. This study proposes a novel block sparse Bayesian-based channel selection method for the P300 speller. In this method, we introduce block sparse Bayesian learning (BSBL) into the channel selection of P300 BCI for the first time and propose a regional smoothing BSBL (RSBSBL) by combining the spatial distribution properties of EEG. The RSBSBL can determine the number of channels adaptively. To ensure practicality, we design an automatic selection iteration strategy model to reduce the time cost caused by the inverse operation of the large-size matrix. We verified the proposed method on two public P300 datasets and on our collected datasets. The experimental results show that the proposed method can remove the inferior channels and work with the classifier to obtain high-classification accuracy. Hence, RSBSBL has tremendous potential for channel selection in P300 tasks.</p>
</abstract>
<kwd-group>
<kwd>channel selection</kwd>
<kwd>sparse Bayesian learning</kwd>
<kwd>temporal correlation</kwd>
<kwd>brain-computer interface</kwd>
<kwd>EEG</kwd>
<kwd>P300</kwd>
</kwd-group>
<counts>
<fig-count count="7"/>
<table-count count="4"/>
<equation-count count="17"/>
<ref-count count="55"/>
<page-count count="13"/>
<word-count count="9423"/>
</counts>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<title>Introduction</title>
<p>Brain&#x2013;computer interface (BCI) is a direct interactive pathway designed to establish a non-muscle connection between the human brain and the computer (<xref ref-type="bibr" rid="B48">Wolpaw et al., 2002</xref>; <xref ref-type="bibr" rid="B22">Jin et al., 2015</xref>). It provides a new way to communicate with the outside, for example, daily communication (<xref ref-type="bibr" rid="B40">Sorbello et al., 2017</xref>; <xref ref-type="bibr" rid="B17">He et al., 2019</xref>) and wheelchair control (<xref ref-type="bibr" rid="B25">Kim et al., 2016</xref>; <xref ref-type="bibr" rid="B11">Deng et al., 2019</xref>). In addition, BCIs can also be used to aid in the diagnosis of disorders of consciousness (<xref ref-type="bibr" rid="B31">Maest&#x00FA; et al., 2019</xref>; <xref ref-type="bibr" rid="B2">Ando et al., 2021</xref>). BCIs can be divided into invasive and non-invasive ones. Electroencephalography (EEG) is a non-invasive technique that records brain signals through electrodes placed on the scalp. Generally, users&#x2019; brain signals are recorded, amplified, and pre-processed with an EEG recorder, and then the signals are converted to commands <italic>via</italic> classifiers (<xref ref-type="bibr" rid="B5">Bashashati et al., 2007</xref>). Currently, BCIs based on the Event-Related Potential (ERP) (<xref ref-type="bibr" rid="B18">Hoffmann et al., 2008a</xref>; <xref ref-type="bibr" rid="B30">Lopez-Calderon and Luck, 2014</xref>), Steady-State Visual Evoked Potential (SSVEP) (<xref ref-type="bibr" rid="B36">Nakanishi et al., 2017</xref>), and Motor Imagery (MI) (<xref ref-type="bibr" rid="B37">Padfield et al., 2019</xref>) are the three main research directions. The oddball paradigm is a typical paradigm of P300, where standard and deviant stimuli are included. 
These two kinds of stimuli appear randomly with large and small probabilities, and deviant stimuli are the targets in small probability events that correspond to the spelling character (<xref ref-type="bibr" rid="B13">Donchin et al., 2000</xref>). The spelling paradigms and algorithms based on P300 have been widely developed in recent years (<xref ref-type="bibr" rid="B9">Cecotti and Graser, 2010</xref>; <xref ref-type="bibr" rid="B46">Townsend et al., 2010</xref>; <xref ref-type="bibr" rid="B15">Hammer et al., 2018</xref>; <xref ref-type="bibr" rid="B4">Arvaneh et al., 2019</xref>; <xref ref-type="bibr" rid="B21">Jin et al., 2019</xref>; <xref ref-type="bibr" rid="B20">Huang et al., 2022</xref>). This study is focused on the P300 BCI system.</p>
<p>To provide a complete coverage of regions related to EEG activity, a large number of electrodes are used for EEG acquisition. An electrode is regarded as a channel. However, a realistic EEG system typically uses the data of a small number of channels during computation to minimize the preparation time and cost (<xref ref-type="bibr" rid="B10">Cecotti et al., 2011</xref>). Channel selection helps to exclude the weak task-relevant and noisy channels, thus improving the classification accuracy and reducing the classifier training time. Inter-participant differences and equipment differences can make the best subset of channels in the same paradigm different. The flexibility of selecting a subset of empirical channels in the complex BCI data is insufficient, and the data-based channel selection method is more conducive to giving the optimal channel selection. Therefore, the method of automatically determining a subset of channels has better application prospects than selecting a fixed subset.</p>
<p>Different evaluation approaches, such as filter, wrapper, embedded, hybrid, and human-based techniques have been widely used to select features and the subset of channels in the P300 speller (<xref ref-type="bibr" rid="B1">Alotaiby et al., 2015</xref>). Filters like Fisher Score (<xref ref-type="bibr" rid="B27">Lal et al., 2004</xref>) are usually independent of the classifier and select channels based on the relevance. A CCA spatial filter also proved to be effective in event-related signal processing (<xref ref-type="bibr" rid="B39">Reichert et al., 2017</xref>). On the other hand, wrappers select the channel set according to the algorithm effect and search for channels through continuous heuristic methods. Support Vector Machine based recursive channel elimination (SVM-RCE) can be considered a typical example of a wrapper (<xref ref-type="bibr" rid="B38">Rakotomamonjy and Guigue, 2008</xref>). The hybrid approach is a combination of filter and wrapper and uses the wrapper to obtain a subset of the available channels after handling the filter (<xref ref-type="bibr" rid="B29">Liu and Yu, 2005</xref>). The human-based approaches are the methods in which the experienced experts select channels by analyzing certain technical indicators (<xref ref-type="bibr" rid="B42">Tekgul et al., 2005</xref>). In addition, some channel selection algorithms are based on evolutionary algorithms, such as Particle Swarm Optimization (PSO), which also belong to wrappers (<xref ref-type="bibr" rid="B33">Martinez-Cagigal and Hornero, 2017</xref>; <xref ref-type="bibr" rid="B3">Arican and Polat, 2019</xref>). For embedded methods, the selection is usually implicit and integrated with the learner training process. By giving sparse weight to features or channels, sparse methods can obtain a classifier that needs fewer selected features or channels. 
The Least Absolute Shrinkage and Selection Operator (LASSO), a linear regressor with <italic>L<sub>1</sub></italic> regularization, can be regarded as an embedded method (<xref ref-type="bibr" rid="B43">Tibshirani, 1996</xref>). In EEG research, LASSO has also become a commonly used feature selection algorithm and extended to channel selection (<xref ref-type="bibr" rid="B45">Tomioka and M&#x00FC;ller, 2010</xref>). Yuan extended the LASSO method to groups in 2006, giving birth to the group LASSO (GLASSO), which allows us to group all variables and then penalize the <italic>L<sub>2</sub></italic> norm of each group in the objective function, thus achieving the effect of eliminating a whole group of coefficients to zero at the same time (<xref ref-type="bibr" rid="B51">Yuan and Lin, 2006</xref>). The Bayesian framework-based feature selection and classification methods are widely used in EEG. Studies have shown the outstanding performance of Bayesian linear discriminant analysis (BLDA) in EEG decoding (<xref ref-type="bibr" rid="B18">Hoffmann et al., 2008a</xref>; <xref ref-type="bibr" rid="B28">Lei et al., 2009</xref>; <xref ref-type="bibr" rid="B32">Manyakov et al., 2011</xref>). Tipping proposed a sparse Bayesian learning (SBL) method under the Bayesian framework to solve the regression problem (<xref ref-type="bibr" rid="B44">Tipping, 2001</xref>). SBL can complete the feature selection of P300 through sparsity (<xref ref-type="bibr" rid="B19">Hoffmann et al., 2008b</xref>) and has been used for channel selection (<xref ref-type="bibr" rid="B49">Wu et al., 2014</xref>; <xref ref-type="bibr" rid="B52">Zhang et al., 2017</xref>; <xref ref-type="bibr" rid="B12">Dey et al., 2020</xref>). EEG is a common response of regional neurons (<xref ref-type="bibr" rid="B16">Hassan and Wendling, 2018</xref>). However, the channel optimization approach described above does not consider the spatial structure between the channels of EEG signals. 
In addition, a few existing algorithms consider the temporal correlation in a single channel, which means the amplitude correlation between time points within each channel.</p>
<p>This paper proposes a regional smoothing SBL (RSBSBL) method for channel selection of the P300 signal. Block sparse Bayesian learning (BSBL) was first proposed for sparse signal recovery (<xref ref-type="bibr" rid="B55">Zhang and Rao, 2011</xref>). It is the first time that the BSBL is applied to EEG channel selection. The P300 features are usually filtered and down-sampled in the temporal series, and features from the same channel are correlated. In this method, we combine BSBL with the spatial distribution properties of EEG to propose an RSBSBL. To ensure practicality, we design an automatic selection iteration strategy model to reduce the time cost caused by the inverse operation of large-size matrices.</p>
<p>For verification, RSBSBL was compared with some other methods with similar principles on the three BCI datasets. We used BLDA as a unified classifier for a fair comparison. The effectiveness of the proposed method was verified by the effectiveness of channel subsets and the character recognition performance.</p>
<p>We organize the rest of the paper as follows. Section &#x201C;Materials and Methods&#x201D; describes the principle and calculation process of the proposed algorithm. Section &#x201C;Materials and Experiments&#x201D; describes the dataset used and the data processing framework. Section &#x201C;Results&#x201D; shows the experimental results. Section &#x201C;Discussion&#x201D; further discusses the effectiveness of the selected channel subsets, character recognition performance, effectiveness of regional smoothing, time cost, and future work. Finally, section &#x201C;Conclusion&#x201D; gives the conclusion.</p>
</sec>
<sec id="S2">
<title>Materials and Methods</title>
<p>Here, we show the principle and solution process of RSBSBL and give its flow of selecting channels. The input features of one channel are regarded as a block. Based on the BSBL, we considered the spatial distribution of EEG and divided different regions according to the location of the electrodes. The automatic selection mode of the iterative strategy is used to ensure practicality.</p>
<sec id="S2.SS1">
<title>Regional Smoothing Sparse Bayesian Learning</title>
<p>The EEG signals collected by the device are generally two-dimensional data after pre-processing. <italic>N<sub>c</sub></italic> is denoted as the number of channels and <italic>N<sub>t</sub></italic> as temporal points. Input data <bold>X</bold> contains <italic>N</italic> samples <bold>x</bold><sub>1</sub>,<bold>x</bold><sub>2</sub>,<bold>x</bold><sub>3</sub> &#x2026;&#x2026; <bold>x</bold><sub><italic>N</italic></sub> &#x2208; <italic>R<sup>D</sup></italic>, where <italic>D</italic> = <italic>N</italic><sub><italic>t</italic></sub><italic>N</italic><sub><italic>c</italic></sub> represents the number of features in each sample. Then, <bold>X</bold> = [<bold>x</bold><sub>1</sub>,<bold>x</bold><sub>2</sub>,<bold>x</bold><sub>3</sub>, &#x2026; <bold>x</bold><sub><italic>N</italic></sub>]<sup>T</sup> &#x2208; <italic>R</italic><sup><italic>N</italic> &#x00D7; <italic>D</italic></sup> and <bold>y</bold> = [<italic>y</italic><sub>1</sub>,<italic>y</italic><sub>2</sub>,<italic>y</italic><sub>3</sub>, &#x2026;, <italic>y</italic><sub><italic>N</italic></sub>]<sup>T</sup> &#x2208; <italic>R<sup>N</sup></italic> represent the corresponding labels, where <italic>y</italic><sub><italic>i</italic></sub> &#x2208; &#x007B;1,&#x2212;1&#x007D; is the class label. Its mathematical model can be expressed linearly as follows:</p>
<disp-formula id="S2.E1"><label>(1)</label><mml:math id="M1"><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mrow><mml:mtext mathvariant="bold">X</mml:mtext><mml:mpadded width="+3.3pt"><mml:mi mathvariant="normal">w</mml:mi></mml:mpadded></mml:mrow><mml:mo rspace="5.8pt">+</mml:mo><mml:mi mathvariant="normal">&#x03B5;</mml:mi></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>where <bold>w</bold> = [<italic>w</italic><sub>1</sub>,<italic>w</italic><sub>2</sub>,<italic>w</italic><sub>3</sub> &#x2026;&#x2026; <italic>w</italic><sub><italic>D</italic></sub>]<sup>T</sup> is a learnable weight vector, &#x03B5; is noise, and <bold>X</bold> can be replaced by &#x03A6;(<bold>X</bold>) expressed in the form of a kernel function. Assume &#x03B5; &#x223C; &#x1D4A9; (0, &#x03C3;<sup>2</sup><bold>I</bold><sub><italic>N</italic></sub>), then <bold>y</bold> &#x223C; &#x1D4A9; (<bold>X</bold>w, &#x03C3;<sup>2</sup><bold>I</bold><sub><italic>N</italic></sub>) and its probabilistic framework is.</p>
<disp-formula id="S2.E2"><label>(2)</label><mml:math id="M2"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo rspace="7.5pt">,</mml:mo><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mi mathvariant="normal">&#x03C0;</mml:mi><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mfrac><mml:mi>N</mml:mi><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:msup><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mn>2</mml:mn><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mfrac><mml:msubsup><mml:mrow><mml:mo fence="true">&#x007C;&#x007C;</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo>-</mml:mo><mml:mrow><mml:mtext mathvariant="bold">X</mml:mtext><mml:mi mathvariant="normal">w</mml:mi></mml:mrow></mml:mrow><mml:mo fence="true">&#x007C;&#x007C;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mn>2</mml:mn></mml:msubsup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The RSBSBL adds the symmetric positive definite matrix in the variance term of the distribution that <bold>w</bold> obeys. The input data of one channel are regarded as a block. So, for the mathematical model (1), assume that <bold>w</bold><sub><italic>b</italic></sub>(&#x2200;<italic>b</italic>) is mutually independent and Gaussian distributed.</p>
<disp-formula id="S2.E3"><label>(3)</label><mml:math id="M3"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mtext mathvariant="bold">w</mml:mtext><mml:mi>b</mml:mi></mml:msub><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mtext mathvariant="bold">B</mml:mtext><mml:mi>b</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mrow><mml:mo>&#x2200;</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#x223C;</mml:mo><mml:mrow><mml:mi>&#x1D4A9;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:msub><mml:mtext mathvariant="bold">B</mml:mtext><mml:mi>b</mml:mi></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mi>b</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#x2026;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <bold>w</bold><sub><italic>b</italic></sub> containing several <italic>w<sub>i</sub></italic> is <italic>b</italic>th block of <bold>w</bold>, &#x03B3;<sub><italic>b</italic></sub> is a non-negative scalar that controls the variance of <bold>w</bold><sub><italic>b</italic></sub>, <bold>B</bold><sub><italic>b</italic></sub> is a positive definite matrix reflecting the intra-block correlation, and <italic>N<sub>b</sub></italic> is the number of blocks. Since the features of a channel are considered to be a block, <italic>N</italic><sub><italic>b</italic></sub> = <italic>N</italic><sub><italic>c</italic></sub>.</p>
<p>In our case of EEG signal, <italic>b</italic> is the index of channels. In a channel of EEG signal with corresponding weight <bold>w</bold><sub><italic>b</italic></sub>, it is assumed that all its feature weights share the same &#x03B3;<sub><italic>b</italic></sub> to control the variance of their distribution, and <bold>B</bold><sub><italic>b</italic></sub> controls the intra-block correlation.</p>
<p>In this case, we express the prior of <bold>w</bold> as <italic>p</italic>(<bold>w</bold>&#x007C;&#x03B3;, <bold>B</bold>) &#x223C; &#x1D4A9; (0, <bold>&#x03A3;</bold><sub>0</sub>), where <bold>&#x03A3;</bold><sub>0</sub> is</p>
<disp-formula id="S2.E4"><label>(4)</label><mml:math id="M4"><mml:mrow><mml:msub><mml:mn mathvariant="bold">&#x03A3;</mml:mn><mml:mn>0</mml:mn></mml:msub><mml:mo>=&#x00A0;</mml:mo><mml:mrow><mml:mo>[</mml:mo> <mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>&#x03B3;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi mathvariant="bold-italic">B</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:msub><mml:mi>&#x03B3;</mml:mi><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:msub><mml:msub><mml:mi mathvariant="bold-italic">B</mml:mi><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow> <mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>the posterior probability has been calculated by the Bayesian rule,</p>
<disp-formula id="S2.E5"><label>(5)</label><mml:math id="M5"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">w</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">y</mml:mi><mml:mo>,</mml:mo><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="bold">B</mml:mi></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo>,</mml:mo><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">w</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="bold">B</mml:mi></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="bold">B</mml:mi></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>and the corresponding variance and mean of the posterior probability density <italic>p</italic>(<bold>w</bold>&#x007C;<bold>y</bold>, &#x03C3;<sup>2</sup>, &#x03B3;, <bold>B</bold>) &#x223C; &#x1D4A9; (&#x03BC;<sub><bold>w</bold></sub>, <bold>&#x03A3;<sub>w</sub></bold>) can be described as</p>
<disp-formula id="S2.E6"><label>(6)</label><mml:math id="M6"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mpadded lspace="10pt" width="+10pt"><mml:mi mathvariant="bold">&#x03A3;</mml:mi></mml:mpadded><mml:mi mathvariant="bold">w</mml:mi></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>-</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup><mml:mtext mathvariant="bold">X</mml:mtext></mml:mrow><mml:mo rspace="5.8pt">+</mml:mo><mml:msubsup><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="S2.E7"><label>(7)</label><mml:math id="M7"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>-</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup><mml:mtext mathvariant="bold">y</mml:mtext></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>When <italic>N</italic> &#x2265; <italic>D</italic>, the Eqs (6) and (7) are suitable because the maximum size of the inverse matrix is <italic>D</italic> in this case. Now, we give the iterative ways when <italic>N</italic> &#x003C; <italic>D</italic>. According to the matrix inversion formula and the matrix identity,</p>
<disp-formula id="S2.E8"><label>(8)</label><mml:math id="M8"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">E</mml:mtext><mml:mo>&#x00A0;+&#x00A0;</mml:mo><mml:mtext mathvariant="bold">FGH</mml:mtext></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msup><mml:mtext mathvariant="bold">E</mml:mtext><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:mrow><mml:msup><mml:mtext mathvariant="bold">E</mml:mtext><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup><mml:mtext mathvariant="bold">FG</mml:mtext><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">I</mml:mtext><mml:mo rspace="2.8pt">+</mml:mo><mml:mrow><mml:msup><mml:mtext mathvariant="bold">HE</mml:mtext><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup><mml:mtext mathvariant="bold">FG</mml:mtext></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mtext mathvariant="bold">HE</mml:mtext><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="S2.E9"><label>(9)</label><mml:math id="M9"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">I</mml:mtext><mml:mo>&#x00A0;+&#x00A0;</mml:mo><mml:mtext mathvariant="bold">EF</mml:mtext></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup><mml:mtext mathvariant="bold">E</mml:mtext></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mtext mathvariant="bold">E</mml:mtext><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">I</mml:mtext><mml:mo>&#x00A0;+&#x00A0;</mml:mo><mml:mtext mathvariant="bold">FE</mml:mtext></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="bold">1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>we replace Eqs (6) and (7) with the following equations:</p>
<disp-formula id="S2.E10"><label>(10)</label><mml:math id="M10"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>-</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mtext mathvariant="bold">I</mml:mtext></mml:mrow><mml:mo rspace="2.8pt">+</mml:mo><mml:mrow><mml:mtext mathvariant="bold">X</mml:mtext><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="S2.E11"><label>(11)</label><mml:math id="M11"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mtext mathvariant="bold">I</mml:mtext></mml:mrow><mml:mo rspace="2.8pt">+</mml:mo><mml:mrow><mml:mtext mathvariant="bold">X</mml:mtext><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msup><mml:mtext mathvariant="bold">X</mml:mtext><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mtext mathvariant="bold">y</mml:mtext></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>To find the iterative equation of the parameters <bold>&#x0398;</bold> = &#x007B;&#x03B3;, <bold>B</bold>, &#x03C3;<sup>2</sup>&#x007D;, the expectation&#x2013;maximization (EM) algorithm is used to maximize log <italic>p</italic> (<bold>y</bold>&#x007C;<bold>&#x0398;</bold>). The <italic>Q</italic> function is:</p>
<disp-formula id="S2.Ex12"><mml:math id="M12"><mml:mrow><mml:mrow><mml:mi>Q</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi mathvariant="bold">&#x0398;</mml:mi><mml:mo rspace="5.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">y</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="bold">&#x0398;</mml:mi><mml:mrow><mml:mtext>old</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mpadded width="+5pt"><mml:mtext>log</mml:mtext></mml:mpadded><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi mathvariant="bold">y</mml:mi><mml:mo>,</mml:mo><mml:mrow><mml:mtext mathvariant="bold">w</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mi mathvariant="bold">&#x0398;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mrow><mml:mo rspace="2.8pt">=</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">y</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="bold">&#x0398;</mml:mi><mml:mrow><mml:mtext>old</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:mpadded width="+5pt"><mml:mi>log</mml:mi></mml:mpadded><mml:mo>&#x2061;</mml:mo><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo>,</mml:mo><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mrow><mml:mo lspace="2.8pt" 
rspace="2.8pt">+</mml:mo><mml:mrow><mml:msub><mml:mtext>E</mml:mtext><mml:mrow><mml:mi mathvariant="bold">w</mml:mi><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="bold">y</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="bold">&#x0398;</mml:mi><mml:mrow><mml:mtext>old</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mpadded width="+5pt"><mml:mtext>log</mml:mtext></mml:mpadded><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext mathvariant="bold">w</mml:mtext><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="bold">B</mml:mi></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>The first term of the <italic>Q</italic> function is related to &#x03C3;<sup>2</sup> and the second term is related to &#x03B3; and <bold>B</bold>. Then, we can get the parameters iteratively by maximizing the <italic>Q</italic> function.</p>
<disp-formula id="S2.E13"><label>(13)</label><mml:math id="M13"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mfrac><mml:mrow><mml:mpadded width="+3.3pt"><mml:msubsup><mml:mrow><mml:mo fence="true">&#x007C;&#x007C;</mml:mo><mml:mrow><mml:mtext mathvariant="bold">y</mml:mtext><mml:mo>-</mml:mo><mml:mrow><mml:mtext mathvariant="bold">X</mml:mtext><mml:msub><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo fence="true">&#x007C;&#x007C;</mml:mo></mml:mrow><mml:mn>2</mml:mn><mml:mn>2</mml:mn></mml:msubsup></mml:mpadded><mml:mo rspace="5.8pt">+</mml:mo><mml:mrow><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mtext>old</mml:mtext></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>-</mml:mo><mml:mrow><mml:mtext>Tr</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi></mml:msub><mml:msubsup><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mn>0</mml:mn><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="S2.E14"><label>(14)</label><mml:math id="M14"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mtext>Tr</mml:mtext><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msubsup><mml:mtext mathvariant="bold">B</mml:mtext><mml:mi>b</mml:mi><mml:mrow><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mpadded width="+3.3pt"><mml:msubsup><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup></mml:mpadded><mml:mo rspace="5.8pt">+</mml:mo><mml:mrow><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mfrac><mml:mo>,</mml:mo><mml:mrow><mml:mo>&#x2200;</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="S2.E15"><label>(15)</label><mml:math id="M15"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mpadded width="+3.3pt"><mml:msub><mml:mtext mathvariant="bold">B</mml:mtext><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mfrac><mml:mrow><mml:munder><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo><mml:mrow><mml:mi>b</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mi>G</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:munder><mml:mfrac><mml:mrow><mml:mpadded width="+3.3pt"><mml:msubsup><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup></mml:mpadded><mml:mo rspace="5.8pt">+</mml:mo><mml:mrow><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mtext>T</mml:mtext></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03B3;</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mfrac></mml:mrow></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mo>&#x2200;</mml:mo><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x002A;old represents the hyperparameter in the previous iteration, and the superscript <italic>b</italic> of <inline-formula><mml:math id="INEQ36"><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="INEQ37"><mml:msubsup><mml:mi mathvariant="bold">&#x03A3;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi>b</mml:mi></mml:msubsup></mml:math></inline-formula> indicates the <italic>b</italic>th block in &#x03BC;<sub><bold>w</bold></sub> and <bold>&#x03A3;<sub>w</sub></bold> with the size of <italic>d</italic><sub><italic>b</italic></sub> &#x00D7; 1 and <italic>d</italic><sub><italic>b</italic></sub> &#x00D7; <italic>d</italic><sub><italic>b</italic></sub> (<italic>d<sub>b</sub></italic> is the number of elements in w<sub>b</sub>).</p>
<p>The potential similarity exists in the adjacent electrode signals considering the volume conduction effects in the brain (<xref ref-type="bibr" rid="B16">Hassan and Wendling, 2018</xref>). We assign the same <bold>B</bold><sub><italic>re</italic></sub> for channels with close locations for regional smoothing, and the region <italic>G</italic><sub><italic>re</italic></sub> contains <italic>g</italic><sub><italic>re</italic></sub> channels. As shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, all the channels are divided into 13 regions by position, and each region contains at least three channels. <bold>B</bold><sub><italic>re</italic></sub> is the average of blocks in region <italic>re</italic> (<italic>re</italic> &#x2208; [1, 13]).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Region division. Channels belonging to a region are circled with the dotted line. The left subfigure shows the division for DS1 and DS2, while the right subfigure shows the division for DS3.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g001.tif"/>
</fig>
<p>We use a first-order Auto-Regressive (AR) process to model the intra-block correlation. Many applications have used the AR process to express it (<xref ref-type="bibr" rid="B55">Zhang and Rao, 2011</xref>; <xref ref-type="bibr" rid="B54">Zhang et al., 2013</xref>; <xref ref-type="bibr" rid="B50">Yin et al., 2020</xref>). Thus, to find a symmetric positive definite matrix to approximate <bold>B</bold>, it can be constrained to the following form of the Toeplitz matrix.</p>
<disp-formula id="S2.E16"><label>(16)</label><mml:math id="M16"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:msub><mml:mtext mathvariant="bold">B</mml:mtext><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub><mml:mo>&#x225C;</mml:mo><mml:mrow><mml:mtext>Toeplitz</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mn>1</mml:mn><mml:mo rspace="7.5pt">,</mml:mo><mml:mi>r</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="normal">&#x2026;</mml:mi><mml:mo>,</mml:mo><mml:msup><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup><mml:mo>]</mml:mo></mml:mrow><mml:mo rspace="5.8pt">)</mml:mo></mml:mrow></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mn>1</mml:mn></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:msup><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mrow></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:msup><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mo>.</mml:mo></mml:mtd><mml:mtd><mml:mn>1</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:mrow> <mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Empirically calculate <inline-formula><mml:math id="INEQ47"><mml:mrow><mml:mpadded width="+3.3pt"><mml:mi>r</mml:mi></mml:mpadded><mml:mo rspace="5.8pt">=</mml:mo><mml:mfrac><mml:msub><mml:mi>m</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>m</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mfrac></mml:mrow></mml:math></inline-formula>, where <italic>m<sub>0</sub></italic> is the average of the main diagonal of <bold>B</bold><sub><italic>re</italic></sub> and <italic>m</italic><sub><italic>1</italic></sub> is the average of the main sub-diagonal.</p>
</sec>
<sec id="S2.SS2">
<title>Channel Selection Based on Regional Smoothing BSBL</title>
<p>Regarding the feature extracted from the same channel as a block, we perform RSBSBL to get the weight vector of features and design a channel selection based on the weight vector as <xref ref-type="table" rid="A1">Algorithm 1</xref>.</p>
<table-wrap position="float" id="A1">
<label>Algorithm 1</label>
<caption><p>Regional Smoothing Sparse Bayesian Learning (RSBSBL).</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<tbody>
<tr>
<td valign="top" align="left"><bold>Input:</bold> features <bold>X</bold><sub><italic>N</italic> &#x00D7; (<italic>N</italic><sub><italic>c</italic></sub><italic>N</italic><sub><italic>t</italic></sub>)</sub> and labels <bold>Y</bold><sub><italic>N</italic> &#x00D7; 1</sub>, where <italic>N</italic> denotes the number of samples, <italic>N<sub>c</sub></italic> represents the number of channels, and <italic>N<sub>t</sub></italic> is the number of features (sampling points) in one channel.</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Output:</bold> sparse weights <bold>w</bold> and selected channels <italic>C<sub>s</sub></italic>.</td>
</tr>
<tr>
<td valign="top" align="left">1: Choose an initial setting for &#x03C3;<sup>2</sup>, &#x03B3;, <bold>B</bold>. The block size is <italic>N<sub>t</sub></italic>.</td>
</tr>
<tr>
<td valign="top" align="left">2: Set a shear threshold &#x03C4; to obtain the sparsity weights.</td>
</tr>
<tr>
<td valign="top" align="left">3: <bold>While</bold> the convergence criterion is not satisfied, <bold>do</bold></td>
</tr>
<tr>
<td valign="top" align="left">4: <bold>If</bold> <italic>N</italic> &#x2265; <italic>N</italic><sub><italic>c</italic></sub> &#x00D7; <italic>N</italic><sub><italic>t</italic></sub>, <bold>then</bold></td>
</tr>
<tr>
<td valign="top" align="left">5: &#x2004;&#x2004;Calculate <bold>&#x03A3;<sub>w</sub></bold>, &#x03BC;<sub><bold>w</bold></sub>, according to (6)(7).</td>
</tr>
<tr>
<td valign="top" align="left">6: <bold>Else</bold></td>
</tr>
<tr>
<td valign="top" align="left">7: &#x2004;&#x2004;Calculate <bold>&#x03A3;<sub>w</sub></bold>, &#x03BC;<sub><bold>w</bold></sub>, according to (10)(11).</td>
</tr>
<tr>
<td valign="top" align="left">8: <bold>End if</bold></td>
</tr>
<tr>
<td valign="top" align="left">9: Update &#x03C3;<sup>2</sup>, &#x03B3;, <bold>B</bold> according to (13)(14)(15)and (16).</td>
</tr>
<tr>
<td valign="top" align="left">10: &#x2004;<bold>If</bold> &#x03B3;<sub><italic>b</italic></sub> &#x003C; &#x03C4;, <bold>then</bold> &#x03B3;<sub><italic>b</italic></sub> = 0, &#x03B3;<sub><italic>b</italic></sub> &#x2208; &#x03B3;.</td>
</tr>
<tr>
<td valign="top" align="left">11: &#x2004;<inline-formula><mml:math id="INEQ63"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mtext>old</mml:mtext></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> = &#x03C3;<sup>2</sup>, <bold>B</bold><sub>old</sub> = <bold>B</bold>, &#x03B3;<sub>old</sub> = &#x03B3;.</td>
</tr>
<tr>
<td valign="top" align="left">12: <bold>End while</bold></td>
</tr>
<tr>
<td valign="top" align="left">13: <italic>C<sub>s</sub></italic> = &#x007B;<italic>b</italic>&#x007C;&#x03B3;<sub><italic>b</italic></sub> &#x003E; &#x03C4;, <italic>b</italic> &#x2208; 1, 2, &#x2026;, <italic>N</italic><sub><italic>c</italic></sub>, &#x03B3;<sub><italic>b</italic></sub> &#x2208; &#x03B3;&#x007D;.</td>
</tr>
<tr>
<td valign="top" align="left">14: <bold>Return</bold> <inline-formula><mml:math id="INEQ69"><mml:mrow><mml:mtext mathvariant="bold">w</mml:mtext><mml:mo rspace="5.8pt">=</mml:mo><mml:mrow><mml:mo stretchy="false">&#x007B;</mml:mo><mml:msubsup><mml:mi mathvariant="normal">&#x03BC;</mml:mi><mml:mi mathvariant="bold">w</mml:mi><mml:mi mathvariant="bold">b</mml:mi></mml:msubsup><mml:mo stretchy="false">&#x007C;</mml:mo><mml:mrow><mml:mi>b</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mi>C</mml:mi><mml:mi>s</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">&#x007D;</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> and <italic>C<sub>s</sub></italic>.</td>
</tr>
<tr>
<td valign="top" align="left"></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>As shown in <xref ref-type="table" rid="A1">Algorithm 1</xref>, the parameters are initialized, and the shear threshold &#x03C4; is set. Then, from Line 3 to Line 12, the algorithm iteratively solves BSBL and prunes the &#x03B3;. Line 4 to Line 8 decide the calculation of <bold>&#x03A3;<sub>w</sub></bold>, &#x03BC;<sub><bold>w</bold></sub>, so that the large time cost caused by finding the inverse matrix of a large-size matrix can be alleviated. The parameters are updated on Line 9 and Line 10. <xref ref-type="fig" rid="F2">Figure 2</xref> illustrates the relationship between the parameters in a single iteration, where the parameters calculated simultaneously have the same color. The solid line indicates the passing relationship between the parameters of this iteration, and the dashed line indicates the passing relationship between the parameters of this iteration and the next iteration. After the parameters are calculated, in order to achieve the sparse block effect, &#x03B3;<sub><italic>b</italic></sub> is set to 0 when &#x03B3;<sub><italic>b</italic></sub> is less than the threshold &#x03C4;. Then, the algorithm proceeds to the next iteration until the convergence criterion is satisfied. Line 13 automatically selects the channels with &#x03B3;<sub><italic>b</italic></sub> greater than the shear threshold &#x03C4;. Finally, the algorithm returns the selected channels and the corresponding weight vector.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Parameter relationship graphical model in a single iteration. Parameters of the same color can be iterated simultaneously.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g002.tif"/>
</fig>
<p>The off-diagonal matrix <bold>B</bold> makes the weights <bold>w</bold> in the same block relevant in distribution. It means that the correlation of the features from the same channel can be reflected during the process. Moreover, the components of the temporal correlation of different channels in close locations are the same because the <bold>B</bold><sub><italic>re</italic></sub> of channels in the same region are shared. The sparsity of weights will form the units of channels. The features from one channel share the same weight distribution whose variance is controlled by &#x03B3;. For practicality, up to five channels are removed in a single iteration when making a channel selection.</p>
</sec>
</sec>
<sec id="S3">
<title>Materials and Experiments</title>
<sec id="S3.SS1">
<title>Data Descriptions</title>
<p>Three datasets were used in this study to validate the proposed method. DS1 is BCI Competition II dataset IIb (one participant) (<xref ref-type="bibr" rid="B7">Blankertz et al., 2004</xref>) and DS2 is BCI Competition III dataset II (two participants) (<xref ref-type="bibr" rid="B8">Blankertz et al., 2006</xref>). DS3 is the EEG signal collected in our lab (12 participants). The stimulus numbers for each participant of the above three datasets are shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>The stimulus numbers for each participant of DS1, DS2, and DS3.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left">Dataset</td>
<td valign="top" align="center">Stimulus category</td>
<td valign="top" align="center">Training dataset size</td>
<td valign="top" align="center">Test dataset size</td>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">DS1<break/> P1.1</td>
<td valign="top" align="center">Target</td>
<td valign="top" align="center">1260</td>
<td valign="top" align="center">930</td>
</tr>
<tr>
<td valign="top" align="left"/><td valign="top" align="center">Non-target</td>
<td valign="top" align="center">6300</td>
<td valign="top" align="center">4650</td>
</tr>
<tr>
<td valign="top" align="left">DS2<break/> P2.1/P2.2</td>
<td valign="top" align="center">Target</td>
<td valign="top" align="center">2550</td>
<td valign="top" align="center">3000</td>
</tr>
<tr>
<td valign="top" align="left"/><td valign="top" align="center">Non-target</td>
<td valign="top" align="center">12750</td>
<td valign="top" align="center">15000</td>
</tr>
<tr>
<td valign="top" align="left">DS3<break/> P3.1-P3.12</td>
<td valign="top" align="center">Target</td>
<td valign="top" align="center">144</td>
<td valign="top" align="center">144</td>
</tr>
<tr>
<td valign="top" align="left"/><td valign="top" align="center">Non-target</td>
<td valign="top" align="center">720</td>
<td valign="top" align="center">720</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p><italic>Pi.j represents the jth participant in the ith dataset.</italic></p></fn>
</table-wrap-foot>
</table-wrap>
<p>DS1 and DS2 provided by the BCI Competition are public datasets and follow the same experimental paradigm of Farwell and Donchin, as shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. In a six-by-six character matrix containing 26 characters and 10 numbers, participants were asked to focus on a specified character in each trial (a trial is a set of stimuli that can support the output of a recognized character). They could do this by mentally counting the number of flashes (intensifications) of the target stimuli. The paradigm continuously intensified and randomly scanned all rows and columns of the matrix at a rate of 5.7 Hz. Each row and column in the matrix was randomly intensified for 100 ms and was left blank for 75 ms. DS1 contained 42 training characters and 31 testing characters. The training set of DS2 contained 85 characters, and the testing set contained 100 characters. A trial for each character had 15 epochs to apply reliable spelling, and each epoch comprised 12 intensifications. Both datasets were collected using a 64-channel cap, filtered by 0.1&#x2013;60 Hz, and digitized at a sampling rate of 240 Hz. DS1 and DS2 can be downloaded from the websites: <ext-link ext-link-type="uri" xlink:href="http://www.bbci.de/competition/ii/">http://www.bbci.de/competition/ii/</ext-link> and <ext-link ext-link-type="uri" xlink:href="http://www.bbci.de/competition/iii/">http://www.bbci.de/competition/iii/</ext-link>.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>Diagram of the data processing framework, including pre-processing, channel selection, and classification. Using the block sparsity property of RSBSBL, we do pruning on the eligible channels by fitting the training data and labels.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g003.tif"/>
</fig>
<p>DS3 was collected in our lab. Its paradigm was similar to that of the BCI Competition. It contained 26 characters and 10 numbers. DS3 consisted of 12 participants who were graduate students between the ages of 20 and 26 years, with normal or corrected-to-normal vision. The experiments used a 64-channel wireless EEG acquisition system (Neuracle, NeuSen W series, 59 EEG, 4 EOG, 1 ECG) to acquire data at the sampling rate of 1,000 Hz. In the paradigm, each row and column in the six-by-six matrix was randomly intensified for 80 ms and kept extinguished for 80 ms. A trial for each target character included four epochs, and each epoch had 12 intensifications. Participants were required to spell 36 characters. We randomly selected 18 characters as the training dataset and the rest as the test dataset.</p>
</sec>
<sec id="S3.SS2">
<title>The Framework of Data Processing</title>
<p>Considering that some channels contain less task-relevant information but more noise, it is vital to use a reasonable method to select the most effective channels. This study compares the proposed RSBSBL with two empirical channel sets (Set 1 and Set 2) (<xref ref-type="bibr" rid="B26">Krusienski et al., 2008</xref>), LASSO, GLASSO, and SBL in the case of using the same pre-processing process and classifier. Set 1 includes Fz, Cz, Pz, Oz, PO7, and PO8. Set 2 includes Fz, FCz, Cz, C3, C4, CPz, Pz, P3, P4, P7, P8, POz, PO3, PO4, PO7, PO8, Oz, O1, and O2.</p>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> shows the diagram of the data processing framework, which includes three main parts: (1) pre-processing, (2) channel selection, and (3) classification. DS1 and DS2 shared the same pre-processing: bandpass filtering of data from 0.5 to 20 Hz and downsampling by a factor of 5. Then, the sampling rate of the data was 48 Hz. We intercepted the 0&#x2013;667 ms segment after each stimulus as the primary analysis data to obtain 32 sampling points for each stimulus. For the DS3, the 59-channel dataset that went through 0.5&#x2013;20 Hz bandpass filtering was down-sampled to 50 Hz and the data segment from 0 to 600 ms was taken after stimulation to obtain 30 sampling points for each stimulus. Thus, denoting the number of channels as <italic>N<sub>c</sub></italic> and number of signal sampling points as <italic>N<sub>t</sub></italic>, a 1 &#x00D7; <italic>D</italic> feature matrix was obtained for each stimulus, where <italic>D</italic> = <italic>N</italic><sub><italic>t</italic></sub><italic>N</italic><sub><italic>c</italic></sub>. A feature matrix was labeled &#x201C;1&#x201D; only if the corresponding stimulus belonged to the row or column of the target characters. Otherwise, the label was assigned to &#x201C;0.&#x201D;</p>
<p>The typical classification methods of P300 include traditional machine learning methods and neural network-based methods. Traditional machine learning can achieve outstanding performance with less complexity. This study regarded BLDA as a unified classifier for different channel selection algorithms.</p>
</sec>
<sec id="S3.SS3">
<title>Parameter Setting</title>
<p>The optimal combination of parameters was determined by a 10-fold cross-validation. There were two modes of the selected channel number in the experiment for the channel selections: automatic and fixed. When the channel number was determined automatically, we used a threshold to determine the channel number. For LASSO and SBL, the absolute values of the feature weights in one channel were summed up to represent the importance of the channel. The threshold equaled the mean minus 0.5 times the standard deviation of the channel importance values, and the channels with importance values higher than the threshold were selected. As for GLASSO and RSBSBL, automatic channel selection had been enabled in the methods. When the number of selected channels is fixed (<italic>M</italic> channels were selected), we used the same way to evaluate each channel. For all the four methods, the absolute values of the feature weights <bold>w</bold> of each channel were summed, and the top <italic>M</italic> channels were selected in descending order.</p>
</sec>
<sec id="S3.SS4">
<title>Evaluation</title>
<p>We used character recognition accuracy to evaluate the performance of a classification. The character recognition accuracy is defined as follows:</p>
<disp-formula id="S3.E17"><label>(17)</label><mml:math id="M17"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mrow><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mpadded width="+3.3pt"><mml:mi>c</mml:mi></mml:mpadded></mml:mrow><mml:mo rspace="5.8pt">=</mml:mo><mml:mfrac><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi><mml:mi mathvariant="normal">_</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi><mml:mi mathvariant="normal">_</mml:mi><mml:mi>t</mml:mi><mml:mi>o</mml:mi><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>C</italic><sub><italic>test_total</italic></sub> represents the total number of characters in the test dataset, and <italic>C</italic><sub><italic>test_correct</italic></sub> is the sum of all the correctly predicted characters. Besides, to evaluate the significance of performance difference, we introduced a non-parametric statistical hypothesis test, the Wilcoxon signed-rank test. The Wilcoxon signed-rank test can be used as an alternative to the paired <italic>t</italic>-test for matched pairs when the population cannot be assumed to be normally distributed. The significance of the pairs can be confirmed when the corresponding <italic>p</italic>-value is less than 0.05.</p>
</sec>
</sec>
<sec id="S4" sec-type="results">
<title>Results</title>
<p>We evaluated the performance of the proposed method on the three datasets. The results covered the experiments of automatic channel selection and the experiments of selecting <italic>M</italic> channels. For further analysis, we also evaluated the sensitivity of the parameters of the proposed method.</p>
<sec id="S4.SS1">
<title>Results of Automatic Channel Selection</title>
<p>Channel selection is supposed to reserve channels with more helpful information and exclude the channels with more noise. According to the data processing, we chose a unified classifier to verify the performance of different methods for a fair comparison. In <xref ref-type="table" rid="T2">Table 2</xref>, we compared the character recognition accuracy of each method on the three datasets, and the number of selected channels was automatically determined as described in section &#x201C;Parameter Setting.&#x201D; Set 1 and Set 2 are empirical subsets of channels (Set 1 contains 6 channels and Set 2 contains 19 channels). The best results were marked in bold, and the number of channels selected for each participant is presented in the corresponding parentheses.</p>
<table-wrap position="float" id="T2">
<label>TABLE 2</label>
<caption><p>Character recognition accuracy (%) (number of channels) and Wilcoxon signed-rank test comparisons for DS1, DS2, and DS3 when each compared method was used for channel selection.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left">Participant</td>
<td valign="top" align="center" colspan="6">Methods<hr/></td>
</tr>
<tr>
<td/>
<td valign="top" align="center">Set 1</td>
<td valign="top" align="center">Set 2</td>
<td valign="top" align="center">LASSO</td>
<td valign="top" align="center">GLASSO</td>
<td valign="top" align="center">SBL</td>
<td valign="top" align="center">RSBSBL</td>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">P1.1</td>
<td valign="top" align="center"><bold>100.00</bold></td>
<td valign="top" align="center"><bold>100.00</bold></td>
<td valign="top" align="center"><bold>100.00</bold> (43)</td>
<td valign="top" align="center"><bold>100.00</bold> (54)</td>
<td valign="top" align="center"><bold>100.00</bold> (36)</td>
<td valign="top" align="center"><bold>100.00</bold> (29)</td>
</tr>
<tr>
<td valign="top" align="left">P2.1</td>
<td valign="top" align="center">80.00</td>
<td valign="top" align="center">92.00</td>
<td valign="top" align="center">96.00 (43)</td>
<td valign="top" align="center">98.00 (64)</td>
<td valign="top" align="center">97.00 (44)</td>
<td valign="top" align="center"><bold>99.00</bold> (44)</td>
</tr>
<tr>
<td valign="top" align="left">P2.2</td>
<td valign="top" align="center">90.00</td>
<td valign="top" align="center">92.00</td>
<td valign="top" align="center">93.00 (41)</td>
<td valign="top" align="center">95.00 (56)</td>
<td valign="top" align="center">93.00 (39)</td>
<td valign="top" align="center"><bold>96.00</bold> (45)</td>
</tr>
<tr>
<td valign="top" align="left">Average</td>
<td valign="top" align="center">85.00</td>
<td valign="top" align="center">92.00</td>
<td valign="top" align="center">94.50 (42.00)</td>
<td valign="top" align="center">96.50 (60.00)</td>
<td valign="top" align="center">95.00 (41.50)</td>
<td valign="top" align="center"><bold>97.50</bold> (44.50)</td>
</tr>
<tr>
<td valign="top" align="left">P3.1</td>
<td valign="top" align="center">55.56</td>
<td valign="top" align="center">61.11</td>
<td valign="top" align="center">83.33 (39)</td>
<td valign="top" align="center"><bold>88.89</bold> (22)</td>
<td valign="top" align="center">83.33 (40)</td>
<td valign="top" align="center"><bold>88.89</bold> (15)</td>
</tr>
<tr>
<td valign="top" align="left">P3.2</td>
<td valign="top" align="center">50.00</td>
<td valign="top" align="center">61.11</td>
<td valign="top" align="center">77.78 (39)</td>
<td valign="top" align="center">72.22 (37)</td>
<td valign="top" align="center">66.67 (42)</td>
<td valign="top" align="center"><bold>94.44</bold> (14)</td>
</tr>
<tr>
<td valign="top" align="left">P3.3</td>
<td valign="top" align="center">72.22</td>
<td valign="top" align="center">72.22</td>
<td valign="top" align="center">72.22 (42)</td>
<td valign="top" align="center">72.22 (38)</td>
<td valign="top" align="center">72.22 (39)</td>
<td valign="top" align="center"><bold>94.44</bold> (13)</td>
</tr>
<tr>
<td valign="top" align="left">P3.4</td>
<td valign="top" align="center">72.22</td>
<td valign="top" align="center">77.78</td>
<td valign="top" align="center">77.78 (35)</td>
<td valign="top" align="center"><bold>83.33</bold> (24)</td>
<td valign="top" align="center">77.78 (39)</td>
<td valign="top" align="center">77.78 (18)</td>
</tr>
<tr>
<td valign="top" align="left">P3.5</td>
<td valign="top" align="center">55.56</td>
<td valign="top" align="center">61.11</td>
<td valign="top" align="center">83.33 (42)</td>
<td valign="top" align="center"><bold>88.89</bold> (23)</td>
<td valign="top" align="center">77.78 (40)</td>
<td valign="top" align="center"><bold>88.89</bold> (14)</td>
</tr>
<tr>
<td valign="top" align="left">P3.6</td>
<td valign="top" align="center">44.44</td>
<td valign="top" align="center">44.44</td>
<td valign="top" align="center">72.22 (39)</td>
<td valign="top" align="center">72.22 (24)</td>
<td valign="top" align="center">83.33 (40)</td>
<td valign="top" align="center"><bold>88.89</bold> (40)</td>
</tr>
<tr>
<td valign="top" align="left">P3.7</td>
<td valign="top" align="center">66.67</td>
<td valign="top" align="center">77.78</td>
<td valign="top" align="center"><bold>83.33</bold> (41)</td>
<td valign="top" align="center"><bold>83.33</bold> (31)</td>
<td valign="top" align="center">72.22 (38)</td>
<td valign="top" align="center"><bold>83.33</bold> (15)</td>
</tr>
<tr>
<td valign="top" align="left">P3.8</td>
<td valign="top" align="center">72.22</td>
<td valign="top" align="center">77.78</td>
<td valign="top" align="center">72.22 (40)</td>
<td valign="top" align="center">77.78 (16)</td>
<td valign="top" align="center">77.78 (40)</td>
<td valign="top" align="center"><bold>88.89</bold> (13)</td>
</tr>
<tr>
<td valign="top" align="left">P3.9</td>
<td valign="top" align="center">61.11</td>
<td valign="top" align="center">66.67</td>
<td valign="top" align="center">77.78 (42)</td>
<td valign="top" align="center">83.33 (23)</td>
<td valign="top" align="center">77.78 (38)</td>
<td valign="top" align="center"><bold>88.89</bold> (15)</td>
</tr>
<tr>
<td valign="top" align="left">P3.10</td>
<td valign="top" align="center">72.22</td>
<td valign="top" align="center"><bold>94.44</bold></td>
<td valign="top" align="center">88.89 (42)</td>
<td valign="top" align="center">88.89 (32)</td>
<td valign="top" align="center">88.89 (41)</td>
<td valign="top" align="center"><bold>94.44</bold> (15)</td>
</tr>
<tr>
<td valign="top" align="left">P3.11</td>
<td valign="top" align="center">61.11</td>
<td valign="top" align="center">66.67</td>
<td valign="top" align="center">50.00 (36)</td>
<td valign="top" align="center">72.22 (20)</td>
<td valign="top" align="center">50.00 (40)</td>
<td valign="top" align="center"><bold>77.78</bold> (19)</td>
</tr>
<tr>
<td valign="top" align="left">P3.12</td>
<td valign="top" align="center">38.89</td>
<td valign="top" align="center">83.33</td>
<td valign="top" align="center">83.33 (40)</td>
<td valign="top" align="center">83.33 (24)</td>
<td valign="top" align="center">88.89 (39)</td>
<td valign="top" align="center"><bold>94.44</bold> (13)</td>
</tr>
<tr>
<td valign="top" align="left">Average</td>
<td valign="top" align="center">60.19</td>
<td valign="top" align="center">70.37</td>
<td valign="top" align="center">76.85 (39.75)</td>
<td valign="top" align="center">80.55 (26.17)</td>
<td valign="top" align="center">76.39 (39.67)</td>
<td valign="top" align="center"><bold>88.43</bold> (17.00)</td>
</tr>
<tr>
<td valign="top" align="left"><italic>p</italic>-value</td>
<td valign="top" align="center"><bold>0.002</bold></td>
<td valign="top" align="center"><bold>0.005</bold></td>
<td valign="top" align="center"><bold>0.005</bold></td>
<td valign="top" align="center"><bold>0.013</bold></td>
<td valign="top" align="center"><bold>0.003</bold></td>
<td valign="top" align="center">&#x2013;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p><italic>Pi.j represents the jth participant in the ith dataset. The number of selected channels is in parentheses. The highest classification accuracy of each participant of different methods is indicated in bold. The p-values are the results of the Wilcoxon signed-rank test. Set 1 includes Fz, Cz, Pz, Oz, PO7, and PO8. Set 2 includes Fz, FCz, Cz, C3, C4, CPz, Pz, P3, P4, P7, P8, POz, PO3, PO4, PO7, PO8, Oz, O1, and O2.</italic></p></fn>
</table-wrap-foot>
</table-wrap>
<p>For DS1, RSBSBL selected the minimum number of channels when the classification accuracy of all the methods was 100%. For DS2, RSBSBL had the highest average accuracy, 97.50%, which was 1.00% higher than the second-ranked GLASSO. Although SBL selected fewer channels than others, the average recognition accuracy was 95.00%.</p>
<p>For DS3, RSBSBL as a channel selection method could bring higher accuracy with BLDA in 11 of the 12 participants and achieved an average accuracy of 88.43%, higher than that obtained with all channels, by eliminating channels carrying insufficient information. It outperformed the second-ranked GLASSO on average by 7.88% and selected the fewest channels, 17 on average. We evaluated the significance of the classification performance of DS3 <italic>via</italic> the Wilcoxon signed-rank test and found that the proposed method performed significantly better than others (RSBSBL vs. LASSO: <italic>p</italic> = 0.005 &#x003C; 0.05; RSBSBL vs. GLASSO: <italic>p</italic> = 0.013 &#x003C; 0.05; RSBSBL vs. SBL: <italic>p</italic> = 0.003 &#x003C; 0.05).</p>
</sec>
<sec id="S4.SS2">
<title>Results of Selecting M Channels</title>
<p>To further compare the effectiveness of the four methods, we compared the recognition results of the algorithms when <italic>M</italic> channels were selected (<italic>M</italic> = [4, 8, 12, 16]). Top <italic>M</italic> channels were selected by ranking the corresponding channels according to the sum of the absolute values of the feature weights. The classifiers were retrained with the data of the selected channels. Suppose that the number of channels <italic>M</italic>&#x2032; automatically selected by the method was less than the value of <italic>M</italic>. In that case, the most recently deleted <italic>M</italic>&#x2212;<italic>M</italic>&#x2032; channels were added back according to the order in which they were deleted during the iteration of the method.</p>
<p><xref ref-type="fig" rid="F4">Figure 4</xref> shows the accuracy of each method on DS1, DS2, and DS3, with the horizontal coordinates of the bars indicating the selection of the top <italic>M</italic> channels. For DS1, the accuracy of all the methods was the same except that the accuracy of SBL was 96.77% when eight channels were selected, and it was lower than others. For DS2, SBL and RSBSBL obtained better performance with 80% average recognition accuracy when four channels were selected. When 4, 8, 12, and 16 channels were selected, GLASSO obtained average recognition accuracies of 78.5, 84.5, 91, and 92%, respectively, and RSBSBL obtained a better performance of 80, 85.5, 91.5, and 93.5%, respectively. For DS3, GLASSO obtained average recognition accuracy of 73.61, 75.93, 75.46, and 79.63% when 4, 8, 12, and 16 channels were selected, respectively. Moreover, RSBSBL obtained the best performance of 74.07, 82.87, 80.09, and 80.56%, respectively. The average recognition accuracies of LASSO, GLASSO, and RSBSBL on DS3 with <italic>M</italic> = 16 were 77.31, 79.63, and 80.56%, respectively. The results of experiments with the fixed number of selected channels revealed that the feature weights generated by RSBSBL could provide more reasonable guidelines for the channel selection.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>The average recognition accuracy of the four methods on DS1, DS2, and DS3 when <italic>M</italic> channels are selected, where <italic>M</italic> = [4, 8, 12, 16]. The error bars are the standard deviations for DS2 and DS3.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g004.tif"/>
</fig>
<p>We counted the selected channels at the same location and used this count to describe the number of times a channel was selected in the dataset. If 6 of the 12 participants&#x2019; selected channels contain Pz, then the contribution value of the channel corresponding to the Pz electrode is 6. <xref ref-type="fig" rid="F5">Figure 5</xref> indicates the scalp distributions of the contribution value of channels on DS1, DS2, and DS3. The color changes from red to blue, indicating that the channel was selected less often. As shown in <xref ref-type="fig" rid="F5">Figure 5</xref>, when the number of selected channels was small (<italic>M</italic> = 4, 8), RSBSBL selected the occipital and parietal electrodes more often. It shows that, in addition to the P300 potential, the early visual components also contribute to a classification in the paradigm (<xref ref-type="bibr" rid="B6">Blankertz et al., 2011</xref>).</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>The scalp distribution of the four methods on DS1, DS2, and DS3 when <italic>M</italic> channels are selected. The contribution value of each channel is equal to the sum of the selected numbers among all participants in the dataset. The color changes from red to blue, indicating that the channel is selected less often.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g005.tif"/>
</fig>
</sec>
<sec id="S4.SS3">
<title>Parameter Sensitivity</title>
<p>In RSBSBL, &#x03B3;<sub><italic>b</italic></sub> smaller than the threshold &#x03C4; was set to zero, indicating that &#x03C4; determines the pruning strength. We analyzed the change in the number of channels selected and the recognition results when &#x03C4; is assigned different values in the range 10<sup>&#x2212;8</sup> to 10<sup>&#x2212;1</sup>. The recognition accuracy of each participant varying with &#x03C4; was normalized to highlight the location of the optimal threshold. <xref ref-type="fig" rid="F6">Figure 6</xref> illustrates the effect of the threshold on the proposed method. The <italic>x</italic>-axis indicates the number of selected channels, the <italic>y</italic>-axis indicates the value of &#x03C4;, and the <italic>z</italic>-axis indicates the participant ID. The color changes from red to blue, indicating that the point corresponds to a higher to lower normalized accuracy.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>The effect of shear threshold &#x03C4; in RSBSBL on the number of selected channels and accuracy. The <italic>x</italic>-axis indicates the number of selected channels, the <italic>y</italic>-axis indicates the value of &#x03C4;, and the <italic>z</italic>-axis indicates the participant ID. The color of the sphere represents the normalized recognition accuracy for each participant with different thresholds.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g006.tif"/>
</fig>
<p>As shown in <xref ref-type="fig" rid="F6">Figure 6</xref>, the number of channels selected by each participant increased as the threshold value decreased. When the threshold was less than or equal to 10<sup>&#x2212;6</sup>, the number of selected channels was the original number in the dataset, and the algorithm loses the ability to select the channels automatically. Therefore, 10-fold cross-validation can be used to select the optimal parameter values in the range of 10<sup>&#x2212;6</sup> to 10<sup>&#x2212;1</sup>. From the curves corresponding to P3.2, P3.3, P3.7, and P3.12, using selected channels can obtain better recognition accuracy than using all the channels, which proves that channel selection can remove weak task-relevant and noisy channels to improve the classification accuracy.</p>
</sec>
</sec>
<sec id="S5" sec-type="discussion">
<title>Discussion</title>
<p>The experimental results on the three datasets illustrated that the proposed RSBSBL as a channel selection algorithm could automatically screen out effective channels and get the best overall performance among all the compared methods.</p>
<sec id="S5.SS1">
<title>Effectiveness of Channel Subsets</title>
<p><xref ref-type="bibr" rid="B14">Fabiani et al. (1987)</xref> confirmed that the visual P300 paradigm should at least include the Fz, Cz, and Pz electrodes as designated in the 10&#x2013;20 international electrode system. <xref ref-type="bibr" rid="B26">Krusienski et al. (2008)</xref> and <xref ref-type="bibr" rid="B35">McCann et al. (2015)</xref> verified that Fz, Cz, Pz, Oz, PO7, and PO8 corresponded to the parietal and occipital regions of the brain that take a significant part in the recognition of P300 signals. In <xref ref-type="table" rid="T2">Table 2</xref>, Set 1 and Set 2 represent two empirical channel subsets. Set 1 includes Fz, Cz, Pz, Oz, PO7, and PO8. Set 2 includes Fz, FCz, Cz, C3, C4, CPz, Pz, P3, P4, P7, P8, POz, PO3, PO4, PO7, PO8, Oz, O1, and O2. It can be seen that for many participants (P2.1, P2.2, P3.1, P3.2, P3.5, P3.6), the character recognition accuracy was lower when the empirical channel subsets were used. The empirical selection may not include some channels that contribute to the classification. The channels assumed to reflect visual components and also some frontal channels contribute to the classification for some participants. It also indicates the lower robustness of the empirical channel subset. In <xref ref-type="fig" rid="F5">Figure 5</xref>, the scalp maps derived from the channel selection of RSBSBL showed high values in the Pz, P3, P4, O1, O2, Oz, PO7, PO8, and POz regions. These electrodes are very similar to the abovementioned electrodes, which are closely related to the visually induced ERPs. The P1, N1, and N2 components are mainly concentrated in the parietal and occipital regions. The central distribution of P2 and P3 is elongated along the midline electrodes (<xref ref-type="bibr" rid="B6">Blankertz et al., 2011</xref>). 
It can be assumed that a multitude of ERP components is affected by attention to the target and utilized by classifiers rather than just the P300 (<xref ref-type="bibr" rid="B47">Treder and Blankertz, 2010</xref>). In addition, it can be found from <xref ref-type="fig" rid="F5">Figures 5</xref>, <xref ref-type="fig" rid="F6">6</xref> that many participants in DS3 had poorer classification using full-channel data compared to DS1 and DS2, and their topographic maps showed more frontal channels selected when <italic>M</italic> = 8, 12, 16. This phenomenon may be due to the effect of eye artifacts and noise during the experiment.</p>
</sec>
<sec id="S5.SS2">
<title>Character Recognition Performance</title>
<p><xref ref-type="table" rid="T2">Table 2</xref> and <xref ref-type="fig" rid="F4">Figure 4</xref> show the superiority of RSBSBL in channel selection. When the number of channels was determined automatically, the proposed method achieved the highest average recognition accuracy of 100, 97.5, and 88.43% for DS1, DS2, and DS3, with the lowest average number of channels on DS1 and DS3. The RSBSBL achieved better performance than the compared methods when selecting the channels with the fixed number, and the average accuracies of 90.21, 80, and 74.07% were obtained with the top four selected channels on the three datasets.</p>
<p>To verify the performance of RSBSBL, we compared the proposed method with the state-of-the-art developments in recent years on DS2, as shown in <xref ref-type="table" rid="T3">Table 3</xref>. Most of them are based on evolutionary computational algorithms (<xref ref-type="bibr" rid="B23">Kee et al., 2015</xref>; <xref ref-type="bibr" rid="B24">Khairullah et al., 2020</xref>; <xref ref-type="bibr" rid="B41">Tang et al., 2020</xref>; <xref ref-type="bibr" rid="B34">Martinez-Cagigal et al., 2022</xref>). The channel selection methods and classifiers used in each study are shown in the table.</p>
<table-wrap position="float" id="T3">
<label>TABLE 3</label>
<caption><p>Character recognition accuracy (%) of comparison with state-of-the-art results (DS2).</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left">Author</td>
<td valign="top" align="center">Channel selection method</td>
<td valign="top" align="center">Classification method</td>
<td valign="top" align="center">Accuracy</td>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B23">Kee et al., 2015</xref></td>
<td valign="top" align="center">NSGA-II</td>
<td valign="top" align="center">BLDA</td>
<td valign="top" align="center">94.9%</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B24">Khairullah et al., 2020</xref></td>
<td valign="top" align="center">BPSO</td>
<td valign="top" align="center">Ensemble LDA</td>
<td valign="top" align="center">97.0%</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B41">Tang et al., 2020</xref></td>
<td valign="top" align="center">RF-GA</td>
<td valign="top" align="center">CNN</td>
<td valign="top" align="center">96.9%</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B34">Martinez-Cagigal et al., 2022</xref></td>
<td valign="top" align="center">BMOPSO</td>
<td valign="top" align="center">LDA</td>
<td valign="top" align="center">92.5%</td>
</tr>
<tr>
<td/>
<td valign="top" align="center">PEAIL</td>
<td valign="top" align="center">LDA</td>
<td valign="top" align="center">94.0%</td>
</tr>
<tr>
<td valign="top" align="left">Our method</td>
<td valign="top" align="center">RSBSBL</td>
<td valign="top" align="center">BLDA</td>
<td valign="top" align="center">97.5%</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p><italic>NSGA-II, Non-dominated sorting genetic algorithm II; BPSO, binary particle swarm optimization; GA, genetic algorithm; BMOPSO, binary multi-objective particle swarm optimization; PEAIL, Pareto Evolutionary Algorithm based on Incremental Learning.</italic></p></fn>
</table-wrap-foot>
</table-wrap>
<p>The shear threshold &#x03C4; significantly impacted the final results, so cross-validation was required to determine the optimal parameters. According to the analysis of parameter sensitivity, as shown in <xref ref-type="fig" rid="F6">Figure 6</xref>, the recommended threshold selection range was [10<sup>&#x2212;6</sup>, 10<sup>&#x2212;1</sup>]. Besides, <xref ref-type="fig" rid="F6">Figure 6</xref> reflects the variation of character recognition accuracy with the shear threshold for each participant. Compared with others, P3.2, P3.3, P3.9, P3.10, and P3.12 cannot achieve the best recognition accuracies with the full channels, which implies that the EEG signals of these participants have more channels with noise, and these channels are not conducive to signal classification. As shown in <xref ref-type="table" rid="T2">Table 2</xref>, when determining the number of channels automatically, RSBSBL can achieve the best recognition accuracies of them with the corresponding number of selected channels of 14, 13, 15, 15, and 13, respectively. It confirms that RSBSBL can remove unfavorable channels and improve the recognition accuracies.</p>
</sec>
<sec id="S5.SS3">
<title>Effectiveness of Regional Smoothing</title>
<p>To verify the effectiveness of regional smoothing, we conducted further controlled experiments on the three datasets, and the results are shown in <xref ref-type="table" rid="T4">Table 4</xref>. Case 1 represents that B is a unit matrix, implying that no temporal correlation is considered. Case 2 has the same B for all blocks, indicating that all channels share the same B. Case 3 has a different B matrix for each block, showing that regional smoothing is no longer done. The comparison between Case 3 and Case 1 in <xref ref-type="table" rid="T4">Table 4</xref> illustrates the improvement of the model due to temporal correlation. The comparison between our algorithm and Cases 3 and 1 indicates the improvement brought by region smoothing. The &#x201C;&#x002A;&#x201D; in <xref ref-type="table" rid="T4">Table 4</xref> represents a significant difference from our method after the Wilcoxon signed-rank test (RSBSBL vs. Case 1: <italic>p</italic> = 0.015 &#x003C; 0.05; RSBSBL vs. Case 2: <italic>p</italic> = 0.031 &#x003C; 0.05; RSBSBL vs. Case 3: <italic>p</italic> = 0.124).</p>
<table-wrap position="float" id="T4">
<label>TABLE 4</label>
<caption><p>The average character recognition accuracy (%) (number of channels) comparisons on three datasets.</p></caption>
<table cellspacing="5" cellpadding="5" frame="hsides" rules="groups">
<thead>
<tr>
<td valign="top" align="left">Method</td>
<td valign="top" align="center">DS1</td>
<td valign="top" align="center">DS2</td>
<td valign="top" align="center">DS3</td>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Case 1</td>
<td valign="top" align="center">100.00 (30)</td>
<td valign="top" align="center">94.50 (35.50)</td>
<td valign="top" align="center">82.41 (17.75)<xref ref-type="table-fn" rid="t4fns1">&#x002A;</xref></td>
</tr>
<tr>
<td valign="top" align="left">Case 2</td>
<td valign="top" align="center">100.00 (27)</td>
<td valign="top" align="center">96.00 (36.50)</td>
<td valign="top" align="center">82.41 (18.67)<xref ref-type="table-fn" rid="t4fns1">&#x002A;</xref></td>
</tr>
<tr>
<td valign="top" align="left">Case 3</td>
<td valign="top" align="center">100.00 (27)</td>
<td valign="top" align="center">96.50 (47.50)</td>
<td valign="top" align="center">85.65 (16.33)</td>
</tr>
<tr>
<td valign="top" align="left">Our method</td>
<td valign="top" align="center">100.00 (29)</td>
<td valign="top" align="center">97.50 (44.50)</td>
<td valign="top" align="center">88.43 (17.00)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="t4fns1"><p><italic>Case 1: B is the unit matrix. Case 2: All blocks have the same B. Case 3: The B of each block is different. &#x201C;&#x002A;&#x201D; represents a significant difference with our method after Wilcoxon signed-rank test (p &#x003C; 0.05).</italic></p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="S5.SS4">
<title>Time Costs and Limitations</title>
<p>As described in sections &#x201C;Data Descriptions&#x201D; and &#x201C;The Framework of Data Processing,&#x201D; for DS1 and DS2, <italic>N</italic><sub><italic>t</italic></sub> = 32 and <italic>N</italic><sub><italic>c</italic></sub> = 64 after pre-processing, then we can get a 1 &#x00D7; <italic>D</italic> (<italic>D</italic> = <italic>N</italic><sub><italic>t</italic></sub><italic>N</italic><sub><italic>c</italic></sub> = 2048) vector for each stimulus. As shown in <xref ref-type="table" rid="T1">Table 1</xref>, in the training datasets of DS1 and DS2, the total number of stimuli was 7,560 and 15,300, which is larger than the number of features <italic>D</italic>. For DS3, <italic>N</italic><sub><italic>t</italic></sub> = 30 and <italic>N</italic><sub><italic>c</italic></sub> = 59 after pre-processing, then the feature is a 1 &#x00D7; <italic>D</italic> (<italic>D</italic> = <italic>N</italic><sub><italic>t</italic></sub><italic>N</italic><sub><italic>c</italic></sub> = 1770) vector. In <xref ref-type="table" rid="T1">Table 1</xref>, in the training datasets of DS3, the total number of stimuli was 864, which is smaller than the number of its features.</p>
<p>In a preliminary study, we found that inappropriate iterations can make the algorithm have a large time cost [e.g., using equations (10) and (11) on DS1 and DS2]. Therefore, a strategy of automatic selection of the iteration method is used to avoid this problem. In <xref ref-type="fig" rid="F7">Figure 7</xref>, we analyze the variation of the matrix inversion run-time when the size of the matrix increases (the matrix is a square matrix). In the left part, the horizontal axis represents the size of the square matrix. The vertical axis is the value after taking the logarithm of the time, and the actual time (s) is also indicated in the figure. It can be noticed that the time spent on matrix inversion is more than 1 s when the matrix size is larger than 3,000 &#x00D7; 3,000. Therefore, we consider that the method may not be suitable for data with numbers of features and samples larger than 3,000. Of course, this problem can be solved by reducing the number of features and optimizing the iteration steps. The right bar in <xref ref-type="fig" rid="F7">Figure 7</xref> indicates the average time cost of the proposed method on the three datasets, which is acceptable.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>Changes in the run-time (s) of matrix inversion when the size of the matrix increases. In the left part, the horizontal axis represents the size of the square array. The vertical axis is the value after taking the logarithm of the time. The bar chart represents the average time cost of the proposed method on the three datasets.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-16-875851-g007.tif"/>
</fig>
</sec>
<sec id="S5.SS5">
<title>Future Work</title>
<p>The sparse Bayesian algorithm can make the sparsity of the algorithm change by changing the prior distribution of <bold>w</bold> (<xref ref-type="bibr" rid="B44">Tipping, 2001</xref>). <xref ref-type="bibr" rid="B53">Zhang et al. (2015)</xref> used the Laplace distribution instead of the traditional Gaussian distribution for the classification of P300 signals using SBL. Therefore, RSBSBL can change the prior of the weights to make the sparsity stronger in the future, such as the Gamma distribution. The proposed method used the EM algorithm for iteration, and there is still room for improvement in the computational speed. In the future, we will also explore the suitability of the proposed method for other ERPs.</p>
</sec>
</sec>
<sec id="S6" sec-type="conclusion">
<title>Conclusion</title>
<p>This study proposed a novel channel selection method, namely RSBSBL, which improved the original BSBL and obtained the assigned sparse weights. While considering the temporal correlation of sampling points of the same channel, it exploits the spatial distribution characteristics of the electrodes so that channels in adjacent regions share a positive definite matrix to get regional smoothing. Also, we discussed the efficiency of RSBSBL in the channel selection and designed an automatic selection iteration strategy model to reduce the time cost caused by the inverse operation of the large-size matrix. The experimental results on three datasets indicate that RSBSBL can select appropriate channels, leading to high recognition accuracy. We will conduct future studies to improve the robustness of this algorithm.</p>
</sec>
<sec id="S7" sec-type="data-availability">
<title>Data Availability Statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="S8">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by the Ethics Committee of East China University of Science and Technology. The patients/participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec id="S9">
<title>Author Contributions</title>
<p>XZ was the main author to raise the idea of the manuscript, designed the experimental procedure, and collected the original dataset. JJ made effective suggestions on the manuscript&#x2019;s structure and provided the experimental site. RX has embellished the language of the manuscript and made key suggestions. SL and HS were involved in revising the manuscript&#x2019;s results section. XW and AC provided inputs for optimizing the data processing flow. All authors contributed to the manuscript revision, and read and approved the submitted version.</p>
</sec>
<sec id="conf1" sec-type="COI-statement">
<title>Conflict of Interest</title>
<p>RX is employed by the company g.tec medical engineering GmbH. The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="pudiscl1" sec-type="disclaimer">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<sec id="S10" sec-type="funding-information">
<title>Funding</title>
<p>This work was supported by the Science and Technology Innovation 2030 Major Projects 2022ZD0208900 and the Grant National Natural Science Foundation of China under Grant 62176090; in part by Shanghai Municipal Science and Technology Major Project under Grant 2021SHZDZX; in part by the Program of Introducing Talents of Discipline to Universities through the 111 Project under Grant B17017; in part by the Shu Guang Project supported by the Shanghai Municipal Education Commission and the Shanghai Education Development Foundation under Grant 19SG25; in part by the Ministry of Education and Science of the Russian Federation under Grant 14.756.31.0001, and in part by the Polish National Science Center under Grant UMO-2016/20/W/NZ4/00354.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alotaiby</surname> <given-names>T.</given-names></name> <name><surname>Abd El-Samie</surname> <given-names>F. E.</given-names></name> <name><surname>Alshebeili</surname> <given-names>S. A.</given-names></name> <name><surname>Ahmad</surname> <given-names>I.</given-names></name></person-group> (<year>2015</year>). <article-title>A review of channel selection algorithms for EEG signal processing.</article-title> <source><italic>EURASIP J. Adv. Signal Process</italic></source> <volume>2015</volume> <fpage>1</fpage>&#x2013;<lpage>21</lpage>.</citation></ref>
<ref id="B2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ando</surname> <given-names>M.</given-names></name> <name><surname>Nobukawa</surname> <given-names>S.</given-names></name> <name><surname>Kikuchi</surname> <given-names>M.</given-names></name> <name><surname>Takahashi</surname> <given-names>T.</given-names></name></person-group> (<year>2021</year>). <article-title>Identification of Electroencephalogram Signals in Alzheimer&#x2019;s Disease by Multifractal and Multiscale Entropy Analysis.</article-title> <source><italic>Front. Neurosci.</italic></source> <volume>15</volume>:<fpage>667614</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2021.667614</pub-id> <pub-id pub-id-type="pmid">34262427</pub-id></citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arican</surname> <given-names>M.</given-names></name> <name><surname>Polat</surname> <given-names>K.</given-names></name></person-group> (<year>2019</year>). &#x201C;<article-title>&#x201C;Comparison of the Performances of Selected EEG Electrodes with Optimization Algorithms in P300 Based Speller Systems,&#x201D;</article-title>,&#x201D; in <source><italic>in 2019 Scientific Meeting on Electrical-Electronics &#x0026; Biomedical Engineering and Computer Science (EBBT)</italic></source>, (<publisher-loc>Piscataway</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>4</lpage>.</citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arvaneh</surname> <given-names>M.</given-names></name> <name><surname>Robertson</surname> <given-names>I. H.</given-names></name> <name><surname>Ward</surname> <given-names>T. E.</given-names></name></person-group> (<year>2019</year>). <article-title>A p300-based brain-computer interface for improving attention.</article-title> <source><italic>Front. Hum. Neurosci.</italic></source> <volume>12</volume>:<fpage>524</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2018.00524</pub-id> <pub-id pub-id-type="pmid">30662400</pub-id></citation></ref>
<ref id="B5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bashashati</surname> <given-names>A.</given-names></name> <name><surname>Fatourechi</surname> <given-names>M.</given-names></name> <name><surname>Ward</surname> <given-names>R. K.</given-names></name> <name><surname>Birch</surname> <given-names>G. E.</given-names></name></person-group> (<year>2007</year>). <article-title>A survey of signal processing algorithms in brain-computer interfaces based on electrical brain signals.</article-title> <source><italic>J. Neural Eng.</italic></source> <volume>4</volume>:<fpage>R32</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2560/4/2/R03</pub-id></citation></ref>
<ref id="B6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>Lemm</surname> <given-names>S.</given-names></name> <name><surname>Treder</surname> <given-names>M.</given-names></name> <name><surname>Haufe</surname> <given-names>S.</given-names></name> <name><surname>M&#x00FC;ller</surname> <given-names>K.-R.</given-names></name></person-group> (<year>2011</year>). <article-title>Single-trial analysis and classification of ERP components&#x2014;a tutorial.</article-title> <source><italic>Neuroimage</italic></source> <volume>56</volume> <fpage>814</fpage>&#x2013;<lpage>825</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.06.048</pub-id> <pub-id pub-id-type="pmid">20600976</pub-id></citation></ref>
<ref id="B7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>Muller</surname> <given-names>K.-R.</given-names></name> <name><surname>Curio</surname> <given-names>G.</given-names></name> <name><surname>Vaughan</surname> <given-names>T. M.</given-names></name> <name><surname>Schalk</surname> <given-names>G.</given-names></name> <name><surname>Wolpaw</surname> <given-names>J. R.</given-names></name><etal/></person-group> (<year>2004</year>). <article-title>The BCI competition 2003: progress and perspectives in detection and discrimination of EEG single trials.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>51</volume> <fpage>1044</fpage>&#x2013;<lpage>1051</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2004.826692</pub-id> <pub-id pub-id-type="pmid">15188876</pub-id></citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>Muller</surname> <given-names>K.-R.</given-names></name> <name><surname>Krusienski</surname> <given-names>D. J.</given-names></name> <name><surname>Schalk</surname> <given-names>G.</given-names></name> <name><surname>Wolpaw</surname> <given-names>J. R.</given-names></name> <name><surname>Schlogl</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2006</year>). <article-title>The BCI competition III: validating alternative approaches to actual BCI problems.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>14</volume> <fpage>153</fpage>&#x2013;<lpage>159</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2006.875642</pub-id> <pub-id pub-id-type="pmid">16792282</pub-id></citation></ref>
<ref id="B9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cecotti</surname> <given-names>H.</given-names></name> <name><surname>Graser</surname> <given-names>A.</given-names></name></person-group> (<year>2010</year>). <article-title>Convolutional neural networks for P300 detection with application to brain-computer interfaces.</article-title> <source><italic>IEEE Transac. Pattern Anal. Mach. Intell.</italic></source> <volume>33</volume> <fpage>433</fpage>&#x2013;<lpage>445</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2010.125</pub-id> <pub-id pub-id-type="pmid">20567055</pub-id></citation></ref>
<ref id="B10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cecotti</surname> <given-names>H.</given-names></name> <name><surname>Rivet</surname> <given-names>B.</given-names></name> <name><surname>Congedo</surname> <given-names>M.</given-names></name> <name><surname>Jutten</surname> <given-names>C.</given-names></name> <name><surname>Bertrand</surname> <given-names>O.</given-names></name> <name><surname>Maby</surname> <given-names>E.</given-names></name><etal/></person-group> (<year>2011</year>). <article-title>A robust sensor-selection method for P300 brain-computer interfaces.</article-title> <source><italic>J. Neural Eng.</italic></source> <volume>8</volume>:<fpage>16001</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2560/8/1/016001</pub-id></citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deng</surname> <given-names>X.</given-names></name> <name><surname>Yu</surname> <given-names>Z. L.</given-names></name> <name><surname>Lin</surname> <given-names>C.</given-names></name> <name><surname>Gu</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>A bayesian shared control approach for wheelchair robot with brain machine interface.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>28</volume> <fpage>328</fpage>&#x2013;<lpage>338</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2019.2958076</pub-id> <pub-id pub-id-type="pmid">31825869</pub-id></citation></ref>
<ref id="B12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dey</surname> <given-names>M. R.</given-names></name> <name><surname>Shiraz</surname> <given-names>A.</given-names></name> <name><surname>Sharif</surname> <given-names>S.</given-names></name> <name><surname>Lota</surname> <given-names>J.</given-names></name> <name><surname>Demosthenous</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Dictionary selection for compressed sensing of EEG signals using sparse binary matrix and spatiotemporal sparse Bayesian learning.</article-title> <source><italic>Biomed. Phys. Eng. Express</italic></source> <volume>6</volume>:<fpage>65024</fpage>. <pub-id pub-id-type="doi">10.1088/2057-1976/abc133</pub-id> <pub-id pub-id-type="pmid">35093940</pub-id></citation></ref>
<ref id="B13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Donchin</surname> <given-names>E.</given-names></name> <name><surname>Spencer</surname> <given-names>K. M.</given-names></name> <name><surname>Wijesinghe</surname> <given-names>R.</given-names></name></person-group> (<year>2000</year>). <article-title>The mental prosthesis: assessing the speed of a P300-based brain-computer interface.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>8</volume> <fpage>174</fpage>&#x2013;<lpage>179</lpage>. <pub-id pub-id-type="doi">10.1109/86.847808</pub-id></citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fabiani</surname> <given-names>M.</given-names></name> <name><surname>Gratton</surname> <given-names>G.</given-names></name> <name><surname>Karis</surname> <given-names>D.</given-names></name> <name><surname>Donchin</surname> <given-names>E.</given-names></name></person-group> (<year>1987</year>). <article-title>Definition, identification, and reliability of measurement of the P300 component of the event-related brain potential.</article-title> <source><italic>Adv. Psychophysiol.</italic></source> <volume>2</volume>:<fpage>78</fpage>.</citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hammer</surname> <given-names>E. M.</given-names></name> <name><surname>Halder</surname> <given-names>S.</given-names></name> <name><surname>Kleih</surname> <given-names>S. C.</given-names></name> <name><surname>K&#x00FC;bler</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Psychological predictors of visual and auditory P300 brain-computer interface performance.</article-title> <source><italic>Front. Neurosci.</italic></source> <volume>12</volume>:<fpage>307</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2018.00307</pub-id> <pub-id pub-id-type="pmid">29867319</pub-id></citation></ref>
<ref id="B16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hassan</surname> <given-names>M.</given-names></name> <name><surname>Wendling</surname> <given-names>F.</given-names></name></person-group> (<year>2018</year>). <article-title>Electroencephalography Source Connectivity: aiming for High Resolution of Brain Networks in Time and Space.</article-title> <source><italic>IEEE Signal Proc. Magaz.</italic></source> <volume>35</volume> <fpage>81</fpage>&#x2013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1109/MSP.2017.2777518</pub-id></citation></ref>
<ref id="B17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>S.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Yu</surname> <given-names>T.</given-names></name> <name><surname>Zhang</surname> <given-names>R.</given-names></name> <name><surname>Huang</surname> <given-names>Q.</given-names></name> <name><surname>Chuai</surname> <given-names>L.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>EEG-and EOG-based asynchronous hybrid BCI: a system integrating a speller, a web browser, an e-mail client, and a file explorer.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>28</volume> <fpage>519</fpage>&#x2013;<lpage>530</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2019.2961309</pub-id> <pub-id pub-id-type="pmid">31870987</pub-id></citation></ref>
<ref id="B18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoffmann</surname> <given-names>U.</given-names></name> <name><surname>Vesin</surname> <given-names>J.-M.</given-names></name> <name><surname>Ebrahimi</surname> <given-names>T.</given-names></name> <name><surname>Diserens</surname> <given-names>K.</given-names></name></person-group> (<year>2008a</year>). <article-title>An efficient P300-based brain-computer interface for disabled subjects.</article-title> <source><italic>J. Neurosci. Methods</italic></source> <volume>167</volume> <fpage>115</fpage>&#x2013;<lpage>125</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2007.03.005</pub-id> <pub-id pub-id-type="pmid">17445904</pub-id></citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoffmann</surname> <given-names>U.</given-names></name> <name><surname>Yazdani</surname> <given-names>A.</given-names></name> <name><surname>Vesin</surname> <given-names>J.-M.</given-names></name> <name><surname>Ebrahimi</surname> <given-names>T.</given-names></name></person-group> (<year>2008b</year>). &#x201C;<article-title>Bayesian feature selection applied in a P300 brain-computer interface,&#x201D;</article-title>,&#x201D; in <source><italic>2008 16th European Signal Processing Conference</italic></source>, (<publisher-loc>Piscataway</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</citation></ref>
<ref id="B20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>Z.</given-names></name> <name><surname>Guo</surname> <given-names>J.</given-names></name> <name><surname>Zheng</surname> <given-names>W.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Lin</surname> <given-names>Z.</given-names></name> <name><surname>Zheng</surname> <given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>A Calibration-free Approach to Implementing P300-based Brain-computer Interface.</article-title> <source><italic>Cogn. Comput.</italic></source> <volume>14</volume> <fpage>887</fpage>&#x2013;<lpage>899</lpage>.</citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Daly</surname> <given-names>I.</given-names></name> <name><surname>Miao</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>The study of generic model set for reducing calibration time in P300-based brain-computer interface.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>28</volume> <fpage>3</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2019.2956488</pub-id> <pub-id pub-id-type="pmid">31794401</pub-id></citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>J.</given-names></name> <name><surname>Sellers</surname> <given-names>E. W.</given-names></name> <name><surname>Zhou</surname> <given-names>S.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Cichocki</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>A P300 brain-computer interface based on a modification of the mismatch negativity paradigm.</article-title> <source><italic>Int. J. Neural Syst.</italic></source> <volume>25</volume>:<fpage>1550011</fpage>. <pub-id pub-id-type="doi">10.1142/S0129065715500112</pub-id> <pub-id pub-id-type="pmid">25804352</pub-id></citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kee</surname> <given-names>C.-Y.</given-names></name> <name><surname>Ponnambalam</surname> <given-names>S. G.</given-names></name> <name><surname>Loo</surname> <given-names>C.-K.</given-names></name></person-group> (<year>2015</year>). <article-title>Multi-objective genetic algorithm as channel selection method for P300 and motor imagery data set.</article-title> <source><italic>Neurocomputing</italic></source> <volume>161</volume> <fpage>120</fpage>&#x2013;<lpage>131</lpage>.</citation></ref>
<ref id="B24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khairullah</surname> <given-names>E.</given-names></name> <name><surname>Arican</surname> <given-names>M.</given-names></name> <name><surname>Polat</surname> <given-names>K.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain-computer interface speller system design from electroencephalogram signals with channel selection algorithms.</article-title> <source><italic>Med. Hypothes.</italic></source> <volume>141</volume>:<fpage>109690</fpage>. <pub-id pub-id-type="doi">10.1016/j.mehy.2020.109690</pub-id> <pub-id pub-id-type="pmid">32278892</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>K. T.</given-names></name> <name><surname>Suk</surname> <given-names>H. I.</given-names></name> <name><surname>Lee</surname> <given-names>S. W.</given-names></name></person-group> (<year>2016</year>). <article-title>Commanding a brain-controlled wheelchair using steady-state somatosensory evoked potentials.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>26</volume> <fpage>654</fpage>&#x2013;<lpage>665</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2016.2597854</pub-id> <pub-id pub-id-type="pmid">27514060</pub-id></citation></ref>
<ref id="B26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krusienski</surname> <given-names>D. J.</given-names></name> <name><surname>Sellers</surname> <given-names>E. W.</given-names></name> <name><surname>McFarland</surname> <given-names>D. J.</given-names></name> <name><surname>Vaughan</surname> <given-names>T. M.</given-names></name> <name><surname>Wolpaw</surname> <given-names>J. R.</given-names></name></person-group> (<year>2008</year>). <article-title>Toward enhanced P300 speller performance.</article-title> <source><italic>J. Neurosci. Methods</italic></source> <volume>167</volume> <fpage>15</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2007.07.017</pub-id> <pub-id pub-id-type="pmid">17822777</pub-id></citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lal</surname> <given-names>T. N.</given-names></name> <name><surname>Schroder</surname> <given-names>M.</given-names></name> <name><surname>Hinterberger</surname> <given-names>T.</given-names></name> <name><surname>Weston</surname> <given-names>J.</given-names></name> <name><surname>Bogdan</surname> <given-names>M.</given-names></name> <name><surname>Birbaumer</surname> <given-names>N.</given-names></name><etal/></person-group> (<year>2004</year>). <article-title>Support vector channel selection in BCI.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>51</volume> <fpage>1003</fpage>&#x2013;<lpage>1010</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2004.827827</pub-id> <pub-id pub-id-type="pmid">15188871</pub-id></citation></ref>
<ref id="B28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lei</surname> <given-names>X.</given-names></name> <name><surname>Yang</surname> <given-names>P.</given-names></name> <name><surname>Yao</surname> <given-names>D.</given-names></name></person-group> (<year>2009</year>). <article-title>An empirical Bayesian framework for brain-computer interfaces.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>17</volume> <fpage>521</fpage>&#x2013;<lpage>529</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2009.2027705</pub-id> <pub-id pub-id-type="pmid">19622442</pub-id></citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>H.</given-names></name> <name><surname>Yu</surname> <given-names>L.</given-names></name></person-group> (<year>2005</year>). <article-title>Toward integrating feature selection algorithms for classification and clustering.</article-title> <source><italic>IEEE Trans. Knowledge Data Eng.</italic></source> <volume>17</volume> <fpage>491</fpage>&#x2013;<lpage>502</lpage>. <pub-id pub-id-type="doi">10.1109/TCBB.2009.6</pub-id> <pub-id pub-id-type="pmid">21233523</pub-id></citation></ref>
<ref id="B30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lopez-Calderon</surname> <given-names>J.</given-names></name> <name><surname>Luck</surname> <given-names>S. J.</given-names></name></person-group> (<year>2014</year>). <article-title>ERPLAB: an open-source toolbox for the analysis of event-related potentials.</article-title> <source><italic>Front. Hum. Neurosci.</italic></source> <volume>8</volume>:<fpage>213</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2014.00213</pub-id> <pub-id pub-id-type="pmid">24782741</pub-id></citation></ref>
<ref id="B31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maest&#x00FA;</surname> <given-names>F.</given-names></name> <name><surname>Cuesta</surname> <given-names>P.</given-names></name> <name><surname>Hasan</surname> <given-names>O.</given-names></name> <name><surname>Fernand&#x00E9;z</surname> <given-names>A.</given-names></name> <name><surname>Funke</surname> <given-names>M.</given-names></name> <name><surname>Schulz</surname> <given-names>P. E.</given-names></name></person-group> (<year>2019</year>). <article-title>The Importance of the Validation of M/EEG With Current Biomarkers in Alzheimer&#x2019;s Disease.</article-title> <source><italic>Front. Hum. Neurosci.</italic></source> <volume>13</volume>:<fpage>17</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2019.00017</pub-id> <pub-id pub-id-type="pmid">30792632</pub-id></citation></ref>
<ref id="B32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Manyakov</surname> <given-names>N. V.</given-names></name> <name><surname>Chumerin</surname> <given-names>N.</given-names></name> <name><surname>Combaz</surname> <given-names>A.</given-names></name> <name><surname>van Hulle</surname> <given-names>M. M.</given-names></name></person-group> (<year>2011</year>). <article-title>Comparison of classification methods for P300 brain-computer interface on disabled subjects.</article-title> <source><italic>Comput. Intell. Neurosci.</italic></source> <volume>2011</volume> <fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1155/2011/519868</pub-id> <pub-id pub-id-type="pmid">21941530</pub-id></citation></ref>
<ref id="B33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martinez-Cagigal</surname> <given-names>V.</given-names></name> <name><surname>Hornero</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>Binary Bees Algorithm for P300-Based Brain-Computer Interfaces Channel Selection</article-title>,&#x201D; in <source><italic>Advances in Computational Intelligence. IWANN 2017. Lecture Notes in Computer Science()</italic></source>, <volume>Vol. 10306</volume> <role>eds</role> <person-group person-group-type="editor"><name><surname>Rojas</surname> <given-names>I.</given-names></name> <name><surname>Joya</surname> <given-names>G.</given-names></name> <name><surname>Catala</surname> <given-names>A.</given-names></name></person-group> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>).</citation></ref>
<ref id="B34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martinez-Cagigal</surname> <given-names>V.</given-names></name> <name><surname>Santamar&#x00ED;a-V&#x00E1;zquez</surname> <given-names>E.</given-names></name> <name><surname>Hornero</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Brain-computer interface channel selection optimization using meta-heuristics and evolutionary algorithms.</article-title> <source><italic>Mart&#x00ED;nez-Cagigal. Appl. Soft Comput.</italic></source> <volume>115</volume>:<fpage>108176</fpage>.</citation></ref>
<ref id="B35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McCann</surname> <given-names>M. T.</given-names></name> <name><surname>Thompson</surname> <given-names>D. E.</given-names></name> <name><surname>Syed</surname> <given-names>Z. H.</given-names></name> <name><surname>Huggins</surname> <given-names>J. E.</given-names></name></person-group> (<year>2015</year>). <article-title>Electrode subset selection methods for an EEG-based P300 brain-computer interface.</article-title> <source><italic>Disabil. Rehabil.-Assist. Technol.</italic></source> <volume>10</volume> <fpage>216</fpage>&#x2013;<lpage>220</lpage>. <pub-id pub-id-type="doi">10.3109/17483107.2014.884174</pub-id> <pub-id pub-id-type="pmid">24506528</pub-id></citation></ref>
<ref id="B36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nakanishi</surname> <given-names>M.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>Y.-T.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name> <name><surname>Jung</surname> <given-names>T.-P.</given-names></name></person-group> (<year>2017</year>). <article-title>Enhancing detection of SSVEPs for a high-speed brain speller using task-related component analysis.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>65</volume> <fpage>104</fpage>&#x2013;<lpage>112</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2017.2694818</pub-id> <pub-id pub-id-type="pmid">28436836</pub-id></citation></ref>
<ref id="B37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Padfield</surname> <given-names>N.</given-names></name> <name><surname>Zabalza</surname> <given-names>J.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Masero</surname> <given-names>V.</given-names></name> <name><surname>Ren</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>EEG-based brain-computer interfaces using motor-imagery: techniques and challenges.</article-title> <source><italic>Sensors</italic></source> <volume>19</volume>:<fpage>1423</fpage>. <pub-id pub-id-type="doi">10.3390/s19061423</pub-id> <pub-id pub-id-type="pmid">30909489</pub-id></citation></ref>
<ref id="B38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rakotomamonjy</surname> <given-names>A.</given-names></name> <name><surname>Guigue</surname> <given-names>V.</given-names></name></person-group> (<year>2008</year>). <article-title>BCI competition III: dataset II-ensemble of SVMs for BCI P300 speller.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>55</volume> <fpage>1147</fpage>&#x2013;<lpage>1154</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2008.915728</pub-id> <pub-id pub-id-type="pmid">18334407</pub-id></citation></ref>
<ref id="B39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reichert</surname> <given-names>C.</given-names></name> <name><surname>D&#x00FC;rschmid</surname> <given-names>S.</given-names></name> <name><surname>Heinze</surname> <given-names>H.-J.</given-names></name> <name><surname>Hinrichs</surname> <given-names>H.</given-names></name></person-group> (<year>2017</year>). <article-title>A comparative study on the detection of covert attention in event-related EEG and MEG signals to control a BCI.</article-title> <source><italic>Front. Neurosci.</italic></source> <volume>11</volume>:<fpage>575</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2017.00575</pub-id> <pub-id pub-id-type="pmid">29085279</pub-id></citation></ref>
<ref id="B40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sorbello</surname> <given-names>R.</given-names></name> <name><surname>Tramonte</surname> <given-names>S.</given-names></name> <name><surname>Giardina</surname> <given-names>M. E.</given-names></name> <name><surname>La Bella</surname> <given-names>V.</given-names></name> <name><surname>Spataro</surname> <given-names>R.</given-names></name> <name><surname>Allison</surname> <given-names>B.</given-names></name><etal/></person-group> (<year>2017</year>). <article-title>A human-humanoid interaction through the use of BCI for locked-in ALS patients using neuro-biological feedback fusion.</article-title> <source><italic>IEEE Trans. Neural Syst. Rehabil. Eng.</italic></source> <volume>26</volume> <fpage>487</fpage>&#x2013;<lpage>497</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2017.2728140</pub-id> <pub-id pub-id-type="pmid">28727554</pub-id></citation></ref>
<ref id="B41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>C.</given-names></name> <name><surname>Xu</surname> <given-names>T.</given-names></name> <name><surname>Chen</surname> <given-names>P.</given-names></name> <name><surname>He</surname> <given-names>Y.</given-names></name> <name><surname>Bezerianos</surname> <given-names>A.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name></person-group> (<year>2020</year>). &#x201C;<article-title>A Channel Selection Method for Event Related Potential Detection based on Random Forest and Genetic Algorithm,&#x201D;</article-title> in <source><italic>2020 Chinese Automation Congress (CAC), 5419&#x2013;5424</italic></source>, (<publisher-loc>Piscataway</publisher-loc>: <publisher-name>IEEE</publisher-name>).</citation></ref>
<ref id="B42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tekgul</surname> <given-names>H.</given-names></name> <name><surname>Bourgeois</surname> <given-names>B. F. D.</given-names></name> <name><surname>Gauvreau</surname> <given-names>K.</given-names></name> <name><surname>Bergin</surname> <given-names>A. M.</given-names></name></person-group> (<year>2005</year>). <article-title>Electroencephalography in neonatal seizures: comparison of a reduced and a full 10/20 montage.</article-title> <source><italic>Pediatr. Neurol.</italic></source> <volume>32</volume> <fpage>155</fpage>&#x2013;<lpage>161</lpage>. <pub-id pub-id-type="doi">10.1016/j.pediatrneurol.2004.09.014</pub-id> <pub-id pub-id-type="pmid">15730894</pub-id></citation></ref>
<ref id="B43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tibshirani</surname> <given-names>R.</given-names></name></person-group> (<year>1996</year>). <article-title>Regression shrinkage and selection <italic>via</italic> the lasso.</article-title> <source><italic>J. R. Stat. Soc. Ser. B-Stat. Methodol.</italic></source> <volume>58</volume> <fpage>267</fpage>&#x2013;<lpage>288</lpage>.</citation></ref>
<ref id="B44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tipping</surname> <given-names>M. E.</given-names></name></person-group> (<year>2001</year>). <article-title>Sparse Bayesian learning and the relevance vector machine.</article-title> <source><italic>J. Mach. Learn. Res.</italic></source> <volume>1</volume> <fpage>211</fpage>&#x2013;<lpage>244</lpage>.</citation></ref>
<ref id="B45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tomioka</surname> <given-names>R.</given-names></name> <name><surname>M&#x00FC;ller</surname> <given-names>K.-R.</given-names></name></person-group> (<year>2010</year>). <article-title>A regularized discriminative framework for EEG analysis with application to brain-computer interface.</article-title> <source><italic>Neuroimage</italic></source> <volume>49</volume> <fpage>415</fpage>&#x2013;<lpage>432</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2009.07.045</pub-id> <pub-id pub-id-type="pmid">19646534</pub-id></citation></ref>
<ref id="B46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Townsend</surname> <given-names>G.</given-names></name> <name><surname>LaPallo</surname> <given-names>B. K.</given-names></name> <name><surname>Boulay</surname> <given-names>C. B.</given-names></name> <name><surname>Krusienski</surname> <given-names>D. J.</given-names></name> <name><surname>Frye</surname> <given-names>G. E.</given-names></name> <name><surname>Hauser</surname> <given-names>C.</given-names></name><etal/></person-group> (<year>2010</year>). <article-title>A novel P300-based brain-computer interface stimulus presentation paradigm: moving beyond rows and columns.</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>121</volume> <fpage>1109</fpage>&#x2013;<lpage>1120</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2010.01.030</pub-id> <pub-id pub-id-type="pmid">20347387</pub-id></citation></ref>
<ref id="B47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Treder</surname> <given-names>M. S.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name></person-group> (<year>2010</year>). <article-title>(C) overt attention and visual speller design in an ERP-based brain-computer interface.</article-title> <source><italic>Behav. Brain Funct.</italic></source> <volume>6</volume> <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1186/1744-9081-6-28</pub-id> <pub-id pub-id-type="pmid">20509913</pub-id></citation></ref>
<ref id="B48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wolpaw</surname> <given-names>J. R.</given-names></name> <name><surname>Birbaumer</surname> <given-names>N.</given-names></name> <name><surname>McFarland</surname> <given-names>D. J.</given-names></name> <name><surname>Pfurtscheller</surname> <given-names>G.</given-names></name> <name><surname>Vaughan</surname> <given-names>T. M.</given-names></name></person-group> (<year>2002</year>). <article-title>Brain-computer interfaces for communication and control.</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>113</volume> <fpage>767</fpage>&#x2013;<lpage>791</lpage>.</citation></ref>
<ref id="B49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>W.</given-names></name> <name><surname>Wu</surname> <given-names>C.</given-names></name> <name><surname>Gao</surname> <given-names>S.</given-names></name> <name><surname>Liu</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name></person-group> (<year>2014</year>). <article-title>Bayesian estimation of ERP components from multicondition and multichannel EEG.</article-title> <source><italic>Neuroimage</italic></source> <volume>88</volume> <fpage>319</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.11.028</pub-id> <pub-id pub-id-type="pmid">24333395</pub-id></citation></ref>
<ref id="B50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yin</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>K.</given-names></name> <name><surname>Tong</surname> <given-names>T.</given-names></name> <name><surname>An</surname> <given-names>Y.</given-names></name> <name><surname>Meng</surname> <given-names>H.</given-names></name> <name><surname>Yang</surname> <given-names>X.</given-names></name><etal/></person-group> (<year>2020</year>). <article-title>Improved Block Sparse Bayesian Learning Method Using K-Nearest Neighbor Strategy for Accurate Tumor Morphology Reconstruction in Bioluminescence Tomography.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>67</volume> <fpage>2023</fpage>&#x2013;<lpage>2032</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2019.2953732</pub-id> <pub-id pub-id-type="pmid">31751214</pub-id></citation></ref>
<ref id="B51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yuan</surname> <given-names>M.</given-names></name> <name><surname>Lin</surname> <given-names>Y.</given-names></name></person-group> (<year>2006</year>). <article-title>Model selection and estimation in regression with grouped variables.</article-title> <source><italic>J. R. Stat. Soc. Ser. B</italic></source> <volume>68</volume> <fpage>49</fpage>&#x2013;<lpage>67</lpage>.</citation></ref>
<ref id="B52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Jin</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name></person-group> (<year>2017</year>). <article-title>Sparse Bayesian learning for obtaining sparsity of EEG frequency bands based feature vectors in motor imagery classification.</article-title> <source><italic>Int. J. Neural Syst.</italic></source> <volume>27</volume>:<fpage>1650032</fpage>.</citation></ref>
<ref id="B53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>G.</given-names></name> <name><surname>Jin</surname> <given-names>J.</given-names></name> <name><surname>Zhao</surname> <given-names>Q.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Cichocki</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Sparse Bayesian classification of EEG for brain-computer interface.</article-title> <source><italic>IEEE Trans. Neural Netw. Learn. Syst.</italic></source> <volume>27</volume> <fpage>2256</fpage>&#x2013;<lpage>2267</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2015.2476656</pub-id> <pub-id pub-id-type="pmid">26415189</pub-id></citation></ref>
<ref id="B54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Jung</surname> <given-names>T.-P.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name> <name><surname>Rao</surname> <given-names>B. D.</given-names></name></person-group> (<year>2013</year>). <article-title>Compressed Sensing for Energy-Efficient Wireless Telemonitoring of Noninvasive Fetal ECG <italic>Via</italic> Block Sparse Bayesian Learning.</article-title> <source><italic>IEEE Trans. Biomed. Eng.</italic></source> <volume>60</volume> <fpage>300</fpage>&#x2013;<lpage>309</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2012.2226175</pub-id> <pub-id pub-id-type="pmid">23144028</pub-id></citation></ref>
<ref id="B55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Rao</surname> <given-names>B. D.</given-names></name></person-group> (<year>2011</year>). <article-title>Sparse signal recovery with temporally correlated source vectors using sparse Bayesian learning.</article-title> <source><italic>IEEE J. Sel. Top. Signal Process.</italic></source> <volume>5</volume> <fpage>912</fpage>&#x2013;<lpage>926</lpage>.</citation></ref>
</ref-list>
</back>
</article>
