<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2021.788258</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Feedback Related Potentials for EEG-Based Typing Systems</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Gonzalez-Navarro</surname> <given-names>Paula</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1628186/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Celik</surname> <given-names>Basak</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x02020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1249642/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Moghadamfalahi</surname> <given-names>Mohammad</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x02020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1628418/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Akcakaya</surname> <given-names>Murat</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/810494/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Fried-Oken</surname> <given-names>Melanie</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1083668/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Erdo&#x0011F;mu&#x0015F;</surname> <given-names>Deniz</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c003"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1130523/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Cognitive Systems Laboratory, Northeastern University</institution>, <addr-line>Boston, MA</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>CAMBI (Consortium for Accessible Multimodal Brain-Body Interfaces)</institution>, <addr-line>Portland, OR</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Electrical and Computer Engineering Department, University of Pittsburgh</institution>, <addr-line>Pittsburgh, PI</addr-line>, <country>United States</country></aff>
<aff id="aff4"><sup>4</sup><institution>Institute on Development and Disability, Oregon Health &#x00026; Science University</institution>, <addr-line>Portland, OR</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Gabriel Pires, University of Coimbra, Portugal</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Qiang Fang, Shantou University, China; Sung-Phil Kim, Ulsan National Institute of Science and Technology, South Korea</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Paula Gonzalez-Navarro <email>pagon&#x00040;microsoft.com</email></corresp>
<corresp id="c002">Basak Celik <email>bcelik&#x00040;ece.neu.edu</email></corresp>
<corresp id="c003">Deniz Erdo&#x0011F;mu&#x0015F; <email>erdogmus&#x00040;ece.neu.edu</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Brain-Computer Interfaces, a section of the journal Frontiers in Human Neuroscience</p></fn>
<fn fn-type="equal" id="fn002"><p>&#x02020;These authors have contributed equally to this work and share second authorship</p></fn></author-notes>
<pub-date pub-type="epub">
<day>25</day>
<month>01</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>15</volume>
<elocation-id>788258</elocation-id>
<history>
<date date-type="received">
<day>01</day>
<month>10</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>12</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2022 Gonzalez-Navarro, Celik, Moghadamfalahi, Akcakaya, Fried-Oken and Erdo&#x0011F;mu&#x0015F;.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Gonzalez-Navarro, Celik, Moghadamfalahi, Akcakaya, Fried-Oken and Erdo&#x0011F;mu&#x0015F;</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Error related potentials (ErrP), which are elicited in the EEG in response to a perceived error, have been used for error correction and adaptation in event related potential (ERP)-based brain computer interfaces designed for typing. In these typing interfaces, ERP evidence is collected in response to a sequence of stimuli, usually presented in visual form, and the intended stimulus is probabilistically inferred (as the stimulus with the highest probability) and presented to the user as the decision. If the inferred stimulus is incorrect, an ErrP is expected to be elicited in the EEG. Early approaches to using ErrP in the design of typing interfaces attempt to make hard decisions on the perceived error: the perceived error is corrected, and either the sequence of stimuli is repeated to obtain further ERP evidence, or, without further repetition, the stimulus with the second highest probability is presented to the user as the decision of the system. Moreover, none of the existing approaches use a language model to increase the performance of typing. In this work, unlike the existing approaches, we study the potential benefits of fusing feedback related potentials (FRP), a form of ErrP, with ERP and context information (language model, LM) in a Bayesian fashion to detect the user intent. We present experimental results based on data from 12 healthy participants using RSVP Keyboard&#x02122; to complete a copy-phrase task. Three paradigms are compared: [P1] uses only ERP/LM Bayesian fusion; in [P2], each RSVP sequence is appended with the top candidate in the alphabet according to the posterior after ERP evidence fusion, and the corresponding FRP is then incorporated; in [P3], the top candidate is shown as a prospect to generate FRP evidence only if its posterior exceeds a threshold. Analyses indicate that ERP/LM/FRP evidence fusion during decision making yields significant speed-accuracy benefits for the user.</p></abstract>
<kwd-group>
<kwd>error related potentials</kwd>
<kwd>feedback related potentials</kwd>
<kwd>event related potentials</kwd>
<kwd>electroencephalography</kwd>
<kwd>brain computer interfaces</kwd>
<kwd>RSVP Keyboard&#x02122;</kwd>
<kwd>Bayesian fusion</kwd>
</kwd-group>
<counts>
<fig-count count="10"/>
<table-count count="3"/>
<equation-count count="6"/>
<ref-count count="32"/>
<page-count count="13"/>
<word-count count="8531"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1. Introduction</title>
<p>Event related potentials (ERPs) are commonly employed in the design of non-invasive electroencephalography (EEG)-based brain computer interfaces (BCIs) to detect the user intent (Farwell and Donchin, <xref ref-type="bibr" rid="B11">1988</xref>; Acqualagna et al., <xref ref-type="bibr" rid="B1">2010</xref>; Orhan et al., <xref ref-type="bibr" rid="B25">2012</xref>; Akcakaya et al., <xref ref-type="bibr" rid="B2">2014</xref>; Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>). The pioneering study by Farwell and Donchin demonstrated that ERPs can be used to design a letter-by-letter typing BCI (Farwell and Donchin, <xref ref-type="bibr" rid="B11">1988</xref>). In addition to ERPs, depending on the BCI application, error-related potentials (ErrPs) can be used to indicate a perceived error. ErrPs are detectable as deflections in the EEG signal measured over the scalp of a person when they make or perceive an error (Falkenstein et al., <xref ref-type="bibr" rid="B10">2000</xref>; Davies et al., <xref ref-type="bibr" rid="B7">2004</xref>; Buttfield et al., <xref ref-type="bibr" rid="B3">2006</xref>; Yazicioglu et al., <xref ref-type="bibr" rid="B32">2006</xref>; Ferrez and del R. Millan, <xref ref-type="bibr" rid="B12">2008</xref>; G&#x000FC;rel and Mehring, <xref ref-type="bibr" rid="B15">2012</xref>; Margaux et al., <xref ref-type="bibr" rid="B21">2012</xref>; Sp&#x000FC;ler et al., <xref ref-type="bibr" rid="B30">2012</xref>; Kieffaber et al., <xref ref-type="bibr" rid="B18">2016</xref>). Different variants of ErrPs can be measured in the recorded EEG signal. For example, when the user realizes that the interface failed to properly recognize the user&#x00027;s intention, an ErrP signal is induced, which can be characterized by two fronto-central positive peaks appearing 200 and 320 ms after the feedback, a fronto-central negativity near 250 ms, and, finally, a broader fronto-central negative deflection about 450 ms after the feedback. These latencies can change depending on the experimental paradigm (Iturrate et al., <xref ref-type="bibr" rid="B16">2013</xref>). Moreover, some studies have demonstrated a correlation between trial-by-trial estimates of the ErrP and post-error slowing (Debener et al., <xref ref-type="bibr" rid="B8">2005</xref>). Based on these studies, it has been proposed that the negative deflection of the ErrP signal is the result of an error-detection mechanism, as opposed to being an inhibitory or corrective signal. In addition, it has been suggested that the positive components of the ErrP reflect conscious error processing or post-error adjustment of response strategies (Falkenstein et al., <xref ref-type="bibr" rid="B10">2000</xref>).</p>
<p>While some BCI typing systems have shown encouraging results (Kawala-Sterniuk et al., <xref ref-type="bibr" rid="B17">2021</xref>), there is still much work to be done to produce real-world-worthy systems that can be comfortably, conveniently, and reliably used by individuals with severe neuromuscular disabilities who cannot use standard communication pathways or other assistive technologies. This work presents several improvements to a language-model-assisted EEG-based typing BCI, RSVP Keyboard&#x02122; (Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>), as well as to similar designs that depend on visually evoked P300 potentials. The baseline system fuses text/language and EEG evidence to infer user intent in EEG-controlled spelling to generate expressive language. In particular, we study the potential benefits of fusing feedback related potentials, a form of ErrP, with ERP and context information (language model, LM) in a Bayesian framework. The probabilistic evidence for ERP, ErrP, and non-EEG context is computed using different probabilistic generative models.</p>
<p>We represent the domain knowledge and the causal relationships among the different variables in a probabilistic graphical model. The presented approach is a general dynamic fusion framework that could be used with various presentation paradigms. Typing interfaces aim to reach a certain confidence level before making a decision on the user intent, and accordingly, sequences of symbols are repeated multiple times. In our approach, after every presented sequence, we compute the posterior distribution of the symbol set (all the symbols in the English alphabet and the backspace symbol) conditioned on ERP likelihoods and LM-based priors. The mode of the posterior distribution is selected as the prospect symbol, which is presented to the user either after every sequence or after a confidence threshold is reached. The prospect symbol is an additional visual stimulus, which induces an EEG response that is indicative of that prospect&#x00027;s correctness. We refer to this response as the feedback related potential (FRP), which takes the form of an ErrP/non-ErrP indicating that an incorrect/correct prospect symbol was presented. After the prospect symbol is presented and the new FRP evidence is obtained, the FRP evidence is fused, through the Bayesian graphical model, with the ERP and LM-based evidence, and the posterior distribution of the symbols is updated. Given the low signal-to-noise ratio of EEG, we take an iterative update approach by presenting multiple sequences of ERP and FRP stimuli to the user to compute a more robust estimate, until the posterior reaches an information theoretic confidence threshold. User intent is then selected using maximum a posteriori (MAP) inference.</p>
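<p>To make the update cycle concrete, the following is a minimal Python sketch of one such decision loop. The presentation and likelihood-ratio functions (present_sequence, present_prospect, erp_lr, frp_lr) are hypothetical placeholders standing in for the stimulus presentation and the calibrated evidence models detailed in sections 2.4 and 2.5; the sketch illustrates the fusion logic only, not the actual implementation.</p>
<preformat>
import numpy as np

def type_one_symbol(symbols, lm_prior, erp_lr, frp_lr,
                    present_sequence, present_prospect,
                    alpha_d=0.9, n_d=10):
    """One epoch: fuse an LM prior with ERP and FRP likelihood ratios
    until the posterior of some symbol exceeds alpha_d (or n_d
    sequences are spent), then return the MAP symbol.
    present_sequence / present_prospect are hypothetical stand-ins
    for stimulus presentation plus EEG feature extraction."""
    log_post = np.log(np.asarray(lm_prior, dtype=float))  # LM-based prior
    post = np.exp(log_post) / np.exp(log_post).sum()
    for _ in range(n_d):
        # RSVP sequence: one ERP evidence vector per flashed symbol
        for sym, e_c in present_sequence(symbols):
            log_post[symbols.index(sym)] += np.log(erp_lr(e_c))
        # prospect symbol = mode of the current posterior
        prospect = symbols[int(np.argmax(log_post))]
        e_p = present_prospect(prospect)       # feedback flash -> FRP
        log_post[symbols.index(prospect)] += np.log(frp_lr(e_p))
        post = np.exp(log_post - log_post.max())
        post /= post.sum()                     # normalized posterior
        if post.max() >= alpha_d:              # confidence reached
            break
    return symbols[int(np.argmax(post))]       # MAP decision
</preformat>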
<p>Existing typing BCIs that attempt to use ERP/FRP jointly typically fall into one of the following categories: a flag produced by the ErrP classifier results in (a) the deletion of the last selection made using the ERP classifier (Dal Seno et al., <xref ref-type="bibr" rid="B6">2010</xref>; Schmidt et al., <xref ref-type="bibr" rid="B28">2012</xref>; Sp&#x000FC;ler et al., <xref ref-type="bibr" rid="B30">2012</xref>; Chavarriaga et al., <xref ref-type="bibr" rid="B4">2014</xref>); (b) the replacement of the last selection made using the ERP classifier with the second most probable option (Combaz et al., <xref ref-type="bibr" rid="B5">2012</xref>; Margaux et al., <xref ref-type="bibr" rid="B21">2012</xref>; Chavarriaga et al., <xref ref-type="bibr" rid="B4">2014</xref>); or (c) the presentation of more stimuli to gather additional ERP evidence, without using the FRP to update symbol probabilities over the alphabet (Combaz et al., <xref ref-type="bibr" rid="B5">2012</xref>). A language model is not fused with ERP evidence in these particular examples, but it has been suggested for boosting both ERP and FRP evidence assessment. Unlike these early attempts at using FRP evidence to make hard decisions based on ErrP classifier outputs, we seek Bayesian fusion of ERP, FRP, and language evidence using probabilistic generative models. The system presented in this paper automatically decides, in a probabilistic fashion, whether to select a letter to type or to proceed with more ERP/FRP evidence collection.</p>
<p>In an earlier study, we observed the potential enhancements that can be achieved through a joint probabilistic inference from all evidence (i.e., FRP, ERP, and LM), rather than using FRP as a switch (Gonzalez-Navarro et al., <xref ref-type="bibr" rid="B13">2016a</xref>; Orhan et al., <xref ref-type="bibr" rid="B26">2016</xref>). In that study, Monte Carlo simulations were performed using synthetic EEG features from models calibrated with real ERP/FRP data, and the results were simulated for five users (Gonzalez-Navarro et al., <xref ref-type="bibr" rid="B13">2016a</xref>). As our simulation results suggested, Bayesian fusion of all evidence (FRP, ERP, and LM) yields faster typing speeds for all participants without compromising accuracy. Use of ErrP in a sub-optimal fashion, by allowing FRP decisions to override ERP, also improved speed relative to not using FRP at all. However, our results indicated that Bayesian fusion of FRP with ERP, rather than treating the former as a de facto superior form of evidence, may yield better outcomes. Based on these results, we decided to conduct a new study, presented in this manuscript, to evaluate the performance of two different system strategies for a joint probabilistic inference framework. This is the first work in which we study experimental results based on data from healthy participants. We study the potential benefits of fusing feedback related potentials (FRP) with ERP and context information (LM) in a Bayesian fashion to detect the user intent.</p>
<p>To illustrate the efficacy of our approach, we use RSVP Keyboard&#x02122; (Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>), an EEG-based BCI for letter-by-letter typing, which is described in more detail in section 3. Three strategies, [P1], [P2], and [P3], are compared in terms of speed, accuracy, and information transfer rate (ITR). The EEG for this study is acquired from 12 healthy participants using RSVP Keyboard&#x02122; to complete a copy-phrase task. [P1], the baseline system, fuses LM and ERP (collected from RSVP sequences) evidence in a Bayesian fashion to infer user intent. Our novel propositions, [P2] and [P3], use a joint inference from all evidence (FRP, ERP, and LM) to make a decision. In [P2], FRP evidence is collected after every RSVP sequence, whereas in [P3], RSVP sequences are repeated multiple times until a confidence level is achieved, and the feedback is then presented as the mode of the estimated posterior (in other words, FRP evidence is collected less frequently in [P3]).</p>
</sec>
<sec id="s2">
<title>2. Proposed Graphical Model for Inference</title>
<sec>
<title>2.1. Decision Framework</title>
<p>In a typical letter-by-letter typing BCI application, the user has to select among a discrete set of <italic>task symbols</italic> from a dictionary <inline-formula><mml:math id="M1"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>,</mml:mo><mml:mi>B</mml:mi><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mi>Z</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>&#x0222A;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mo>&#x0003C;</mml:mo><mml:mo>,</mml:mo><mml:mo>-</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> where &#x0201C;&#x02212;&#x0201D; represents the space symbol and &#x0201C;&#x0003C;&#x0201D; represents the backspace symbol. Here, we examine how a BCI can infer a <italic>task symbol</italic> from different EEG evidence and prior context information. In particular, we build a decision framework that takes into account two types of EEG evidence: FRP and ERP evidence. We propose several methods for combining FRP and ERP evidence with prior context information, using real-time posterior probability updates. This BCI application utilizes a visual presentation module to detect the user intent, and the EEG collected during the visual stimulation is then employed in the decision-making procedure.</p>
<p>Different visual presentation methods can be considered in order to evoke visual potentials. The rapid serial visual presentation (RSVP) paradigm is a minimally gaze-dependent alternative to matrix presentation paradigms that aims to induce ERPs for intent detection. In the RSVP paradigm, the symbols are rapidly presented as a time series at a prefixed location on the screen in a pseudo-random order, to evoke the response when the target symbol appears (Acqualagna et al., <xref ref-type="bibr" rid="B1">2010</xref>; Orhan et al., <xref ref-type="bibr" rid="B25">2012</xref>; Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>). In this presentation scheme, each flashing letter is a trial, and in each &#x0201C;sequence,&#x0201D; a subset of the dictionary is presented. From now on, we will be referring only to inducing ERP (target) evidence when we mention an RSVP trial.</p>
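<p>As a minimal sketch of how such a sequence might be constructed, the snippet below draws a pseudo-randomly ordered subset of the dictionary. Selecting the most probable symbols under the current posterior is an illustrative assumption, not necessarily the subset selection rule of the deployed system.</p>
<preformat>
import random

# Dictionary: letters A-Z plus backspace ("&lt;") and space ("-")
DICTIONARY = [chr(c) for c in range(ord("A"), ord("Z") + 1)] + ["&lt;", "-"]

def make_rsvp_sequence(posterior, seq_len=10, rng=random.Random(0)):
    """Draw one RSVP sequence: a pseudo-randomly ordered subset of
    the dictionary. posterior maps each symbol to its current
    probability; the top seq_len symbols are chosen and shuffled."""
    ranked = sorted(DICTIONARY, key=lambda s: -posterior[s])
    sequence = ranked[:seq_len]
    rng.shuffle(sequence)  # pseudo-random presentation order
    return sequence
</preformat>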
<p><xref ref-type="fig" rid="F1">Figures 1A,B</xref> illustrate a flash of a prospect symbol and RSVP trial respectively. Due to low signal-to-noise-ratio (SNR) of EEG, the system usually requires to query the user with more than one &#x0201C;sequence&#x0201D; and &#x0201C;prospect symbol&#x0201D; to achieve a desired confidence level before making a decision. The set of &#x0201C;sequence&#x0201D; and &#x0201C;prospect symbol&#x0201D; which leads to a decision is called an &#x0201C;epoch.&#x0201D; In every epoch, it is assumed that the target symbol remains unchanged. <xref ref-type="fig" rid="F1">Figure 1C</xref> represents a schematic of an EEG epoch in the RSVP Keyboard&#x02122; including a series of letters in an ERP sequence and a feedback stimulus as a &#x0201C;prospect symbol.&#x0201D; The feedback stimulus is always presented at the end of the RSVP sequence (shown in green). In <xref ref-type="fig" rid="F1">Figures 1A,B</xref>, &#x0201C;Press Space Bar or Enter to pause&#x0201D; indicates the Pause/Play button. &#x0201C;Esc to quit&#x0201D; indicates the exit button should the participant choose to end the experimental session. Both options are added to the experimental design for the convenience of the user.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Different visual stimuli: <bold>(A)</bold> RSVP and <bold>(B)</bold> Feedback trials. <bold>(C)</bold> Schematic of an epoch with RSVP and Feedback sequences. A series of RSVP sequences including non-target and target symbols are shown at a prefixed position on the screen consecutively over time in rapid serial fashion. The RSVP sequence starts with a &#x0002B; symbol. At the end of each RSVP sequence, a prospect symbol is appended.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0001.tif"/>
</fig>
</sec>
<sec>
<title>2.2. Probabilistic Graphical Model (PGM)</title>
<p>The proposed probabilistic graphical model (PGM) that represents <italic>k</italic>th &#x0201C;epoch&#x0201D; for an EEG-based typing application is presented in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Proposed probabilistic graphical model representing the <italic>k</italic>th epoch. Here, the dashed lines show a deterministic relation, while the solid lines define a probabilistic correspondence. <italic>z</italic> (&#x00101;<sup><italic>t</italic></sup>) &#x0003D; 1: ErrP label; <italic>z</italic> (&#x00101;<sup><italic>t</italic></sup>) &#x0003D; 0: non-ErrP label. <inline-formula><mml:math id="M18"><mml:mi>y</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>: target label; <inline-formula><mml:math id="M19"><mml:mi>y</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>: non-target label. <italic>t</italic> denotes the sequence index; <italic>j</italic> denotes the trial index.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0002.tif"/>
</fig>
<p>Here, <inline-formula><mml:math id="M2"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> is a random variable which represents the user intent in epoch <italic>k</italic>; <inline-formula><mml:math id="M3"><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>|</mml:mo><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> is a subset of the dictionary <inline-formula><mml:math id="M4"><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:math></inline-formula>, treated as the &#x0201C;sequence&#x0201D; at instant <italic>t</italic> of epoch <italic>k</italic> (c denotes candidate); |<italic>A</italic><sub><italic>c</italic></sub>(<italic>t</italic>)| is the number of symbols presented in the <italic>t</italic>-th sequence; and <inline-formula><mml:math id="M5"><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">C</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the context information provided by the language model, described briefly in section 2.5. Moreover, we introduce <inline-formula><mml:math id="M6"><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:math></inline-formula>. This set is a singleton <inline-formula><mml:math id="M7"><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> which includes the prospective symbol for the query set <italic>A</italic><sub><italic>p</italic></sub>(<italic>t</italic>) at instant <italic>t</italic> (p denotes prospect). 
In addition, <inline-formula><mml:math id="M8"><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M9"><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> are the ERP and FRP evidence obtained in response to an RSVP trial <inline-formula><mml:math id="M10"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and a feedback trial &#x00101;<sup><italic>t</italic></sup>, respectively. We assume that the user intent does not change within an epoch. Hence, given that every <inline-formula><mml:math id="M11"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M12"><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> is either a target or a non-target, the intent inference can be formulated as a binary decision problem. Therefore, <italic>y</italic>(&#x000B7;) and <italic>z</italic>(&#x000B7;) correspond to binary class labels for the ERP and FRP responses. 
Hence, <inline-formula><mml:math id="M13"><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>:</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x003B4;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:msup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> has a one-to-one relationship with the true state <inline-formula><mml:math id="M14"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> such that <inline-formula><mml:math id="M15"><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> if <inline-formula><mml:math id="M16"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and 0 otherwise. Similarly, <italic>z</italic>(&#x00101;<sup><italic>t</italic></sup>) :&#x0003D; 1 &#x02212; &#x003B4;(&#x00101;<sup><italic>t</italic></sup>; <italic>a</italic><sup>&#x0002A;</sup>), since <italic>z</italic> &#x0003D; 1 (an ErrP) corresponds to a prospect symbol that does not match the intent. <italic>N</italic><sub><italic>c</italic></sub> and <italic>N</italic><sub><italic>p</italic></sub> are the maximum numbers of &#x0201C;sequences&#x0201D; and &#x0201C;prospect symbols&#x0201D; that can be used in an epoch if a desired confidence level is not reached within a reasonable duration. If we do not use FRP evidence, the right box of the graphical model in <xref ref-type="fig" rid="F2">Figure 2</xref> is eliminated and the rest remains the same. We utilize the presented graphical model to compute the posterior distribution of the intended character <inline-formula><mml:math id="M17"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> after collecting the EEG evidence and utilizing the language model evidence. The details of the posterior distribution computation are given in section 2.4. To make inference on the user intent, we compare three different evidence acquisition paradigms (one for each strategy). These paradigms are discussed in section 2.3.</p>
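<p>In code, the binary labels <italic>y</italic>(&#x000B7;) and <italic>z</italic>(&#x000B7;) are simple indicator functions of the true intent; the following short sketch mirrors the definitions above.</p>
<preformat>
def y(a_jt, a_star):
    """ERP trial label: 1 if the flashed symbol is the intended one
    (target), 0 otherwise."""
    return 1 if a_jt == a_star else 0

def z(a_bar_t, a_star):
    """FRP trial label: 1 (ErrP expected) if the prospect symbol does
    NOT match the intent, 0 (non-ErrP) if it does."""
    return 0 if a_bar_t == a_star else 1
</preformat>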
</sec>
<sec>
<title>2.3. Evidence Acquisition Paradigms</title>
<p>Here, we present three different evidence acquisition paradigms: (i) [P1], (ii) [P2], and (iii) [P3], as shown in <xref ref-type="fig" rid="F3">Figure 3</xref>; a short code sketch contrasting how the paradigms solicit FRP evidence follows the list.</p>
<list list-type="order">
<list-item><p>[P1] (<italic>Baseline</italic>): In this paradigm, a set of pseudo-randomly ordered stimuli is presented to the user to elicit ERPs. Each stimulus is a trial. Sets of trials that are presented with no time gaps are called a sequence <italic>A</italic><sub><italic>c</italic></sub>(<italic>t</italic>). Every sequence can contain at most one target stimulus. After each sequence, the posterior distribution over the character set is computed, and a decision is made if the maximum probability exceeds a predefined threshold or a time limit is reached. Otherwise, the system continues with more sequences.</p>
<p>This paradigm is the baseline for RSVP Keyboard&#x02122; and does not include FRP evaluation.</p></list-item>
<list-item><p>[P2] (<italic>Always FRP</italic>): In this paradigm, we first query the user with ERP sequences in a fashion similar to [P1]; the mode of the posterior is then depicted as a <italic>prospect symbol</italic>, i.e., <italic>A</italic><sub><italic>p</italic></sub>(<italic>t</italic>). <italic>A</italic><sub><italic>p</italic></sub>(<italic>t</italic>) is then presented at a prefixed location of the screen, as in regular RSVP trials, to induce an FRP in the EEG. Depending on the instructions given to the user, this FRP may take the form of an error-related potential (ErrP) indicating that an incorrect prospect symbol was presented. The EEG collected in response to each prospect symbol is used to update the posterior using the PGM shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<p>This paradigm also utilizes MAP inference, in a procedure similar to [P1].</p></list-item>
<list-item><p>[P3] (<italic>Confirm FRP</italic>): This paradigm is similar to [P1] and [P2], but the top candidate is shown as a <italic>prospect symbol</italic> to generate FRP evidence only if its posterior probability exceeds a threshold. The graphical model presented in <xref ref-type="fig" rid="F2">Figure 2</xref> is directly used to fuse the ERP and FRP evidence to infer the user intent.</p></list-item>
</list>
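<p>The three paradigms differ only in when a prospect symbol is flashed to solicit FRP evidence. A compact way to express this, in the spirit of the select-query block of <xref ref-type="fig" rid="F4">Figure 4</xref>, is sketched below; the parameter name alpha_p for the prospect threshold of [P3] is an illustrative assumption.</p>
<preformat>
def should_show_prospect(paradigm, posterior_max, alpha_p=0.7):
    """Decide whether to flash a prospect symbol after the current
    RSVP sequence. Threshold value and names are illustrative."""
    if paradigm == "P1":   # baseline: never collect FRP evidence
        return False
    if paradigm == "P2":   # always FRP: prospect after every sequence
        return True
    if paradigm == "P3":   # confirm FRP: only once confident enough
        return posterior_max >= alpha_p
    raise ValueError(paradigm)
</preformat>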
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Evidence acquisition paradigms, experimental setup, and visual stimuli type. <bold>(A)</bold> Three evidence acquisition paradigms. First row shows [P1], second row shows [P2], and third row shows [P3]. <bold>(B)</bold> Visual stimuli are projected on a black screen. The EEG evidence collected after the presentation of a typical sequence and prospect symbol stimuli are used for detecting user intent. <bold>(C)</bold> Three different visual stimuli, RSVP sequence, prospect symbol, and decision symbol.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0003.tif"/>
</fig>
</sec>
<sec>
<title>2.4. Maximum a Posteriori (MAP) Inference</title>
<p>The decision-making process utilizes a maximum a posteriori (MAP) inference mechanism for intent detection. The graphical model presented in <xref ref-type="fig" rid="F2">Figure 2</xref> is used to compute the posterior distribution of the intended symbol, after evaluating the ERP and FRP likelihoods in the EEG recorded during ERP and FRP sequences and using context priors. A general decision framework for the three evidence acquisition paradigms is presented in <xref ref-type="fig" rid="F4">Figure 4</xref>. According to this framework, before a final decision is made, the ERP and FRP evidence corresponding to multiple sequences is aggregated and fused with the context prior. The different query selection methods [P<sub><italic>i</italic></sub>], <italic>i</italic> &#x0003D; {1, 2, 3}, are presented in <xref ref-type="fig" rid="F4">Figure 4</xref> (see section 2.3 for more details).</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p><bold>(A)</bold> General decision framework for the three evidence acquisition paradigms [P1], [P2], and [P3]. The only part that differs in each paradigm is the select-query block. The BCI channel decides which query is going to be presented; the evidence from the query is collected in the user channel. &#x003B1;<sub><italic>d</italic></sub> is the decision threshold, and <italic>N</italic><sub><italic>d</italic></sub> is the total number of sequences (including ERP &#x0002B; FRP). A decision is made when the posterior probability of the selected symbol passes the threshold &#x003B1;<sub><italic>d</italic></sub>, or when the total number of sequences is reached (denoted with &#x02264; &#x003B1;<sub><italic>d</italic></sub>/<italic>N</italic><sub><italic>d</italic></sub>). In <bold>(B)</bold>, t%2 stands for t <italic>mod</italic> 2 (the modulo operation), indicating that the prospect symbol is shown once after every RSVP sequence.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0004.tif"/>
</fig>
<p>We estimate the prospective symbol <inline-formula><mml:math id="M20"><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> at instant <italic>t</italic> as the mode of the posterior distribution:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M21"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">max</mml:mo></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow></mml:munder></mml:mstyle><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>;</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M22"><mml:msubsup><mml:mrow><mml:mi>&#x000E2;</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> is the estimated user intent; <inline-formula><mml:math id="M23"><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the ERP evidence for all the sequences in epoch <italic>k</italic> up to <italic>t</italic>; <inline-formula><mml:math id="M24"><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> is the set of observations for the query set <italic>A</italic><sub><italic>c</italic></sub>(<italic>t</italic>); <inline-formula><mml:math id="M25"><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>:</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> is the FRP EEG evidence for all the observed prospective sequences in epoch <italic>k</italic> up to instant <italic>t</italic>; and <inline-formula><mml:math id="M26"><mml:msub><mml:mrow><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> is the set of observation vectors for the prospective set <italic>A</italic><sub><italic>p</italic></sub>(<italic>t</italic>). For [P2] and [P3], the FRP EEG evidence <inline-formula><mml:math id="M27"><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> is obtained in response to &#x00101;<sup><italic>t</italic></sup>.</p>
<p>To compute the posterior distribution in (1), we utilize the assumptions of the graphical model presented in <xref ref-type="fig" rid="F2">Figure 2</xref>. According to this PGM, the ERP and FRP evidence and the context information are independent when the intended symbol <italic>a</italic><sub><italic>k</italic></sub> is given. Then, for epoch <italic>k</italic> and time instant <italic>t</italic>, after observing the query sets <italic>A</italic><sub><italic>c</italic></sub>(<italic>t</italic>) and <italic>A</italic><sub><italic>p</italic></sub>(<italic>t</italic>), the maximum a posteriori estimate can be computed using the objective function in (2).</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M28"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mi>&#x000E2;</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">max</mml:mo></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="-tex-caligraphic">D</mml:mi></mml:mrow></mml:mrow></mml:munder></mml:mstyle><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo stretchy="false">|</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>We can further assume that, conditioned on the unknown symbol <italic>a</italic><sub><italic>k</italic></sub>, all EEG evidence from different trials is independent, and simplify the first two terms of Equation (2) as:</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M29"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo>&#x0220F;</mml:mo></mml:mrow><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none none none none none none none none none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>N</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:munder></mml:mstyle><mml:mfrac><mml:mrow><mml:mi>p</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">|</mml:mo><mml:mi>y</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">|</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo 
stretchy="true">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E4"><label>(4)</label><mml:math id="M30"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mi mathvariant="-tex-caligraphic">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo stretchy="false">|</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo>&#x0220F;</mml:mo></mml:mrow><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none none none none none none none none none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>N</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext><mml:mo>,</mml:mo><mml:mi>z</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:munder></mml:mstyle><mml:mfrac><mml:mrow><mml:mi>p</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">|</mml:mo><mml:mi>z</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x00101;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">|</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>According to the inference equation defined in (2), we need to estimate (i) the context prior <inline-formula><mml:math id="M31"><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mstyle class="text"><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mstyle><mml:mo>|</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>, estimated using a language model, (ii) the class-conditional distributions over the ERP evidence, <italic>p</italic>(<italic>e</italic><sub><italic>c</italic></sub>|1) for the target and <italic>p</italic>(<italic>e</italic><sub><italic>c</italic></sub>|0) for the non-target class, and (iii) the class-conditional distributions over the FRP EEG evidence, <italic>p</italic>(<italic>e</italic><sub><italic>p</italic></sub>|0) and <italic>p</italic>(<italic>e</italic><sub><italic>p</italic></sub>|1). We have implemented the proposed ERP and FRP data acquisition paradigms using the RSVP Keyboard&#x02122; framework (Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>).</p>
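<p>The class-conditional densities above must be estimated from labeled calibration data. One common choice for scalar classifier scores, used here purely as an illustrative assumption rather than as the estimator of the deployed system, is a Gaussian kernel density estimate per class:</p>
<preformat>
import numpy as np
from scipy.stats import gaussian_kde

def fit_class_conditionals(scores, labels):
    """Fit p(e | class) for a scalar evidence score with a Gaussian
    kernel density estimate per class. scores: calibration-session
    classifier outputs; labels: known binary classes (target/non-target
    for ERP, or non-ErrP/ErrP for FRP). KDE is an illustrative choice,
    not necessarily the estimator used by the deployed system."""
    scores = np.asarray(scores, dtype=float)
    labels = np.asarray(labels, dtype=int)
    kde0 = gaussian_kde(scores[labels == 0])
    kde1 = gaussian_kde(scores[labels == 1])
    # Return callables p(e | 0) and p(e | 1) usable in map_decision.
    return (lambda e: kde0(e)[0], lambda e: kde1(e)[0])
</preformat>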
</sec>
<sec>
<title>2.5. Context Information</title>
<p>To compute <inline-formula><mml:math id="M32"><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mstyle class="text"><mml:mtext class="textrm" mathvariant="normal">a</mml:mtext></mml:mstyle><mml:mo>|</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>, we utilize an n-gram language model, which provides a prior probability over every symbol in the dictionary. We have previously shown that fusing context information with EEG evidence effectively improves system performance (Orhan et al., <xref ref-type="bibr" rid="B24">2013</xref>; Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>). An n-gram LM is a Markov model of order <italic>n</italic> &#x02212; 1. Let <inline-formula><mml:math id="M33"><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, where <inline-formula><mml:math id="M34"><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> is the <italic>m</italic><sup>th</sup> previously typed character. Then:</p>
<disp-formula id="E5"><label>(5)</label><mml:math id="M35"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo stretchy="false">|</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo stretchy="false">|</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>In our system, we use a 6-gram language model, which is trained on the NY Times portion of the English Gigaword corpus (Roark et al., <xref ref-type="bibr" rid="B27">2010</xref>).</p>
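<p>As an illustration of Equation (5), the following toy Python sketch (hypothetical names; the actual system uses a 6-gram model trained on the Gigaword corpus, not this add-one-smoothed counter) estimates character n-gram prior probabilities from raw text.</p>
<preformat>
from collections import Counter, defaultdict

def train_ngram(text, n=6):
    """Count character n-grams: counts[context][symbol]."""
    counts = defaultdict(Counter)
    for i in range(len(text) - n + 1):
        context, symbol = text[i:i + n - 1], text[i + n - 1]
        counts[context][symbol] += 1
    return counts

def lm_prior(counts, context, alphabet, n=6):
    """P(a | C) as in Equation (5), with add-one smoothing so every
    symbol in the alphabet receives a nonzero prior."""
    ctx = context[-(n - 1):]
    c = counts.get(ctx, Counter())
    total = sum(c.values()) + len(alphabet)
    return {a: (c[a] + 1) / total for a in alphabet}
</preformat>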
</sec>
</sec>
<sec id="s3">
<title>3. Human-in-the-Loop Experiments</title>
<p>We perform a set of online experiments to compare the effects of [P1], [P2], and [P3] on system performance. We collected data from 12 healthy participants (5 female), aged 22&#x02013;38 years. After a calibration session, participants were asked to perform a copy phrase task of RSVP Keyboard&#x02122;. The data were collected according to the guidelines of an IRB-approved protocol at Northeastern University (IRB 130107).</p>
<sec>
<title>3.1. Method</title>
<p>In RSVP Keyboard&#x02122;, the EEG signal is acquired using a g.USBamp biosignal amplifier with active g.Butterfly electrodes at a sampling rate of 256 Hz, from 16 EEG sites (according to the International 10/20 configuration): Fp1, Fp2, F3, F4, Fz, Fc1, Fc2, Cz, P1, P2, C1, C2, Cp3, Cp4, P5, and P6. To improve the signal-to-noise ratio (SNR) and to eliminate drifts, the signal is filtered by an FIR linear-phase bandpass filter with cutoff frequencies [1.5, 42] Hz and a notch filter at 60 Hz. Typically, a wideband filter, such as a [0.05&#x02013;30] Hz filter, is recommended to avoid potential distortion of ERP waveforms (Luck, <xref ref-type="bibr" rid="B20">2014</xref>). In our work, temporally windowed EEG signals are filtered by the [1.5, 42] Hz bandpass filter (FIR, linear phase, length 153, 0 DC-gain) to eliminate low-frequency deviations and high-frequency noise. Lower high-cutoff frequencies may also be used (Orhan et al., <xref ref-type="bibr" rid="B26">2016</xref>).</p>
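<p>For illustration, the following minimal Python sketch (assuming NumPy and SciPy; the zero-phase notch filtering and the notch quality factor are illustrative choices, not the paper&#x00027;s exact implementation) constructs a comparable length-153 linear-phase FIR bandpass filter with zero DC gain, plus a 60 Hz notch.</p>
<preformat>
import numpy as np
from scipy.signal import firwin, iirnotch, filtfilt, lfilter

FS = 256  # sampling rate (Hz)

# Linear-phase FIR bandpass, length 153, passband [1.5, 42] Hz;
# pass_zero=False yields zero DC gain, which removes slow drifts.
bp = firwin(153, [1.5, 42.0], pass_zero=False, fs=FS)

# 60 Hz notch for line noise; Q=30 is an illustrative quality factor.
b_notch, a_notch = iirnotch(60.0, Q=30.0, fs=FS)

def filter_eeg(eeg):
    """eeg: (channels, samples) array; returns the filtered signal."""
    out = lfilter(bp, [1.0], eeg, axis=-1)          # causal FIR bandpass
    out = filtfilt(b_notch, a_notch, out, axis=-1)  # zero-phase notch
    return out
</preformat>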
<p>In order to capture the ERP and FRP while omitting possible motor responses (Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>), EEG from a time window of [0, 500) ms after each flash&#x00027;s onset is processed as the raw data for each trial. To further pre-process after filtering, the EEG data for each channel are first down-sampled by 2 and projected to a lower dimensional space using principal component analysis (PCA); finally, data from every channel are concatenated to form the feature vector <inline-formula><mml:math id="M36"><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>y</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> for the <italic>i</italic><sup>th</sup> trial of type <italic>j</italic>, as defined in Equation (6). More specifically, <inline-formula><mml:math id="M37"><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>y</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> represents FRP evidence for the prospective symbol in the <italic>i</italic><sup>th</sup> trial, and <inline-formula><mml:math id="M38"><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>y</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> represents ERP evidence for the query in the <italic>i</italic><sup>th</sup> trial. After pre-processing,</p>
<disp-formula id="E6"><label>(6)</label><mml:math id="M39"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>y</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>v</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mtext>&#x000A0;</mml:mtext><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>v</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>v</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211D;</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M41"><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>v</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula> is the multivariate measurement collected from channel <italic>n</italic>. Note that here <italic>N</italic><sub><italic>ch</italic></sub> &#x0003D; 16 is the number of channels and <italic>N</italic><sub><italic>t</italic></sub> is the number of time samples for each channel after applying PCA.</p>
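<p>A minimal sketch of this feature extraction step follows (assuming NumPy and scikit-learn; the retained-variance setting for PCA is an illustrative choice, since the reduced dimension <italic>N</italic><sub><italic>t</italic></sub> is determined from the calibration data).</p>
<preformat>
import numpy as np
from sklearn.decomposition import PCA

def build_features(trials, keep_variance=0.95):
    """Sketch of the per-trial feature vector in Equation (6).

    trials : (n_trials, n_channels, n_samples) array of filtered
             [0, 500) ms windows following each flash onset.
    Returns an (n_trials, N_ch * N_t) matrix, one row per trial.
    """
    x = trials[:, :, ::2]                  # down-sample by 2
    n_trials, n_ch, _ = x.shape
    per_channel = []
    for ch in range(n_ch):
        # One PCA per channel; keep_variance=0.95 is illustrative.
        pca = PCA(n_components=keep_variance)
        per_channel.append(pca.fit_transform(x[:, ch, :]))
    return np.concatenate(per_channel, axis=1)
</preformat>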
<p>We then perform a quadratic projection of these feature vectors onto a one dimensional space that maximizes the separation between the two possible classes, <italic>non-target</italic> and <italic>target</italic>. This projection is obtained as the log-likelihood ratio of two multivariate normal density functions estimated using regularized discriminant analysis (RDA) over the target and non-target classes. <inline-formula><mml:math id="M42"><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M43"><mml:msubsup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> are the one dimensional ERP and FRP evidences, respectively. We estimate the class conditional distributions <italic>p</italic>(<italic>e</italic><sub><italic>c</italic></sub>(<italic>a</italic>)|1), <italic>p</italic>(<italic>e</italic><sub><italic>c</italic></sub>(<italic>a</italic>)|0) over the ERP evidences and <italic>p</italic>(<italic>e</italic><sub><italic>p</italic></sub>(<italic>a</italic>)|1), <italic>p</italic>(<italic>e</italic><sub><italic>p</italic></sub>(<italic>a</italic>)|0) over the FRP evidences using kernel density estimation (KDE). We employ a Gaussian kernel with a bandwidth computed using Silverman&#x00027;s rule from the recorded labeled data (Silverman, <xref ref-type="bibr" rid="B29">1986</xref>). Note that these distributions are computed after collecting data in a calibration session; the estimated densities are then used in test sessions.</p>
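<p>The two stages described above can be sketched as follows (assuming SciPy; the RDA shrinkage and regularization steps are omitted for brevity, so the class means and covariances are taken as given).</p>
<preformat>
import numpy as np
from scipy.stats import gaussian_kde, multivariate_normal

def llr_score(y, mean1, cov1, mean0, cov0):
    """Scalar projection e = log p(y|1) - log p(y|0), with the class
    statistics estimated by RDA on calibration data."""
    return (multivariate_normal.logpdf(y, mean1, cov1)
            - multivariate_normal.logpdf(y, mean0, cov0))

def fit_score_densities(scores_target, scores_nontarget):
    """Class conditional densities over the scalar scores, using a
    Gaussian kernel with Silverman's rule, as in the paper."""
    p1 = gaussian_kde(scores_target, bw_method="silverman")
    p0 = gaussian_kde(scores_nontarget, bw_method="silverman")
    return p1, p0  # evaluate as p1(e) and p0(e) in test sessions
</preformat>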
<p>Recall that the EEG (ERP and FRP) evidence and the language model prior are fused under the assumptions of the graphical model presented in <xref ref-type="fig" rid="F2">Figure 2</xref> to obtain the posterior probability mass function (PMF). The posterior probabilities are then used in a MAP inference framework to make a joint decision, as described in section 2.</p>
</sec>
<sec>
<title>3.2. Experiment Design</title>
<p>All users participate in three <italic>copy phrase</italic> tasks, each task being performed on a separate day. On each day, the user performs the task under one of the [P1], [P2], and [P3] paradigms. The order of the paradigms is randomly assigned for each user to avoid learning effects on typing performance.</p>
<p>A <italic>copy phrase</italic> task includes typing the following ten phrases:</p>
<list list-type="order">
<list-item><p>THE DOG &#x0201C;<bold>WILL</bold>&#x0201D; BITE YOU,</p></list-item>
<list-item><p>GO TO &#x0201C;<bold>THE</bold>&#x0201D; MOVIES,</p></list-item>
<list-item><p>GOOD HEALTH &#x0201C;<bold>CARE</bold>&#x0201D; IS CRUCIAL,</p></list-item>
<list-item><p>SUPER &#x0201C;<bold>BOWL</bold>&#x0201D; SUNDAY,</p></list-item>
<list-item><p>EAT THREE TIMES A &#x0201C;<bold>DAY</bold>,&#x0201D;</p></list-item>
<list-item><p>THE THIRD &#x0201C;<bold>SEAT</bold>&#x0201D; FROM THE LEFT,</p></list-item>
<list-item><p>MY PARENTS &#x0201C;<bold>FIND</bold>&#x0201D; ME FUNNY,</p></list-item>
<list-item><p>SHE ALSO &#x0201C;<bold>PAID</bold>&#x0201D; FOR LUNCH,</p></list-item>
<list-item><p>SOMETHING THAT &#x0201C;<bold>BUYS</bold>&#x0201D; US TIME,</p></list-item>
<list-item><p>THE COMPOSER &#x0201C;<bold>SITS</bold>&#x0201D; QUIETLY,</p></list-item>
</list>
<p>Each phrase includes a missing word, and the users are asked to complete these words. Here, the target words are written in bold. The entire sentence is shown to the user before each phrase is typed. We use phrases with different difficulty levels in terms of the prior probability provided by the language model. For instance, words such as &#x0201C;<bold>THE</bold>&#x0201D; or &#x0201C;<bold>WILL</bold>&#x0201D; are very easy to type because their initial letters are very likely under the LM prior, whereas words such as &#x0201C;<bold>PAID</bold>&#x0201D; or &#x0201C;<bold>BUYS</bold>&#x0201D; are very difficult to type. <xref ref-type="fig" rid="F5">Figure 5</xref> shows an example of a user performing the copy phrase task.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Copy phrase task performed on EEG-based BCI using RSVP Keyboard&#x02122; paradigm. The user is asked to type <bold>WILL</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0005.tif"/>
</fig>
<p>Prior to each copy phrase task, all participants perform two <italic>calibration</italic> tasks: <italic>calibration</italic><sub><italic>ERP</italic></sub> and <italic>calibration</italic><sub><italic>FRP</italic></sub>. <italic>Calibration</italic><sub><italic>ERP</italic></sub> is used to learn the statistics of the ERP classifier (target vs. non-target), using the calibration mode of the system to record labeled EEG data. Typically, each <italic>calibration</italic><sub><italic>ERP</italic></sub> session consists of 100 sequences of symbols. Before each sequence, the user is asked to attend to a particular symbol; then a sequence consisting of the target symbol and 9 non-target symbols is presented to the user in random order. <italic>Calibration</italic><sub><italic>FRP</italic></sub> is used to learn the statistics of the FRP classifier (correct vs. incorrect), using the copy mode of the system to record labeled EEG data. To obtain compatible evidence, we simulated the [P2] and [P3] paradigms to collect supervised FRP EEG data.</p>
<p>During <italic>calibration</italic><sub><italic>FRP</italic></sub>, we modify the LM probabilities in order to record enough labeled data for the correct and incorrect classes. Users are asked to rest between <italic>calibration</italic> and <italic>copy phrase</italic> tasks and to continue once they feel ready.</p>
<p>The length of each trial is 500 ms for all paradigms; there are 10 trials in one ERP sequence and 1 trial (i.e., the prospect symbol followed by a question mark) in one FRP sequence. <italic>A</italic><sub><italic>c</italic></sub>(<italic>t</italic>) is selected based on the posterior probability (fusion of EEG evidence and LM). In [P1], the trial symbol is shown for 150 ms followed by a 50 ms blank screen (i.e., the inter-trial interval). The interval between successive sequences is 500 ms. In [P2] and [P3], after ERP evidence is collected, the trial symbol is shown for 0.9 s followed by a 0.1 s blank screen for the FRP evidence. The decision symbol is shown for 2 s. The maximum number of sequences allowed in an epoch is 100 for calibration tasks (simply because during calibration we have a single combined epoch and do not make decisions). In copy phrase tasks, the maximum number of sequences allowed in an epoch is 8 (that is, a decision is made after at most 8 sequences in an epoch). In paradigm [P3], the posterior probability threshold for showing the prospect symbol is set to &#x003B1;<sub><italic>p</italic></sub> &#x0003D; 0.66. Note that we do not employ an &#x003B1;<sub><italic>p</italic></sub> in paradigm [P2] because the prospect symbol is already shown after every ERP sequence. For all three paradigms ([P1], [P2], and [P3]), the posterior probability threshold for decision is set to &#x003B1;<sub><italic>d</italic></sub> &#x0003D; 0.9.</p>
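<p>Putting these parameters together, the following hypothetical control loop (assuming NumPy; erp_llr_fn and frp_llr_fn are stand-ins for stimulus presentation plus classifier scoring, not actual RSVP Keyboard&#x02122; functions) sketches one copy phrase epoch under the thresholds &#x003B1;<sub><italic>p</italic></sub> &#x0003D; 0.66 and &#x003B1;<sub><italic>d</italic></sub> &#x0003D; 0.9.</p>
<preformat>
import numpy as np

ALPHA_P = 0.66  # threshold to show the prospect symbol in [P3]
ALPHA_D = 0.90  # decision threshold (all paradigms)
MAX_SEQ = 8     # maximum sequences per epoch in copy phrase tasks

def fuse(post, llr):
    """Multiply the current PMF by per-symbol likelihood ratios."""
    p = post * np.exp(llr - llr.max())
    return p / p.sum()

def run_epoch(lm_prior, erp_llr_fn, frp_llr_fn, paradigm="P3"):
    """One epoch: alternate ERP sequences with (optional) FRP trials
    until the posterior of some symbol exceeds ALPHA_D."""
    post = np.asarray(lm_prior, dtype=float)
    for _ in range(MAX_SEQ):
        post = fuse(post, erp_llr_fn(post))   # one ERP sequence
        if paradigm == "P2" or (paradigm == "P3"
                                and post.max() > ALPHA_P):
            k = int(np.argmax(post))          # current top candidate
            post = fuse(post, frp_llr_fn(k))  # one FRP trial
        if post.max() > ALPHA_D:
            break                             # confident decision
    return int(np.argmax(post))
</preformat>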
</sec>
</sec>
<sec id="s4">
<title>4. Analysis Results</title>
<p>Using the data collected in the human-in-the-loop copy phrase and calibration experiments described in section 3, we report the effect of the three evidence acquisition paradigms: [P1] (<italic>Baseline</italic>), [P2], and [P3].</p>
<sec>
<title>4.1. Human-in-the-Loop Calibration Experiment Results</title>
<p>Using the supervised data collected during <italic>calibration</italic><sub><italic>FRP</italic></sub>, we first analyze the average EEG recorded in response to correct and incorrect feedback for the two evidence acquisition paradigms, [P2] and [P3]. <xref ref-type="fig" rid="F6">Figure 6</xref> shows the average FRPs for the correct and incorrect feedback trials of 12 users for the two scenarios that use FRP, [P2] and [P3]. The results show the presence of the ErrP response in both [P2] and [P3]. As can be seen in <xref ref-type="fig" rid="F6">Figures 6A,B</xref>, the waveform in response to incorrect feedback is characterized by a positive component observed around 350 ms after the delivery of the incorrect feedback, representing a visually evoked potential (VEP). We do not observe this positive response after correct feedback, as shown in <xref ref-type="fig" rid="F6">Figures 6C,D</xref>. Upon presentation of the incorrect feedback, a negative component at 50&#x02013;100 ms is also observed. In addition, <xref ref-type="fig" rid="F6">Figures 6A&#x02013;D</xref> show the scalp topography at different time instants, {100, 320, 400} ms. Based on these results, we observe higher separability between the EEG time series recorded in response to incorrect and correct feedback around the 320&#x02013;350 ms window. Finally, <xref ref-type="fig" rid="F7">Figures 7A,B</xref> show the average FRPs for correct and incorrect feedback across 12 users and 16 electrodes for [P2] and [P3], respectively. From <xref ref-type="fig" rid="F7">Figures 7A,B</xref>, we observe that when the [P3] paradigm is performed, the amplitude of this positive component (around 350 ms) for the incorrect stimuli is slightly lower than in [P2], as can be inferred from the amplitude difference between the green (non-ErrP) and red (ErrP) lines for [P2] and [P3].</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Average EEG responses of 12 users for <bold>(A)</bold> incorrect [P2], <bold>(B)</bold> incorrect [P3], <bold>(C)</bold> correct [P2] and <bold>(D)</bold> correct [P3].</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0006.tif"/>
</fig>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Average EEG response for correct and incorrect feedback of 12 users and 16 electrodes for <bold>(A)</bold> [P2] and <bold>(B)</bold> [P3].</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0007.tif"/>
</fig>
<p>We then compare classification accuracies across the different acquisition paradigms, employing AUC values as the measure of EEG evidence classification accuracy. In particular, using the calibration data obtained in the human-in-the-loop calibration experiments (<italic>calibration</italic><sub><italic>ERP</italic></sub> and <italic>calibration</italic><sub><italic>FRP</italic></sub>) described in section 3.2, we compare the offline target vs. non-target stimulus and correct vs. incorrect feedback classification results for the three data acquisition paradigms, [P1], [P2], and [P3]. <xref ref-type="fig" rid="F8">Figure 8A</xref> compares the areas under the receiver operating characteristic curves (AUCs) for the FRP evidences of each user under the acquisition paradigms [P2] and [P3]. Similarly, <xref ref-type="fig" rid="F8">Figure 8B</xref> shows the ERP classification AUCs for each user under [P1], [P2], and [P3]. AUC values are calculated by cross validation of the classifier&#x00027;s performance on the training (calibration) data sets. In 10 of the 12 users tested, the classification AUC for paradigm [P2] is larger than for [P3], as observed in <xref ref-type="fig" rid="F8">Figure 8A</xref>. This may be a result of experiment [P2] being more controlled: since each RSVP sequence is appended with a prospect symbol in paradigm [P2], the user always knows when the feedback is going to be presented, as opposed to [P3]. Comparing the calibration results in <xref ref-type="fig" rid="F8">Figures 8A,B</xref>, we see that for most users the ERP calibration results have higher AUCs than the FRP classification. This difference in classification AUCs can be due to the fact that the number of observations collected during ERP calibration is higher than the number that can be collected during FRP calibration.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>AUCs for 12 users for the two calibration tasks <bold>(A)</bold> <italic>calibration</italic><sub><italic>FRP</italic></sub> and <bold>(B)</bold> <italic>calibration</italic><sub><italic>ERP</italic></sub>, for the three evidence acquisition paradigms [P1], [P2], and [P3]. Note that for [P1] there is no FRP calibration task, since this paradigm does not use FRP evidence; the absence of FRP in [P1] is denoted with &#x0201C;/ - [P1]&#x0201D; and is shown in black. For [P1]: <italic>AUC</italic><sub><italic>FRP</italic></sub> &#x0003D; 0, <italic>AUC</italic><sub><italic>ERP</italic></sub> &#x0003D; 0.8138; for [P2]: <italic>AUC</italic><sub><italic>FRP</italic></sub> &#x0003D; 0.7966, <italic>AUC</italic><sub><italic>ERP</italic></sub> &#x0003D; 0.8308; for [P3]: <italic>AUC</italic><sub><italic>FRP</italic></sub> &#x0003D; 0.7341, <italic>AUC</italic><sub><italic>ERP</italic></sub> &#x0003D; 0.8300.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0008.tif"/>
</fig>
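<p>A minimal sketch of such a cross-validated AUC computation follows (assuming scikit-learn; regularized quadratic discriminant analysis stands in for the paper&#x00027;s RDA classifier, and the fold count is an illustrative choice).</p>
<preformat>
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score

def calibration_auc(features, labels, folds=10):
    """Cross-validated AUC on calibration data: hold out each fold,
    score it with the classifier trained on the rest, and compute
    the AUC over the pooled held-out scores."""
    clf = QuadraticDiscriminantAnalysis(reg_param=0.1)  # mild regularization
    scores = cross_val_predict(clf, features, labels, cv=folds,
                               method="predict_proba")[:, 1]
    return roc_auc_score(labels, scores)
</preformat>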
</sec>
<sec>
<title>4.2. Human-in-the-Loop Copy Phrase Experiment Results</title>
<p>Using the data collected during the three <italic>copy phrase</italic> tasks, we analyze the typing performance of the three evidence acquisition paradigms. As explained in section 3.2, each <italic>copy phrase</italic> task includes typing ten different phrases with different difficulty levels. <xref ref-type="table" rid="T1">Table 1</xref> shows the typing accuracy of the three evidence acquisition paradigms for all users in terms of two measures: accuracy in typing a letter correctly (ATL), the total number of correctly typed letters divided by the total number of typed letters; and probability of phrase completion (PPC), the total number of correctly typed phrases divided by the total number of phrases (a computational sketch of both measures follows <xref ref-type="table" rid="T1">Table 1</xref>). We observe that both the [P2] and [P3] paradigms improve typing accuracy compared to [P1]. As shown in <xref ref-type="table" rid="T1">Table 1</xref>, none of the users is able to complete all 10 phrases correctly using [P1]. A paired t-test is also performed on the ATLs to compare typing accuracies among the paradigms across the 12 users. In most EEG-based BCI systems, the signal recorded from multiple channels along the scalp is assumed to be a Gaussian process with unknown covariance and mean (Gonzalez-Navarro et al., <xref ref-type="bibr" rid="B14">2016b</xref>); under this Gaussianity assumption, applying a t-test is plausible. The results are presented in <xref ref-type="table" rid="T3">Table 3</xref>, where we observe very low p-values for [P2] vs. [P1] and [P3] vs. [P1]; however, no significant difference between [P2] and [P3] is observed.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Typing performance results for 12 subjects performing a copy task using RSVP Keyboard&#x02122; for three different strategies [P1], [P2], and [P3].</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>P1</bold></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>P2</bold></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>P3</bold></th>
</tr>
<tr>
<th valign="top" align="left"><bold>User</bold></th>
<th valign="top" align="center"><bold>ATL</bold></th>
<th valign="top" align="center"><bold>PPC</bold></th>
<th valign="top" align="center"><bold>ATL</bold></th>
<th valign="top" align="center"><bold>PPC</bold></th>
<th valign="top" align="center"><bold>ATL</bold></th>
<th valign="top" align="center"><bold>PPC</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.70</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">1.00</td>
</tr>
<tr>
<td valign="top" align="left">2</td>
<td valign="top" align="center">0.82</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">1.00</td>
</tr>
<tr>
<td valign="top" align="left">3</td>
<td valign="top" align="center">0.64</td>
<td valign="top" align="center">0.70</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">4</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.60</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.81</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">5</td>
<td valign="top" align="center">0.74</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.80</td>
</tr>
<tr>
<td valign="top" align="left">6</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">7</td>
<td valign="top" align="center">0.76</td>
<td valign="top" align="center">0.70</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">8</td>
<td valign="top" align="center">0.69</td>
<td valign="top" align="center">0.60</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.83</td>
<td valign="top" align="center">1.00</td>
</tr>
<tr>
<td valign="top" align="left">9</td>
<td valign="top" align="center">0.78</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.75</td>
<td valign="top" align="center">0.70</td>
<td valign="top" align="center">0.76</td>
<td valign="top" align="center">0.80</td>
</tr>
<tr>
<td valign="top" align="left">10</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">11</td>
<td valign="top" align="center">0.78</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.64</td>
<td valign="top" align="center">0.70</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">1.00</td>
</tr>
<tr style="border-bottom: thin solid #000000;">
<td valign="top" align="left">12</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.80</td>
<td valign="top" align="center">0.83</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.78</td>
<td valign="top" align="center">0.80</td>
</tr> <tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.75</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.91</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>ATL represents the accuracy in typing a letter correctly, and PPC is the probability of phrase completion</italic>.</p>
</table-wrap-foot>
</table-wrap>
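<p>As referenced above, a minimal computational sketch of the two accuracy measures follows (assuming Python; the position-by-position letter comparison in ATL is a simplifying assumption of this sketch).</p>
<preformat>
def atl(typed, target):
    """Accuracy of typing a letter: correctly typed letters divided
    by all typed letters, compared position by position."""
    correct = sum(1 for t, g in zip(typed, target) if t == g)
    return correct / max(len(typed), 1)

def ppc(typed_phrases, target_phrases):
    """Probability of phrase completion: fraction of phrases typed
    exactly as the target."""
    done = sum(1 for t, g in zip(typed_phrases, target_phrases) if t == g)
    return done / len(target_phrases)
</preformat>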
<p>Here, we use the information transfer rate (ITR) (Obermaier et al., <xref ref-type="bibr" rid="B23">2001</xref>) as another performance measure. ITR summarizes accuracy and speed in a single metric and is commonly used to measure BCI performance. <xref ref-type="fig" rid="F9">Figure 9</xref> illustrates the ITR (bits/sequence) values for all subjects, and <xref ref-type="table" rid="T2">Table 2</xref> reports the mean of the ITR values over the 12 subjects for the three strategies (a sketch of the underlying ITR formula follows <xref ref-type="table" rid="T2">Table 2</xref>). From <xref ref-type="fig" rid="F9">Figure 9</xref> and <xref ref-type="table" rid="T2">Table 2</xref>, it can be observed that [P2] (in red) and [P3] (in blue) yield considerable improvements in both speed and accuracy, while [P1] displays the lowest performance. Hypothesis testing with a paired <italic>t</italic>-test is also performed to compare the ITR values obtained from the different paradigms across the 12 participants. The results are presented in <xref ref-type="table" rid="T3">Table 3</xref>, which also suggests [P2] as the slightly better paradigm, although the difference between [P2] and [P3] is not statistically significant.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Average of information transfer rate (bits/sequence) for three evidence acquisition paradigms: [P1] (black), [P2] (red), and [P3] (blue). All 12 users performed the copy phrase task in RSVP Keyboard&#x02122;.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0009.tif"/>
</fig>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Mean of the ITR (bits/sequence) from <xref ref-type="fig" rid="F9">Figure 9</xref> for 12 subjects performing a copy task using RSVP Keyboard&#x02122; for three different strategies [P1], [P2], and [P3].</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th valign="top" align="center"><bold>P1</bold></th>
<th valign="top" align="center"><bold>P2</bold></th>
<th valign="top" align="center"><bold>P3</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Mean ITR</td>
<td valign="top" align="center">0.55</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.80</td>
</tr>
</tbody>
</table>
</table-wrap>
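<p>As referenced above, the following sketch computes bits per selection under the standard ITR formula with uniform symbol priors and uniformly spread errors; this common formulation may differ in detail from the exact computation behind <xref ref-type="table" rid="T2">Table 2</xref>, and bits/sequence follows by dividing by the average number of sequences per selection.</p>
<preformat>
import math

def itr_bits_per_selection(n_symbols, p):
    """Standard ITR formula; valid for accuracy p strictly between
    chance (1/N) and 1, with the perfect-accuracy case handled
    separately."""
    n = n_symbols
    if p == 1.0:
        return math.log2(n)
    return (math.log2(n) + p * math.log2(p)
            + (1.0 - p) * math.log2((1.0 - p) / (n - 1)))
</preformat>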
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Paired t-test results for the accuracy of typing a letter correctly (ATL) and the ITR values under different evidence acquisition paradigms for 12 users.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>Paired <italic>t</italic>-Test Results between Different ATL Values</bold></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>Paired <italic>t</italic>-Test Results Between Different ITR Values</bold></th>
</tr>
<tr>
<th valign="top" align="left"><bold><italic>P</italic><sub><italic>i</italic></sub> v.s. <italic>P</italic><sub><italic>j</italic></sub></bold></th>
<th valign="top" align="center"><bold><italic>P</italic>-values</bold></th>
<th valign="top" align="left"><bold><italic>P</italic><sub><italic>i</italic></sub> v.s. <italic>P</italic><sub><italic>j</italic></sub></bold></th>
<th valign="top" align="center"><bold><italic>P</italic>-values</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">[<italic>P</italic>1] v.s. [<italic>P</italic>2]</td>
<td valign="top" align="center">3.00<italic>e</italic><sup>&#x02212;4</sup></td>
<td valign="top" align="left">[<italic>P</italic>1] v.s. [<italic>P</italic>2]</td>
<td valign="top" align="center">0.04</td>
</tr>
<tr>
<td valign="top" align="left">[<italic>P</italic>1] v.s. [<italic>P</italic>3]</td>
<td valign="top" align="center">9.20<italic>e</italic><sup>&#x02212;5</sup></td>
<td valign="top" align="left">[<italic>P</italic>1] v.s. [<italic>P</italic>3]</td>
<td valign="top" align="center">0.06</td>
</tr>
<tr>
<td valign="top" align="left">[<italic>P</italic>3] v.s. [<italic>P</italic>2]</td>
<td valign="top" align="center">0.46</td>
<td valign="top" align="left">[<italic>P</italic>3] v.s. [<italic>P</italic>2]</td>
<td valign="top" align="center">0.47</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>The null hypothesis is that the expected ATL/ITR difference between the two considered paradigms is zero. A Bonferroni correction was applied to adjust the critical significance level of &#x003B1; &#x0003D; 0.05</italic>.</p>
</table-wrap-foot>
</table-wrap>
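<p>A minimal sketch of one such paired comparison follows (assuming SciPy; the Bonferroni adjustment for the three pairwise comparisons mirrors the correction described in the <xref ref-type="table" rid="T3">Table 3</xref> footnote).</p>
<preformat>
from scipy.stats import ttest_rel

def compare_paradigms(values_a, values_b, n_comparisons=3, alpha=0.05):
    """Paired t-test across users (one ATL or ITR value per user and
    paradigm), with a Bonferroni-adjusted critical level."""
    t_stat, p_value = ttest_rel(values_a, values_b)
    significant = alpha >= p_value * n_comparisons  # Bonferroni
    return t_stat, p_value, significant
</preformat>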
<p>The human-in-the-loop copy phrase experiment results in <xref ref-type="fig" rid="F9">Figure 9</xref> and <xref ref-type="table" rid="T1">Table 1</xref> show that the proposed strategies [P2] and [P3] outperform strategy [P1] in terms of accuracy, with [P2] performing best, and result in significant improvements in both speed and accuracy compared to [P1]. We believe that improving not only accuracy but also speed is highly desirable for BCI systems designed for real-life applications.</p>
<p>Finally, using the online copy phrase and calibration results, we report ITR as a function of the AUC obtained from the FRP and ERP classifiers for each paradigm in <xref ref-type="fig" rid="F10">Figures 10A,B</xref>. There are two AUC values for paradigms [P2] and [P3], since they use both ERP and FRP evidence, whereas there is only one AUC value for [P1], corresponding to the ERP evidence. A linear regression model is fit to the observed data, and we report the coefficient of determination <italic>R</italic><sup>2</sup> (a sketch of this fit follows <xref ref-type="fig" rid="F10">Figure 10</xref>). These figures show that in all three cases there is a positive correlation between AUC and ITR. For <italic>AUC</italic><sub><italic>ERP</italic></sub>, the correlation is higher, indicating that <italic>AUC</italic><sub><italic>ERP</italic></sub> is an effective predictor of ITR for the three paradigms. For FRP, the correlation is much lower, so we conclude that <italic>AUC</italic><sub><italic>FRP</italic></sub> has a small effect on ITR for both paradigms [P2] and [P3].</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p>Linear regression relation between ITR (bits/sequence) and AUCs of: <bold>(A)</bold> FRP for evidence acquisition paradigms [P2] (red) and [P3] (blue), and <bold>(B)</bold> ERP for [P1] (black), [P2] (red), and [P3] (blue). Actual ITR values (represented by dots) and ITR values predicted by the linear model (represented by the solid line) are plotted as a function of the AUC for 12 users.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-15-788258-g0010.tif"/>
</fig>
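<p>As referenced above, the regression and <italic>R</italic><sup>2</sup> computation can be sketched as follows (assuming NumPy; an ordinary least-squares line fit, illustrative of, though not necessarily identical to, the fitting used for <xref ref-type="fig" rid="F10">Figure 10</xref>).</p>
<preformat>
import numpy as np

def fit_itr_vs_auc(auc, itr):
    """Least-squares line itr = slope * auc + intercept, plus R^2."""
    auc, itr = np.asarray(auc, float), np.asarray(itr, float)
    slope, intercept = np.polyfit(auc, itr, 1)
    pred = slope * auc + intercept
    ss_res = float(np.sum((itr - pred) ** 2))
    ss_tot = float(np.sum((itr - itr.mean()) ** 2))
    return slope, intercept, 1.0 - ss_res / ss_tot
</preformat>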
<p>Compared to benchmark BCI spellers that rely on visually evoked potentials (VEPs), such as SSVEPs, our ERP/ErrP-based BCI speller has a slight advantage in accuracy (Liu et al., <xref ref-type="bibr" rid="B19">2020</xref>; Wong et al., <xref ref-type="bibr" rid="B31">2020</xref>). Compared to vision-independent BCI paradigms that rely on ERP elicitation via auditory and tactile stimulation, our visually evoked ERP/LM/FRP fusion BCI speller has a significant advantage in ITR, and the accuracies we obtain with paradigms [P2] and [P3] compete with state-of-the-art P300 BCIs in the literature (Eidel and K&#x000FC;bler, <xref ref-type="bibr" rid="B9">2020</xref>; Kawala-Sterniuk et al., <xref ref-type="bibr" rid="B17">2021</xref>).</p>
</sec>
</sec>
<sec sec-type="conclusions" id="s5">
<title>5. Conclusions</title>
<p>In this manuscript, we compared three different Bayesian inference frameworks that tightly fuse context information and different EEG evidences for use in the intent inference engines of EEG-based brain computer interfaces. In particular, we studied the potential benefits of fusing FRP, ERP, and language evidence using probabilistic generative models for a speller BCI. Based on human-in-the-loop (copy phrase and calibration) experiments with 12 healthy participants using RSVP Keyboard&#x02122;, three strategies are compared: [P1]-Baseline, which fuses only ERP/LM evidence; [P2]-AlwaysFRP, in which each RSVP sequence is followed by an FRP trial using the top candidate in the alphabet according to the posterior after ERP/LM evidence fusion; and [P3]-ConfirmFRP, in which the top candidate is shown as a prospect to generate FRP evidence only if its posterior exceeds a threshold, possibly after multiple ERP-evidence acquisition sequences.</p>
<p>We performed several analyses on the human-in-the-loop copy phrase experiment results, covering (i) accuracy, in the form of AUC, ATL, and PPC, and (ii) combined speed and accuracy, in the form of the information transfer rate (ITR, bits/sequence). Our results show that by using enough FRP evidence in addition to ERP evidence and the language model (LM), typing speed can be increased compared to a model that does not use FRP evidence. Overall, both proposed strategies [P2] and [P3], which utilize FRP evidence, outperform [P1] in terms of accuracy. Moreover, [P2] yields significant speed and accuracy improvements, and therefore ITR improvements, compared to [P1], and also performs better than [P3]. These results could be due to the fact that for [P3] we do not collect enough FRP evidence during copy phrase tasks, and that [P2] causes less mental fatigue due to its deterministic presentation method. We think that, for a brain-computer interface designed for daily use, it is crucial to improve speed as well as accuracy. Our results suggest that probabilistic fusion of the FRP evidence can bring the performance of a BCI one step closer to this objective.</p>
<p>According to the results, BCI users can benefit from fusing FRP evidence into the decision making if enough FRP evidence is available. Based on these analyses, we propose a BCI typing system capable of employing multiple evidence acquisition paradigms. After individual assessments, such a system will be able to determine the most suitable evidence presentation/inference paradigm according to user preference, capabilities, and EEG signal statistics.</p>
<p>We demonstrate theoretically that probing the user&#x00027;s intent with FRP acquisition using the current top candidate is an optimal strategy in an active learning framework under the independent-trial-EEG-evidence assumption. This approach constitutes an improvement over previous literature employing ERP paradigms alone. In earlier work, we demonstrated that showing the top letters according to the current posterior in a sequence for ERP evidence acquisition is similarly optimal under the same independence assumption (Moghadamfalahi et al., <xref ref-type="bibr" rid="B22">2015</xref>). Therefore, under the independent-trial-EEG-evidence model, the best strategy is to repeat the following until a decision is confidently made: <italic>show the top candidate, gather EEG evidence, and update the posterior</italic>. Clearly, the independence assumption is incorrect, if not because of the auto-correlation of the EEG time series, then because of the overlapping time windows used for trial-EEG-evidence extraction. Consequently, an improved ERP/FRP/LM fusion framework designed in the future needs to consider the following issues more carefully: (1) a signal model that captures the temporal dependency of the EEG features extracted for each trial, and (2) the temporal cost of gathering a sequence&#x00027;s worth of ERP evidence vs. FRP evidence obtained by showing the current top prospect. In future work, we plan to address these issues and develop an ERP/FRP/LM fusion mechanism for BCI spellers that dynamically decides whether to gather more ERP evidence, more FRP evidence, or neither during intent inference. The inference framework does not strictly rely on EEG evidence; therefore, we will also explore multi-modal physiological evidence fusion using signal sources such as EMG or eye-gaze trajectories.</p>
</sec>
<sec sec-type="data-availability" id="s6">
<title>Data Availability Statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7">
<title>Ethics Statement</title>
<p>The studies involving human participants were reviewed and approved by Northeastern University Institutional Review Board. The patients/participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s8">
<title>Author Contributions</title>
<p>PG-N and DE conceived of the presented idea. PG-N and MM designed and performed the FRP experiments. All authors contributed to the analysis of the results and to the writing of the manuscript.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>This work was supported by NSF IIS-1149570, IIS-1118061, CNS-1136027, CNS-1544895, and SMA-0835976, by NIDR-H133E140026 and NIDLRR 90RE5017-02-01, and by NIH 5R01DC009834.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<ack><p>Thanks to Bruna Girvent for her efforts in editing some figures.</p></ack>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Acqualagna</surname> <given-names>L.</given-names></name> <name><surname>Treder</surname> <given-names>M. S.</given-names></name> <name><surname>Schreuder</surname> <given-names>M.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name></person-group> (<year>2010</year>). <article-title>A novel brain-computer interface based on the rapid serial visual presentation paradigm</article-title>, in <source>Proceedings of EMBC</source>, <volume>Vol. 1</volume> (<publisher-loc>Buenos Aires</publisher-loc>), <fpage>2686</fpage>&#x02013;<lpage>2689</lpage>. <pub-id pub-id-type="pmid">21096199</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Akcakaya</surname> <given-names>M.</given-names></name> <name><surname>Peters</surname> <given-names>B.</given-names></name> <name><surname>Moghadamfalahi</surname> <given-names>M.</given-names></name> <name><surname>Mooney</surname> <given-names>A.</given-names></name> <name><surname>Orhan</surname> <given-names>U.</given-names></name> <name><surname>Oken</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Noninvasive brain computer interfaces for augmentative and alternative communication</article-title>. <source>IEEE Rev. Biomed. Eng.</source> <volume>7</volume>, <fpage>31</fpage>&#x02013;<lpage>49</lpage>. <pub-id pub-id-type="doi">10.1109/RBME.2013.2295097</pub-id><pub-id pub-id-type="pmid">24802700</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Buttfield</surname> <given-names>A.</given-names></name> <name><surname>Ferrez</surname> <given-names>P. W.</given-names></name> <name><surname>Millan</surname> <given-names>J. R.</given-names></name></person-group> (<year>2006</year>). <article-title>Towards a robust bci: error potentials and online learning</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>14</volume>, <fpage>164</fpage>&#x02013;<lpage>168</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2006.875555</pub-id><pub-id pub-id-type="pmid">16792284</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chavarriaga</surname> <given-names>R.</given-names></name> <name><surname>Sobolewski</surname> <given-names>A.</given-names></name> <name><surname>Millan</surname> <given-names>J. d. R.</given-names></name></person-group> (<year>2014</year>). <article-title>Errare machinale est: the use of error-related potentials in brain-machine interfaces</article-title>. <source>Front. Neurosci.</source> <volume>8</volume>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.3389/fnins.2014.00208</pub-id><pub-id pub-id-type="pmid">25100937</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Combaz</surname> <given-names>A.</given-names></name> <name><surname>Chumerin</surname> <given-names>N.</given-names></name> <name><surname>Manyakov</surname> <given-names>N. V.</given-names></name> <name><surname>Robben</surname> <given-names>A.</given-names></name> <name><surname>Suykens</surname> <given-names>J. A. K.</given-names></name> <name><surname>Van Hulle</surname> <given-names>M. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Towards the detection of error-related potentials and its integration in the context of a P300 speller brain-computer interface</article-title>. <source>Neurocomputing</source> <volume>80</volume>, <fpage>73</fpage>&#x02013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2011.09.013</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dal Seno</surname> <given-names>B.</given-names></name> <name><surname>Matteucci</surname> <given-names>M.</given-names></name> <name><surname>Mainardi</surname> <given-names>L.</given-names></name></person-group> (<year>2010</year>). <article-title>Online detection of P300 and error potentials in a BCI speller</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2010</volume>:<fpage>307254</fpage>. <pub-id pub-id-type="doi">10.1155/2010/307254</pub-id><pub-id pub-id-type="pmid">20169142</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davies</surname> <given-names>P. L.</given-names></name> <name><surname>Segalowitz</surname> <given-names>S. J.</given-names></name> <name><surname>Gavin</surname> <given-names>W. J.</given-names></name></person-group> (<year>2004</year>). <article-title>Development of response-monitoring erps in 7- to 25-year-olds</article-title>. <source>Develop. Neuropsychol.</source> <volume>25</volume>, <fpage>355</fpage>&#x02013;<lpage>376</lpage>. <pub-id pub-id-type="doi">10.1207/s15326942dn2503_6</pub-id><pub-id pub-id-type="pmid">15148003</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Debener</surname> <given-names>S.</given-names></name> <name><surname>Ullsperger</surname> <given-names>M.</given-names></name> <name><surname>Siegel</surname> <given-names>M.</given-names></name> <name><surname>Fiehler</surname> <given-names>K.</given-names></name> <name><surname>von Cramon</surname> <given-names>D. Y.</given-names></name> <name><surname>Engel</surname> <given-names>A. K.</given-names></name></person-group> (<year>2005</year>). <article-title>Trial-by-trial coupling of concurrent electroencephalogram and functional magnetic resonance imaging identifies the dynamics of performance monitoring</article-title>. <source>J. Neurosci.</source> <volume>25</volume>, <fpage>11730</fpage>&#x02013;<lpage>11737</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3286-05.2005</pub-id><pub-id pub-id-type="pmid">16354931</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eidel</surname> <given-names>M.</given-names></name> <name><surname>K&#x000FC;bler</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Wheelchair control in a virtual environment by healthy participants using a p300-bci based on tactile stimulation: training effects and usability</article-title>. <source>Front. Human Neurosci.</source> <volume>14</volume>:<fpage>265</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2020.00265</pub-id><pub-id pub-id-type="pmid">32754019</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falkenstein</surname> <given-names>M.</given-names></name> <name><surname>Hoormann</surname> <given-names>J.</given-names></name> <name><surname>Christ</surname> <given-names>S.</given-names></name> <name><surname>Hohnsbein</surname> <given-names>J.</given-names></name></person-group> (<year>2000</year>). <article-title>{ERP} components on reaction errors and their functional significance: a tutorial</article-title>. <source>Biol. Psychol.</source> <volume>51</volume>, <fpage>87</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1016/S0301-0511(99)00031-9</pub-id><pub-id pub-id-type="pmid">10686361</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Farwell</surname> <given-names>L. A.</given-names></name> <name><surname>Donchin</surname> <given-names>E.</given-names></name></person-group> (<year>1988</year>). <article-title>Talking off the top of your head: toward a mental prosthesis utilizing event-related brain potentials</article-title>. <source>Electroencephal. Clin. Neurophysiol.</source> <volume>70</volume>, <fpage>510</fpage>&#x02013;<lpage>523</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(88)90149-6</pub-id><pub-id pub-id-type="pmid">2461285</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrez</surname> <given-names>P. W.</given-names></name> <name><surname>del R. Millan</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). <article-title>Error-related eeg potentials generated during simulated brain &#x00023;x2013;computer interaction</article-title>. <source>IEEE Trans. Biomed. Eng.</source> <volume>55</volume>, <fpage>923</fpage>&#x02013;<lpage>929</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2007.908083</pub-id><pub-id pub-id-type="pmid">18334383</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gonzalez-Navarro</surname> <given-names>P.</given-names></name> <name><surname>Moghadamfalahi</surname> <given-names>M.</given-names></name> <name><surname>Akcakaya</surname> <given-names>M.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name></person-group> (<year>2016a</year>). <article-title>Error-related potentials for eeg-based typing systems</article-title>, in <source>Proceedings of the 6th International Brain-Computer Interface Meeting</source> (<publisher-loc>Asilomar, CA</publisher-loc>), <fpage>25</fpage>.</citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Navarro</surname> <given-names>P.</given-names></name> <name><surname>Moghadamfalahi</surname> <given-names>M.</given-names></name> <name><surname>Akcakaya</surname> <given-names>M.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name></person-group> (<year>2016b</year>). <article-title>Spatio-temporal eeg models for brain interfaces</article-title>. <source>Signal Process.</source> <volume>131</volume>, <fpage>333</fpage>&#x02013;<lpage>343</lpage>. <pub-id pub-id-type="doi">10.1016/j.sigpro.2016.08.001</pub-id><pub-id pub-id-type="pmid">27713590</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>G&#x000FC;rel</surname> <given-names>T.</given-names></name> <name><surname>Mehring</surname> <given-names>C.</given-names></name></person-group> (<year>2012</year>). <article-title>Unsupervised adaptation of brain machine interface decoders</article-title>. <source>Front. Neurosci.</source> <volume>6</volume>:<fpage>164</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2012.00164</pub-id><pub-id pub-id-type="pmid">23162425</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Iturrate</surname> <given-names>I.</given-names></name> <name><surname>Montesano</surname> <given-names>L.</given-names></name> <name><surname>Minguez</surname> <given-names>J.</given-names></name></person-group> (<year>2013</year>). <article-title>Task-dependent signal variations in eeg error-related potentials for brain-computer interfaces</article-title>. <source>J. Neural Eng.</source> <volume>10</volume>:<fpage>026024</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2560/10/2/026024</pub-id><pub-id pub-id-type="pmid">23528750</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kawala-Sterniuk</surname> <given-names>A.</given-names></name> <name><surname>Browarska</surname> <given-names>N.</given-names></name> <name><surname>Al-Bakri</surname> <given-names>A. F.</given-names></name> <name><surname>Pelc</surname> <given-names>M.</given-names></name> <name><surname>Zygarlicki</surname> <given-names>J.</given-names></name> <name><surname>Sidikova</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Summary of over fifty years with brain-computer interfaces&#x02014;a review</article-title>. <source>Brain Sci.</source> <volume>11</volume>, <fpage>43</fpage>. <pub-id pub-id-type="doi">10.3390/brainsci11010043</pub-id><pub-id pub-id-type="pmid">33401571</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kieffaber</surname> <given-names>P. D.</given-names></name> <name><surname>Hershaw</surname> <given-names>J.</given-names></name> <name><surname>Sredl</surname> <given-names>J.</given-names></name> <name><surname>West</surname> <given-names>R.</given-names></name></person-group> (<year>2016</year>). <article-title>Electrophysiological correlates of error initiation and response correction</article-title>. <source>NeuroImage</source> <volume>128</volume>, <fpage>158</fpage>&#x02013;<lpage>166</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.12.046</pub-id><pub-id pub-id-type="pmid">26748078</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>B.</given-names></name> <name><surname>Huang</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name></person-group> (<year>2020</year>). <article-title>Beta: a large benchmark database toward ssvep-bci application</article-title>. <source>Front. Neurosci.</source> <volume>14</volume>:<fpage>627</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2020.00627</pub-id><pub-id pub-id-type="pmid">32655358</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Luck</surname> <given-names>S. J.</given-names></name></person-group> (<year>2014</year>). <source>An Introduction to the Event-Related Potential Technique</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT press</publisher-name>.</citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Margaux</surname> <given-names>P.</given-names></name> <name><surname>Emmanuel</surname> <given-names>M.</given-names></name> <name><surname>S&#x000E9;bastien</surname> <given-names>D.</given-names></name> <name><surname>Olivier</surname> <given-names>B.</given-names></name> <name><surname>J&#x000E9;r&#x000E9;mie</surname> <given-names>M.</given-names></name></person-group> (<year>2012</year>). <article-title>Objective and subjective evaluation of online error correction during p300-based spelling</article-title>. <source>Adv. Human Comput. Int.</source> <volume>2012</volume>, <fpage>4</fpage>. <pub-id pub-id-type="doi">10.1155/2012/578295</pub-id></citation>
</ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moghadamfalahi</surname> <given-names>M.</given-names></name> <name><surname>Orhan</surname> <given-names>U.</given-names></name> <name><surname>Akcakaya</surname> <given-names>M.</given-names></name> <name><surname>Nezamfar</surname> <given-names>H.</given-names></name> <name><surname>Fried-Oken</surname> <given-names>M.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name></person-group> (<year>2015</year>). <article-title>Language-model assisted brain computer interface for typing: A comparison of matrix and rapid serial visual presentation</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>23</volume>, <fpage>910</fpage>&#x02013;<lpage>920</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2015.2411574</pub-id><pub-id pub-id-type="pmid">25775495</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Obermaier</surname> <given-names>B.</given-names></name> <name><surname>Neuper</surname> <given-names>C.</given-names></name> <name><surname>Guger</surname> <given-names>C.</given-names></name> <name><surname>Pfurtscheller</surname> <given-names>G.</given-names></name></person-group> (<year>2001</year>). <article-title>Information transfer rate in a five-classes brain-computer interface</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>9</volume>, <fpage>283</fpage>&#x02013;<lpage>288</lpage>. <pub-id pub-id-type="doi">10.1109/7333.948456</pub-id><pub-id pub-id-type="pmid">11561664</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Orhan</surname> <given-names>U.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name> <name><surname>Roark</surname> <given-names>B.</given-names></name> <name><surname>Oken</surname> <given-names>B.</given-names></name> <name><surname>Fried-Oken</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Offline analysis of context contribution to erp-based typing bci performance</article-title>. <source>J. Neural Eng.</source> <volume>10</volume>:<fpage>066003</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2560/10/6/066003</pub-id><pub-id pub-id-type="pmid">24099944</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Orhan</surname> <given-names>U.</given-names></name> <name><surname>Hild</surname> <given-names>K. E.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name> <name><surname>Roark</surname> <given-names>B.</given-names></name> <name><surname>Oken</surname> <given-names>B.</given-names></name> <name><surname>Fried-Oken</surname> <given-names>M.</given-names></name></person-group> (<year>2012</year>). <article-title>Rsvp keyboard: an eeg based typing interface</article-title>, in <source>International Conference on Acoustics, Speech and Signal Processing (ICASSP)</source> (<publisher-loc>Kyoto</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>645</fpage>&#x02013;<lpage>648</lpage>. <pub-id pub-id-type="pmid">24500542</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Orhan</surname> <given-names>U.</given-names></name> <name><surname>Nezamfar</surname> <given-names>H.</given-names></name> <name><surname>Akcakaya</surname> <given-names>M.</given-names></name> <name><surname>Erdogmus</surname> <given-names>D.</given-names></name> <name><surname>Higger</surname> <given-names>M.</given-names></name> <name><surname>Moghadamfalahi</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Probabilistic simulation framework for eeg-based bci design</article-title>. <source>Brain Comput. Interfaces</source> <volume>3</volume>, <fpage>171</fpage>&#x02013;<lpage>185</lpage>. <pub-id pub-id-type="doi">10.1080/2326263X.2016.1252621</pub-id><pub-id pub-id-type="pmid">29250562</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Roark</surname> <given-names>B.</given-names></name> <name><surname>De Villiers</surname> <given-names>J.</given-names></name> <name><surname>Gibbons</surname> <given-names>C.</given-names></name> <name><surname>Fried-Oken</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Scanning methods and language modeling for binary switch typing</article-title>, in <source>Proceedings of the NAACL HLT 2010 Workshop on Speech and Language Processing for Assistive Technologies</source> (<publisher-loc>Los Angeles, CA</publisher-loc>: <publisher-name>Association for Computational Linguistics</publisher-name>), <fpage>28</fpage>&#x02013;<lpage>36</lpage>.</citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schmidt</surname> <given-names>N. M.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>Treder</surname> <given-names>M. S.</given-names></name></person-group> (<year>2012</year>). <article-title>Online detection of error-related potentials boosts the performance of mental typewriters</article-title>. <source>BMC Neurosci.</source> <volume>13</volume>, <fpage>19</fpage>. <pub-id pub-id-type="doi">10.1186/1471-2202-13-19</pub-id><pub-id pub-id-type="pmid">22336293</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Silverman</surname> <given-names>B. W.</given-names></name></person-group> (<year>1986</year>). <source>Density Estimation for Statistics and Data Analysis</source>, <volume>Vol. 26</volume>. <publisher-name>CRC press</publisher-name>.</citation>
</ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sp&#x000FC;ler</surname> <given-names>M.</given-names></name> <name><surname>Rosenstiel</surname> <given-names>W.</given-names></name> <name><surname>Bogdan</surname> <given-names>M.</given-names></name></person-group> (<year>2012</year>). <article-title>Online adaptation of a c-vep brain-computer interface(bci) based on error-related potentials and unsupervised learning</article-title>. <source>PLoS One</source> <volume>7</volume>, <fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0051077</pub-id><pub-id pub-id-type="pmid">23236433</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wong</surname> <given-names>C. M.</given-names></name> <name><surname>Wan</surname> <given-names>F.</given-names></name> <name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Nan</surname> <given-names>W.</given-names></name> <name><surname>Lao</surname> <given-names>K. F.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Learning across multi-stimulus enhances target recognition methods in ssvep-based bcis</article-title>. <source>J. Neural Eng.</source> <volume>17</volume>:<fpage>016026</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2552/ab2373</pub-id><pub-id pub-id-type="pmid">31112937</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Yazicioglu</surname> <given-names>R. F.</given-names></name> <name><surname>Merken</surname> <given-names>P.</given-names></name> <name><surname>Puers</surname> <given-names>R.</given-names></name> <name><surname>Hoof</surname> <given-names>C. V.</given-names></name></person-group> (<year>2006</year>). <article-title>Low-power low-noise 8-channel eeg front-end asic for ambulatory acquisition systems</article-title>, in <source>2006 Proceedings of the 32nd European Solid-State Circuits Conference</source> (<publisher-loc>Montreaux</publisher-loc>), <fpage>247</fpage>&#x02013;<lpage>250</lpage>.</citation>
</ref>
</ref-list>
</back>
</article>
