<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2024.1400077</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Two-stage sparse multi-objective evolutionary algorithm for channel selection optimization in BCIs</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Liu</surname> <given-names>Tianyu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1425212/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wu</surname> <given-names>Yu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ye</surname> <given-names>An</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2387237/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Cao</surname> <given-names>Lei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/396775/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Cao</surname> <given-names>Yongnian</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>School of Information Engineering, Shanghai Maritime University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Tiktok Incorporation</institution>, <addr-line>San Jose, CA</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Gernot R. M&#x000FC;ller-Putz, Graz University of Technology, Austria</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Syed Hammad Nazeer Gilani, Air University, Pakistan</p>
<p>Miaoyun Zhao, Dalian University of Technology, China</p>
<p>Wei Wang, Nanjing University of Aeronautics and Astronautics, China</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Tianyu Liu <email>liuty&#x00040;shmtu.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>22</day>
<month>05</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1400077</elocation-id>
<history>
<date date-type="received">
<day>13</day>
<month>03</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>08</day>
<month>05</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2024 Liu, Wu, Ye, Cao and Cao.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Liu, Wu, Ye, Cao and Cao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>Channel selection has become the pivotal issue affecting the widespread application of non-invasive brain-computer interface systems in the real world. However, constructing suitable multi-objective problem models alongside effective search strategies stands out as a critical factor that impacts the performance of multi-objective channel selection algorithms. This paper presents a two-stage sparse multi-objective evolutionary algorithm (TS-MOEA) to address channel selection problems in brain-computer interface systems.</p></sec>
<sec>
<title>Methods</title>
<p>In TS-MOEA, a two-stage framework, which consists of the early and late stages, is adopted to prevent the algorithm from stagnating. Furthermore, the two stages concentrate on different multi-objective problem models, thereby balancing convergence and population diversity in TS-MOEA. Inspired by the sparsity of the correlation matrix of channels, a sparse initialization operator, which uses a domain-knowledge-based score assignment strategy for decision variables, is introduced to generate the initial population. Moreover, a <italic>Score</italic>-based mutation operator is utilized to enhance the search efficiency of TS-MOEA.</p></sec>
<sec>
<title>Results</title>
<p>The performance of TS-MOEA and five other state-of-the-art multi-objective algorithms has been evaluated using a 62-channel EEG-based brain-computer interface system for fatigue detection tasks, and the results demonstrated the effectiveness of TS-MOEA.</p></sec>
<sec>
<title>Conclusion</title>
<p>The proposed two-stage framework can help TS-MOEA escape stagnation and facilitate a balance between diversity and convergence. Integrating the sparsity of the correlation matrix of channels and the problem-domain knowledge can effectively reduce the computational complexity of TS-MOEA while enhancing its optimization efficiency.</p></sec></abstract>
<kwd-group>
<kwd>multi-objective evolutionary algorithm</kwd>
<kwd>channel selection</kwd>
<kwd>two-stage framework</kwd>
<kwd>sparse initialization</kwd>
<kwd>score assignment strategy</kwd>
</kwd-group>
<contract-num rid="cn001">61806122</contract-num>
<contract-num rid="cn001">62102242</contract-num>
<contract-sponsor id="cn001">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content></contract-sponsor>
<counts>
<fig-count count="12"/>
<table-count count="10"/>
<equation-count count="10"/>
<ref-count count="54"/>
<page-count count="21"/>
<word-count count="12645"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain-Computer Interfaces</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Brain-computer interface systems (BCIs) have garnered increasing attention within academic and industrial circles due to their broad real-world applications. By acquiring brain signals, BCIs facilitate external device control and communication without necessitating physical movement. For instance, individuals with paralysis can employ BCIs to manage external devices like wheelchairs, prosthetics, and robots, consequently enhancing their quality of life (Krishna Rao et al., <xref ref-type="bibr" rid="B16">2022</xref>). Furthermore, BCIs find utility in monitoring patients&#x00027; cerebral functions within the medical domain and delivering enhanced immersive experiences in the realm of gaming (Qu et al., <xref ref-type="bibr" rid="B32">2023</xref>).</p>
<p>The majority of existing non-invasive BCIs utilize external sensors equipped with multiple channels (32 channels, 62 channels, or even more) for the acquisition of electroencephalography (EEG) signals (Sibilano et al., <xref ref-type="bibr" rid="B39">2024</xref>). More channels lead to a more comprehensive capture of EEG signals. However, owing to the impact of the skull and scalp on electrical signal transmission, EEG signals obtained through external sensors frequently contain noise and extraneous information irrelevant to specific tasks. Moreover, the extensive number of EEG channels compounds the difficulties in data collection and significantly increases the computational complexity of processing this data, leading to increased and often unnecessary computational costs. Therefore, selecting appropriate channels (known as channel selection optimization) from the entirety has emerged as a pivotal challenge in the realm of BCIs (Almanza-Conejo et al., <xref ref-type="bibr" rid="B2">2023</xref>).</p>
<p>A significant portion of research in channel selection optimization is grounded in EEG signal analysis (Mart&#x000ED;nez-Cagigal et al., <xref ref-type="bibr" rid="B26">2022</xref>). The channel selection method based on signal analysis begins by extracting and selecting features from EEG signals, subsequently choosing the most suitable subset of channels for a specific task. On the one hand, such methods require users to have domain-specific expertise related to the task; otherwise, it may lead to selecting sub-optimal channel subsets. Furthermore, the pre-designed algorithmic workflow may become entirely inappropriate if the task changes. On the other hand, most signal analysis-based channel selection methods focus on a single optimization objective, with task accuracy often chosen as the optimization goal in many algorithms (Rocha-Herrera et al., <xref ref-type="bibr" rid="B34">2022</xref>). However, in addition to task accuracy, the number of selected channels is also a crucial metric when conducting channel selection. This is because the number of channels adopted will determine the convenience of using BCI devices. However, there is typically a trade-off between the number of selected electrodes and task accuracy. Therefore, an efficient channel selection approach must strike a compromise between the number of selected channels and task accuracy since these two factors are mutually exclusive. In this context, the utilization of multi-objective evolutionary algorithms (MOEAs) (He et al., <xref ref-type="bibr" rid="B13">2022</xref>), recognized for their efficiency in resolving problems with conflicting multiple objectives, has captured the attention of researchers. In most multi-objective channel selection algorithms, the number of selected channels (or the number of deleted channels) and the accuracy of tasks are directly employed to construct the multi-objective problem model (Alotaiby et al., <xref ref-type="bibr" rid="B3">2015</xref>). 
However, researchers have found that utilizing the aforementioned multi-objective problem model can sometimes lead to premature convergence of MOEAs (Abdullah et al., <xref ref-type="bibr" rid="B1">2022</xref>). Consequently, developing a well-designed and practical multi-objective problem model becomes a critical factor influencing the performance of multi-objective channel selection algorithms.</p>
<p>Studies have demonstrated that connectivity information can effectively capture the attributes of EEG signals, given that interactions and collaborations among various regions shape the neural activity in the brain (Moon et al., <xref ref-type="bibr" rid="B28">2020</xref>). Recently, there has been a rising trend in utilizing the correlation matrix of channels to address channel selection optimization problems. This is primarily because the correlation matrix can depict the interactions and cooperative activities among different brain regions (Liu and Ye, <xref ref-type="bibr" rid="B22">2023</xref>). It has been demonstrated that due to the non-uniform connectivity patterns in the brain, the correlation matrix of EEG signals is typically sparse (Liu et al., <xref ref-type="bibr" rid="B19">2019</xref>). For example, <xref ref-type="fig" rid="F1">Figure 1</xref> exemplifies the correlation matrix of 62 EEG channels utilized in a fatigue detection task with a classification accuracy of 94%. In <xref ref-type="fig" rid="F1">Figure 1</xref>, the red color represents that the corresponding two channels are entirely linearly correlated (linear correlation coefficient of 1), while the blue color indicates that the two channels are linearly independent (linear correlation coefficient of 0). It can be observed from <xref ref-type="fig" rid="F1">Figure 1</xref> that the majority of cells in the correlation matrix are depicted in blue, indicating that the correlation coefficients of the corresponding elements are close to zero, thereby demonstrating the sparse nature of the correlation matrix. However, few studies consider the sparsity of the correlation matrix when adopting it for solving channel selection problems.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Correlation matrix of 62 channels under 94% accuracy for a fatigue detection task.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0001.tif"/>
</fig>
<p>This paper introduces a two-stage sparse multi-objective evolutionary algorithm (TS-MOEA), tailored for optimizing channel selection in BCIs. To prevent the algorithm from stagnating, TS-MOEA employs a two-stage framework. In this framework, the entire optimization process is divided into two phases, namely the early and late stages, with each stage addressing different multi-objective problem models. Specifically, in the early stage, the adopted objective function is more sensitive to the deletion of channels to prevent the algorithm from falling into local optima. Furthermore, inspired by the sparsity of the correlation matrix of channels, a sparse initialization operator is employed when initializing the population in TS-MOEA. In the sparse initialization operator, a domain-knowledge-based strategy, which utilizes channels&#x00027; positions and distance matrix, is used to assign scores to decision variables. Additionally, in the early stage of the algorithm, a <italic>Score</italic>-based mutation strategy is employed to enhance the search efficiency of the algorithm. In summary, the algorithm presented in this paper differs from existing multi-objective lead optimization algorithms in three main aspects. First, the proposed algorithm employs two distinct multi-objective optimization models, whereas current methods optimize for a single multi-objective model throughout the entire search process. Second, the proposed algorithm analyzes the sparsity of the channel correlation matrix and incorporates a sparsity-based strategy in the design of the operators to enhance the efficiency of the algorithm. Lastly, the proposed algorithm utilizes domain-specific knowledge to guide the search process of the algorithm. The primary contributions of this paper are outlined as follows:
<list list-type="order">
<list-item><p>A two-stage framework is employed in this study to assist the algorithm in escaping local optima. This framework divides the optimization process into the early and late stages, and different multi-objective problem models are used in the two stages.</p></list-item>
<list-item><p>Inspired by the sparsity observed in the correlation matrix of channels, a sparse initialization operator is introduced to create the initial population. Within this operator, a strategy based on domain knowledge is employed, leveraging channels&#x00027; positions and distance matrix to allocate scores to decision variables.</p></list-item>
<list-item><p>A <italic>Score</italic>-based mutation strategy is employed to enhance the search efficiency in the early stage of TS-MOEA.</p></list-item>
<list-item><p>The performance of TS-MOEA and five other advanced multi-objective algorithms has been evaluated using a 62-channel EEG-based BCI system for a fatigue detection task.</p></list-item>
</list></p>
<p>The remainder of this paper is structured as follows: Section 2 presents the relevant background theory, followed by the description of the proposed TS-MOEA in Section 3. Section 4 covers the experiment and result analysis. Section 5 contains a discussion of the parameters and results, while Section 6 presents the concluding remarks.</p></sec>
<sec id="s2">
<title>2 Backgrounds</title>
<sec>
<title>2.1 Acquisition and processing of EEG signals</title>
<p>In this study, EEG signals were collected using an ESI-64 high-resolution system (SynAmps2, Neuroscan) with 62 EEG channels (Chen et al., <xref ref-type="bibr" rid="B5">2022</xref>). These 62 electrodes were positioned in accordance with the international 10&#x02013;20 standard, as depicted in <xref ref-type="fig" rid="F2">Figure 2</xref>. The initial sampling frequency was 1,000 Hz, which was down-sampled to 250 Hz for data processing. Subsequently, the recorded signals underwent filtering within the frequency range of 0 to 40 Hz. The raw EEG signals were sampled every 5 seconds, undergoing conversion from analog to digital signals through the utilization of a sampling window and a sliding window of 5 seconds. In this paper, the bilateral linked mastoid (LM) (Scannella et al., <xref ref-type="bibr" rid="B35">2016</xref>), which is the average of the left and right mastoids, was used as the reference signal during the acquisition of EEG signals.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Electrode placement follows the standard International 10&#x02013;20 System.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0002.tif"/>
</fig>
<p>This study utilizes the correlation matrix to describe the characteristics of the collected EEG signals. In recent times, the Pearson Correlation Coefficient (PCC) (Pearson, <xref ref-type="bibr" rid="B29">1895</xref>), along with the Phase Locking Value (PLV) (Aydore et al., <xref ref-type="bibr" rid="B4">2013</xref>), and Transfer Entropy (TE) (Schreiber, <xref ref-type="bibr" rid="B36">2000</xref>), have become extensively used methods for calculating the correlation coefficient between EEG signals in BCIs.</p>
<p>PCC quantifies the linear correlation between two signals, with its value ranging from -1 to 1. A PCC value of 0 signifies that the signals are linearly uncorrelated. Conversely, PCC values of -1 and 1 indicate negative and positive linear relationships, respectively. Consider <inline-formula><mml:math id="M1"><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> as the EEG signal from the <italic>i</italic><sup><italic>th</italic></sup> channel, where <italic>T</italic> represents the signal&#x00027;s length, and &#x003BC;<sub><italic>i</italic></sub> and &#x003C3;<sub><italic>i</italic></sub> are the mean and standard deviation of the <italic>i</italic><sup><italic>th</italic></sup> signal. The PCC value between <italic>X</italic><sub><italic>i</italic></sub> and <italic>X</italic><sub><italic>k</italic></sub> is computed using <xref ref-type="disp-formula" rid="E1">Equation (1)</xref>.</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M2"><mml:mrow><mml:mi>P</mml:mi><mml:mi>C</mml:mi><mml:mi>C</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>T</mml:mi></mml:mfrac><mml:mstyle displaystyle='true'><mml:msubsup><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:msubsup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>&#x02212;</mml:mo><mml:msub><mml:mi>&#x003BC;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>&#x02212;</mml:mo><mml:msub><mml:mi>&#x003BC;</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mstyle></mml:mrow><mml:mrow><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:msub><mml:mi>&#x003C3;</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mfrac></mml:mrow></mml:math></disp-formula>
<p>PLV is used to describe phase synchronization between two signals by averaging the absolute phase differences. PLV can be calculated using <xref ref-type="disp-formula" rid="E2">Equation (2)</xref>. In this equation, <inline-formula><mml:math id="M3"><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x003C6;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:math></inline-formula> denotes the phase of the signal at time point <italic>t</italic> for the <italic>i</italic><sup><italic>th</italic></sup> signal.</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M4"><mml:mrow><mml:mi>P</mml:mi><mml:mi>L</mml:mi><mml:mi>V</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>T</mml:mi></mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:munderover><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>&#x003C6;</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>&#x02212;</mml:mo><mml:msubsup><mml:mi>&#x003C6;</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mstyle></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>TE is a metric for quantifying the directed information flow from signal <italic>X</italic><sub><italic>i</italic></sub> to <italic>X</italic><sub><italic>k</italic></sub>, as delineated in <xref ref-type="disp-formula" rid="E3">Equation (3)</xref>. Essentially, TE assesses the extent to which signal <italic>X</italic><sub><italic>i</italic></sub> can improve the prediction of signal <italic>X</italic><sub><italic>k</italic></sub>. A TE value of 0 indicates the absence of a causal relationship between the two time series, implying that knowing the past values of <italic>X</italic><sub><italic>i</italic></sub> may not aid in predicting <italic>X</italic><sub><italic>k</italic></sub>.</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M5"><mml:mrow><mml:mi>T</mml:mi><mml:mi>E</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x02192;</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>T</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover><mml:mi>p</mml:mi></mml:mstyle><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo stretchy='false'>)</mml:mo><mml:mi>log</mml:mi><mml:mfrac><mml:mrow><mml:mi>p</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0007C;</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mi>p</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0007C;</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi></mml:msubsup><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:math></disp-formula>
<p>Research has shown that the performance of TE is relatively worse than that of PCC and PLV. PLV performs slightly better than PCC (Gong et al., <xref ref-type="bibr" rid="B12">2024</xref>). However, PCC is faster in terms of computational speed because of its simplicity (Maria et al., <xref ref-type="bibr" rid="B25">2023</xref>). Therefore, considering both performance and computational speed, this paper utilizes PCC to obtain the correlation matrix of EEG signals.</p></sec>
<sec>
<title>2.2 MOEAs</title>
<p>Multi-objective optimization problems (MOPs) are prevalent in various real-world scenarios, characterized by multiple conflicting objectives. The general formulation of a maximum MOP is represented in <xref ref-type="disp-formula" rid="E4">Equation (4)</xref>, where <bold>x</bold> &#x0003D; (<italic>x</italic><sub>1</sub>, ..., <italic>x</italic><sub><italic>n</italic></sub>)&#x02208;&#x003A9; denotes the solution <italic>x</italic> within a search space of dimension <italic>n</italic>, and &#x003A9; represents the feasible region in the search space. <italic>M</italic> corresponds to the number of objectives considered in the optimization problem. For an MOP with multiple conflicting objectives, the optimization algorithms cannot find a single optimal solution that simultaneously optimizes all objectives. This is due to the conflict between objectives, where enhancing the performance of one objective may lead to a decline in others. Consequently, the goal of MOEAs is to identify a set of Pareto optimal solutions. <italic>x</italic><sup>&#x0002A;</sup> is regarded as a Pareto optimal solution if there are no other solutions that can dominate <italic>x</italic><sup>&#x0002A;</sup>. 
Suppose a maximum MOP as shown in <xref ref-type="disp-formula" rid="E4">Equation (4)</xref>, <italic>x</italic> dominates <italic>x</italic><sup>&#x0002A;</sup> if and only if <inline-formula><mml:math id="M6"><mml:mo>&#x02200;</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>,</mml:mo><mml:mi>M</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02265;</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M7"><mml:mo>&#x02203;</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>,</mml:mo><mml:mi>M</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>x</mml:mtext></mml:mstyle></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0003E;</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>. Being population-based search methods, evolutionary algorithms (EAs) have proven to be efficient tools for tackling MOPs by generating a collection of candidate solutions within a single execution.</p>
<disp-formula id="E4"><label>(4)</label><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi><mml:mi>u</mml:mi><mml:mi>m</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>F</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Current MOEAs can be categorized into three main types: dominance-based, decomposition-based, and index-based algorithms. For MOEAs belonging to the first category, the basic idea is to determine the priority of one solution by the dominant relation between the solution and the others. The typical algorithm in the first category is NSGA-II (Deb et al., <xref ref-type="bibr" rid="B7">2002</xref>). In NSGA-II, a fast non-dominated sort method, which is widely adopted in dominance-based MOEAs, is proposed. Many improved algorithms have been proposed in recent years. For example, CBGA-ES&#x0002B; (Pradhan et al., <xref ref-type="bibr" rid="B30">2021</xref>) proposes a hybrid selection strategy combining cluster-based methods and the traditional non-dominated elitist selection method to select parent solutions. In Premkumar et al. (<xref ref-type="bibr" rid="B31">2021</xref>), a MOSMA, which combines the Slime Mould Algorithm and the traditional NSGA-II, is proposed to solve MOPs in industries. In the CMMO (Ming et al., <xref ref-type="bibr" rid="B27">2023</xref>) algorithm, a cooperative evolution strategy, combined with customized environmental and mating selection, forms the basis for addressing MOPs. The algorithm utilizes dynamically adjusted relaxation factors to retain advantageous solutions with diverse decision spaces. This algorithm exhibits outstanding performance in solving multi-modal multi-objective problems. In ASDNSGA-II (Deng et al., <xref ref-type="bibr" rid="B9">2022</xref>), a special congestion degree strategy and a new adaptive crossover operator are proposed to improve the performance of NSGA-II when handling multi-modal MOPs.</p>
<p>For MOEAs based on decomposition, the basic idea is to translate the original MOP into a set of single-objective problems [as seen in approaches like MOEA/D (Zhang and Li, <xref ref-type="bibr" rid="B50">2007</xref>)] or simple MOPs [as illustrated by MOEA/D-M2M and MOSOS/D (Liu et al., <xref ref-type="bibr" rid="B20">2014</xref>; Ganesh et al., <xref ref-type="bibr" rid="B11">2023</xref>)] with the help of weight vectors or reference points. Therefore, many improvements in this type of MOEAs focus on obtaining more appropriate weight vectors (reference points). For instance, Ma et al. (<xref ref-type="bibr" rid="B23">2020</xref>) propose an adaptive weight vector adjustment strategy, in which the weight vectors are periodically modified to enhance the searching capability of the algorithm. In MOEA/D-CSM (Liu et al., <xref ref-type="bibr" rid="B21">2021</xref>), a dynamic reference points generation strategy, which considers the local knowledge in objective space, is proposed to obtain the reference points that can adapt well to MOPs with irregular Pareto fronts. In DMO-QPSO (You et al., <xref ref-type="bibr" rid="B49">2021</xref>), a combination of the quantum-behaved particle swarm optimization (QPSO) algorithm and the MOEA based on decomposition (MOEA/D) is proposed. This integration aims to enable QPSO to effectively address MOPs while leveraging the strengths of QPSO. Additionally, the algorithm introduces some non-dominated solutions to guide other particles in the global best guidance group. The results indicate that the DMO-QPSO algorithm excels in addressing both two-objective and three-objective problems.</p>
<p>For index-based MOEAs, the additional indexes are adopted to determine the priority of solutions or guide the selection process in algorithms. Some representative indexes are hypervolume (HV) (While et al., <xref ref-type="bibr" rid="B47">2006</xref>; Deist et al., <xref ref-type="bibr" rid="B8">2023</xref>), inverted generation distance (IGD) (Zhou et al., <xref ref-type="bibr" rid="B53">2006</xref>; Ishibuchi et al., <xref ref-type="bibr" rid="B14">2019</xref>), dominance move(DoM) (Lopes et al., <xref ref-type="bibr" rid="B10">2022</xref>), and R2 (Ma et al., <xref ref-type="bibr" rid="B24">2018</xref>), and so on. In recent years, the hybrid index, which combines multiple indexes to improve search efficiency, has been proposed. For example, a hybrid index that combines HV and R2 has been adopted (Shang and Ishibuchi, <xref ref-type="bibr" rid="B37">2020</xref>; Shang et al., <xref ref-type="bibr" rid="B38">2020</xref>). Using HV to assess the distribution of the obtained Pareto fronts and R2 to measure the distance between these Pareto fronts and the ideal ones, the hybrid index facilitates algorithms in attaining a balance between convergence and population diversity.</p></sec>
<sec>
<title>2.3 Sparse MOEAs</title>
<p>Studies have revealed that numerous MOPs possess sparse Pareto optimal solutions, particularly those with large-scale decision variables (Tian et al., <xref ref-type="bibr" rid="B43">2021b</xref>). Such MOPs featuring sparsity are commonly referred to as sparse multi-objective optimization problems (SMOPs). In other words, most decision variables of the Pareto optimal solutions in SMOPs are 0. In this case, traditional MOEAs cannot obtain satisfactory results when solving SMOPs. This is because traditional MOEAs do not study the sparse distribution of Pareto optimal solutions and thus cannot effectively generate candidate solutions with sparsity in the evolution process.</p>
<p>In recent years, some variations of MOEAs have been applied to solving SMOPs successfully. These algorithms, called sparse multi-objective evolutionary algorithms (SMOEAs), can be divided into two categories. In the first type, SMOEAs adopt the dimension reduction techniques that are commonly used in machine learning. For example, to reduce the number of sparse large-scale decision variables, MOEA/PSL (Tian et al., <xref ref-type="bibr" rid="B42">2021a</xref>) leverages a denoising auto-encoder (DAE) followed by the utilization of a restricted Boltzmann machine (RBM) for acquiring insight into the sparse distribution of decision variables. PM-MOEA (Tian et al., <xref ref-type="bibr" rid="B41">2022</xref>) adopts pattern mining techniques to identify the maximal and minimal candidate sets of non-zero decision variables from the population and applies specialized genetic operators to these patterns to achieve dimensional reduction. SMEA (Tian et al., <xref ref-type="bibr" rid="B40">2023</xref>) proposes an effective approach for addressing sparse large-scale multi-objective evolutionary problems. The algorithm optimizes the binary vectors of each solution to estimate the sparse distribution of optimal solutions and introduces a rapid clustering method for significantly reducing the dimensionality of the search space. This algorithm partitions a substantial number of decision variables into multiple groups, where all variables within the same group are collectively represented by a single variable for optimization. This innovative strategy substantially diminishes the search space, thereby enhancing the convergence speed.</p>
<p>The search efficiency has been improved for the first type of SMOEAs since the dimension of search space has been reduced. However, some dimension reduction techniques may need high computational cost, and there is no sparsity-related knowledge as guidance information in the evolution process of the algorithms. In the second type, SMOEAs combine the conventional framework of MOEAs (such as NSGA-II) and a hybrid encoding method of solutions. For example, S-NSGA-II (Kropp et al., <xref ref-type="bibr" rid="B17">2023</xref>) introduces a novel set of evolutionary operators, which include Varied Striped Sparse Population Sampling (VSSPS), Sparse Simulated Binary Crossover (S-SBX), and Sparse Polynomial Mutation (S-PM), to address SLMOPs. The aforementioned operators demonstrate remarkable efficacy in solving SLMOPs, particularly when evaluated using HV. In SparseEA, as introduced by Tian et al. (<xref ref-type="bibr" rid="B44">2020</xref>), a solution is represented by two components: a real vector for the original decision variables, and a binary vector, often referred to as a &#x0201C;mask vector,&#x0201D; which governs the solution&#x00027;s sparsity. SparseEA2 adds a decision variable grouping strategy to accelerate the convergence speed of generating sparse Pareto optimal solutions. However, the decision variable grouping strategy in SparseEA2 is designed based on the random grouping method without considering the relation between variables. S-ECSO (Wang et al., <xref ref-type="bibr" rid="B46">2022</xref>), an enhanced competitive swarm optimization approach, which adopts the strongly convex sparse operator (SCSparse), is designed to address SMOPs and exhibits outstanding performance.</p>
<p>As described in Section 1, the channel selection problem in BCIs is a typical SMOP. Based on the domain knowledge in the specific problem, this article proposes a two-stage sparse multi-objective optimization evolutionary algorithm, namely TS-MOEA. In TS-MOEA, both the sparsity and domain knowledge are considered in the design of the fundamental operators. The detailed description of TS-MOEA is shown below.</p></sec></sec>
<sec id="s3">
<title>3 Method</title>
<sec>
<title>3.1 Formulation of two-objective channel selection optimization problem in two stages</title> <p>This paper aims to select as few channels as possible with acceptable task accuracy. So, the number of deleted channels (<italic>f</italic><sub>1</sub>) and the accuracy of tasks (<italic>f</italic><sub>2</sub>) are the two maximized objectives that come to mind intuitively. <xref ref-type="fig" rid="F3">Figure 3</xref> illustrates the modeling process for the channel selection problem. Firstly, the raw signals are processed into sample data by computing the PCC values between each channel. Therefore, the sample data are all presented in the form of correlation matrices (as described in Section 2.1). Then, for the channel optimization problem, the threshold matrix <italic>x</italic> is considered as the decision variable that needs to be optimized. By filtering the sample data through the threshold matrix, it is easy to determine which channel can be deleted (<xref ref-type="table" rid="T11">Algorithm 1</xref>), and thus the value of <italic>f</italic><sub>1</sub> can be obtained, which is the number of deleted channels. Based on the channels that have been deleted, the subset of retained channels can be obtained. By using the data matrix of these selected/retained channels as the input for the classifier, the classification accuracy for a specific task can be achieved, denoted as <italic>f</italic><sub>2</sub>. In summary, the channel optimization problem is modeled as a maximization two-objective problem. As shown in <xref ref-type="fig" rid="F3">Figure 3</xref>, the threshold matrix <italic>x</italic> contains the decision variables that need to be optimized, and <italic>x</italic> has the same size as the connectivity matrices of sample data. 
Here, <italic>D</italic> &#x0003D; {<italic>D</italic><sub>1</sub>, <italic>D</italic><sub>2</sub>, ..., <italic>D</italic><sub><italic>N</italic></sub>} and <italic>N</italic> is the number of samples, <italic>D</italic><sub><italic>i</italic></sub>(1 &#x02264; <italic>i</italic> &#x02264; <italic>N</italic>) is the correlation matrix for the <italic>i</italic><sup><italic>th</italic></sup> sample. After filtering <italic>D</italic> by <italic>x</italic>, one can obtain the set of the filtered correlation matrices, denoted as <italic>B</italic>, for all samples. Specifically, <italic>B</italic> &#x0003D; {<italic>B</italic><sub>1</sub>, <italic>B</italic><sub>2</sub>, ..., <italic>B</italic><sub><italic>N</italic></sub>} and <italic>B</italic><sub><italic>i</italic></sub>(1 &#x02264; <italic>i</italic> &#x02264; <italic>N</italic>) is the filtered correlation matrix for the <italic>i</italic><sup><italic>th</italic></sup> sample. Then, the channels to be deleted can be determined by analyzing the filtered correlation matrices, thereby obtaining the number of deleted channels (<italic>f</italic><sub>1</sub>). The detailed procedure of obtaining the number of deleted channels, i.e., <italic>f</italic><sub>1</sub>, is given in <xref ref-type="table" rid="T11">Algorithm 1</xref>. As shown in <xref ref-type="table" rid="T11">Algorithm 1</xref>, if a channel is irrelevant to most channels, then this channel is most likely useless for the specific task and will be deleted (Lines 9, 10). In Line 10 of <xref ref-type="table" rid="T11">Algorithm 1</xref>, the value of <italic>s</italic> determines the difficulty level for channels to meet the deletion criteria. The correlation matrix after channel deletion <italic>C</italic> &#x0003D; {<italic>C</italic><sub>1</sub>, <italic>C</italic><sub>2</sub>, ..., <italic>C</italic><sub><italic>N</italic></sub>} can be obtained based on <italic>B</italic>. 
For the <italic>k</italic><sup><italic>th</italic></sup> sample, if the <italic>j</italic><sup><italic>th</italic></sup> channel can be deleted, then <italic>C</italic><sub><italic>k</italic></sub> can be acquired by setting the elements in both the <italic>j</italic><sup><italic>th</italic></sup> row and the <italic>j</italic><sup><italic>th</italic></sup> column of <italic>B</italic><sub><italic>k</italic></sub> to 0 (Lines 17, 18). After that, <italic>C</italic> will be used as the input of classifiers, and then the accuracy of classification tasks (<italic>f</italic><sub>2</sub>) can be obtained. The above-mentioned two-objective optimization problem can be formulated as shown in <xref ref-type="disp-formula" rid="E5">Equation (5)</xref>. Please note that any classifier can be utilized for obtaining classification results. Since the focus of this paper does not center on the classifier itself, the classic support vector machine (SVM) is selected here. The hyperparameters used in SVM are obtained through the grid search method, in conjunction with 5-fold cross-validation. The hyperparameter determination process begins with establishing a range of potential values for each hyperparameter, forming a parameter grid. After evaluating each set of hyperparameter combinations by the 5-fold cross-validation method, the best values for the hyperparameters adopted in SVM will be obtained. The classification accuracy of SVM using the best hyperparameters will be regarded as <italic>f</italic><sub>2</sub>.</p>
<disp-formula id="E5"><label>(5)</label><mml:math id="M9"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi><mml:mi>u</mml:mi><mml:mi>m</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>F</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>f</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mi>m</mml:mi><mml:mi>d</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>m</mml:mi><mml:mi>d</mml:mi></mml:msub><mml:mtext>&#x000A0;</mml:mtext><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>n</mml:mi><mml:mi>u</mml:mi><mml:mi>m</mml:mi><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mi>s</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo 
stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>s</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>C</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E6"><label>(6)</label><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>5</mml:mn><mml:mo>&#x0002A;</mml:mo><mml:mfrac><mml:mrow><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>o</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>5</mml:mn><mml:mo>&#x0002A;</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="E7"><label>(7)</label><mml:math id="M11"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi><mml:mi>u</mml:mi><mml:mi>m</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>F</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msubsup><mml:mi>f</mml:mi><mml:mn>1</mml:mn><mml:mo>&#x0002A;</mml:mo></mml:msubsup><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo stretchy='false'>)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msubsup><mml:mi>f</mml:mi><mml:mn>1</mml:mn><mml:mo>&#x0002A;</mml:mo></mml:msubsup><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mn>0.5</mml:mn><mml:mo>&#x0002A;</mml:mo><mml:mfrac><mml:mrow><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>o</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>C</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mi>C</mml:mi></mml:msub></mml:mrow></mml:mfrac><mml:mo>+</mml:mo><mml:mn>0.5</mml:mn><mml:mo>&#x0002A;</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>m</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow><mml:mi>m</mml:mi></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>X</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>s</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>C</mml:mi><mml:mo 
stretchy='false'>)</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Channel selection problem.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0003.tif"/>
</fig>
<table-wrap position="float" id="T11">
<label>Algorithm 1</label>
<caption><p>The detailed procedure of obtaining the number of deleted channels.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0001.tif"/>
</table-wrap>
<p>However, as shown in <xref ref-type="table" rid="T11">Algorithm 1</xref>, a channel can only be deleted if it is unanimously agreed upon by all samples. Particularly, when <italic>s</italic> is set to a large value, meeting the deletion criteria for channels becomes even more challenging. This difficulty results in MOEAs encountering stagnation when optimizing <italic>f</italic><sub>1</sub> (number of deleted channels). To address this problem, a novel objective function <inline-formula><mml:math id="M12"><mml:msubsup><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> is introduced, which offers higher sensitivity in reflecting the deletion status of channels. As expressed in <xref ref-type="disp-formula" rid="E6">Equation (6)</xref>, <inline-formula><mml:math id="M13"><mml:mfrac><mml:mrow><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>o</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></inline-formula> signifies the ratio of zero elements in <italic>C</italic>. Specifically, <italic>zero</italic>(<italic>C</italic>) represents the count of zero elements, and <italic>N</italic><sub><italic>C</italic></sub> is the total number of elements in <italic>C</italic>. Let <italic>m</italic><sub><italic>d</italic></sub> denote the number of deleted channels and <italic>m</italic> denote the total count of channels. 
Then, <inline-formula><mml:math id="M14"><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:math></inline-formula> represents the proportion of deleted channels to the total channels. In this case, the multi-objective problem can be formulated as shown in <xref ref-type="disp-formula" rid="E7">Equation 7</xref>. In <xref ref-type="disp-formula" rid="E7">Equation 7</xref>, the number of deleted channels (<italic>f</italic><sub>1</sub>) from the original <xref ref-type="disp-formula" rid="E5">Equation (5)</xref> is transformed into <inline-formula><mml:math id="M15"><mml:msubsup><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula>, which represents the weighted average sum of the proportion of zero elements in <italic>C</italic> and the proportion of deleted channels to the total. After this transformation, the first objective function in the two-objective optimization model has shifted from a discrete integer search space to a continuous real number search space, which reduces the risk of the algorithm falling into a locally optimal solution. Therefore, compared to the two-objective problem model in <xref ref-type="disp-formula" rid="E5">Equation (5)</xref>, the model in <xref ref-type="disp-formula" rid="E7">Equation (7)</xref> is more sensitive to the deletion status of channels, rendering it less susceptible to stagnation. Hence, this paper introduces a two-stage framework, as illustrated in <xref ref-type="fig" rid="F4">Figure 4</xref>, employing different two-objective problem models in the early and late stages of the proposed algorithm.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Illustration of two-stage framework.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0004.tif"/>
</fig></sec><sec>
<title>3.2 Framework of TS-MOEA</title>
<p>In this paper, a two-stage sparse multi-objective evolutionary algorithm, named TS-MOEA, is introduced to address channel selection problems in BCIs. As illustrated in <xref ref-type="fig" rid="F5">Figure 5</xref>, TS-MOEA adopts a two-stage framework comprising the early and late stages, each dedicated to distinct optimization problem models. It also can be observed from <xref ref-type="fig" rid="F5">Figure 5</xref> that the early and late stages share most operators. Specifically, in addition to the sparse initialization operator, the only difference between the two stages is the mutation of <italic>Dec</italic> variables. Furthermore, due to the sparsity of the correlation matrix, TS-MOEA adopted a hybrid representation of decision variables, which contains <italic>Dec</italic> variables (real numbers) and <italic>Mask</italic> variables (binary numbers). <xref ref-type="table" rid="T12">Algorithm 2</xref> gives the detailed procedure of TS-MOEA.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Framework of TS-MOEA.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0005.tif"/>
</fig>
<table-wrap position="float" id="T12">
<label>Algorithm 2</label>
<caption><p>Procedure of TS-MOEA.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0002.tif"/>
</table-wrap>
 <p>In TS-MOEA, the output population obtained in the first stage becomes the input population for the late stage. To ensure population diversity in the late stage, TS-MOEA adopts a transformation condition between the two stages, which takes into consideration both the total number of consumed function evaluations (<italic>FE</italic>) and the number of different <italic>f</italic><sub>1</sub> values (<italic>NDC</italic>), as shown in Line 4 of <xref ref-type="table" rid="T12">Algorithm 2</xref>. Here, <italic>FE</italic> is the current number of function evaluations consumed by the algorithm and <italic>NDC</italic> represents the number of different <italic>f</italic><sub>1</sub> values obtained by <italic>POP</italic>. Since <italic>f</italic><sub>1</sub> is one of the objective functions optimized by TS-MOEA in the late stage, a larger <italic>NDC</italic> implies that the population exhibits better diversity in the late stage of TS-MOEA. If the number of deleted channels is equal to <italic>m</italic>, i.e., all channels are removed, this is nonsensical. Moreover, since TS-MOEA is designed based on the correlation matrix between channels, this implicitly presupposes that the number of retained channels is greater than or equal to 2. Therefore, the possible values for the number of deleted channels can be any integer within the range [0, <italic>m</italic>&#x02212;2]. When the <italic>NDC</italic> is <italic>m</italic>&#x02212;2, it indicates that the population generated in the first stage is sufficiently diverse to serve as the input population for the next stage. 
Moreover, if the number of the consumed function evaluations of the early stage exceeds the preset threshold, i.e., &#x003BC; &#x000D7; <italic>MaxFE</italic>, the algorithm can also transfer from the early stage to the late stage. In this case, &#x003BC; controls the transformation between the two stages, and its value has been investigated in detail in Section 4.2.</p>
<p>TS-MOEA introduces a sparse initialization operator to generate the initial population for channel selection problems (Line 1 in <xref ref-type="table" rid="T12">Algorithm 2</xref>). In the sparse initialization operator, each decision variable will be assigned a <italic>Score</italic> value, which is calculated according to the problem-domain knowledge. The detailed description of the sparse initialization operator is given in Section 3.3. Both the early and late stages in TS-MOEA adopt the binary tournament selection operator (Lavinas et al., <xref ref-type="bibr" rid="B18">2018</xref>) to obtain parent individuals (Line 6). The crossover and mutation for <italic>Dec</italic> and <italic>Mask</italic> variables utilize different strategies. Specifically, the simulated binary crossover operator (Deb and Beyer, <xref ref-type="bibr" rid="B6">2001</xref>; Zhassuzak et al., <xref ref-type="bibr" rid="B52">2024</xref>) is adopted for <italic>Dec</italic> variables (Line 8), while the <italic>Score</italic>-based crossover operator, which is inspired by SparseEA2, is utilized for <italic>Mask</italic> variables (Lines 10&#x02013;15). The mutation for <italic>Mask</italic> variables is implemented by the <italic>Score</italic>-based mutation operator as shown in Lines 16&#x02013;22. To balance convergence and population diversity, a <italic>Score</italic>-based mutation and the conventional polynomial mutation operators are utilized for <italic>Dec</italic> variables in the early and late stages of TS-MOEA, respectively (Lines 24, 34). The description of the proposed <italic>Score</italic>-based mutation operator has been given in Section 3.4. TS-MOEA utilizes the sequential grouping strategy (Zille et al., <xref ref-type="bibr" rid="B54">2016</xref>) to divide decision variables into groups. 
Specifically, if it is required to split <italic>d</italic> decision variables into <italic>k</italic> groups, then the first [<italic>d</italic>/<italic>k</italic>] decision variables will be classified into the first group, the next [<italic>d</italic>/<italic>k</italic>] decision variables will be classified into the second group, and so on. Here, [<italic>d</italic>/<italic>k</italic>] denotes the integer closest to <italic>d</italic>/<italic>k</italic>. Since the binary tournament selection, simulated binary crossover, and polynomial mutation operators are widely adopted in various MOEAs, their details will not be presented here to save space.</p></sec>
<sec>
<title>3.3 Sparse initialization operator</title>
<p>In the proposed TS-MOEA, the threshold matrix <italic>x</italic> is the optimization target, as shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. <italic>x</italic> is employed to filter the correlation matrix of samples. Since the correlation matrix is symmetric, <italic>x</italic> will be rearranged as a decision vector. For instance, in this paper, the correlation matrix is 62 &#x000D7; 62 due to the utilization of 62 channels. Therefore, the size of the decision vector will be 1 &#x000D7; 1,891, as shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. Inspired by SparseEA2, this paper adopted a hybrid representation of decision variables, which contains real variables (<italic>Dec</italic> vector) and binary variables (<italic>Mask</italic> vector). As illustrated in <xref ref-type="fig" rid="F7">Figure 7</xref>, both the <italic>Dec</italic> vector and the <italic>Mask</italic> vector share the same size as the decision vector. The actual decision vector is obtained by multiplying corresponding elements from the <italic>Dec</italic> and <italic>Mask</italic> vectors.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Illustration of decision vector.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0006.tif"/>
</fig><fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Hybrid representation of decision vector.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0007.tif"/>
</fig><p><xref ref-type="table" rid="T13">Algorithm 3</xref> provides a detailed procedure of the proposed sparse initialization operator. In this operator, the first step is to calculate the <italic>Score</italic> value of each variable in the decision vector (Lines 2&#x02013;6). The <italic>Score</italic> values will later be used to determine whether elements in the <italic>Mask</italic> vector should be set to 0. Research has revealed that the relationship between brain regions relates to their location and distance from each other (van den Broek et al., <xref ref-type="bibr" rid="B45">1998</xref>; Reznik and Allen, <xref ref-type="bibr" rid="B33">2018</xref>). Therefore, the calculation of <italic>Score</italic> values in this paper is based on domain-specific knowledge, which includes the location of channels and the distances between channels.</p>
<table-wrap position="float" id="T13">
<label>Algorithm 3</label>
<caption><p>Sparse initialization operator.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0003.tif"/>
</table-wrap>
<p>The calculation of <italic>Score</italic> values of the decision variables is given in <xref ref-type="disp-formula" rid="E8">Equations (8)</xref>, (<xref ref-type="disp-formula" rid="E9">9</xref>). As presented in <xref ref-type="disp-formula" rid="E8">Equation (8)</xref>, <italic>G</italic><sub><italic>Channe</italic><sub><italic>l</italic></sub><sub><italic>k</italic></sub></sub> represents the position of channel <italic>k</italic>, which can be acquired according to the international 10&#x02013;20 standard. ||<italic>G</italic><sub><italic>Channe</italic><sub><italic>l</italic></sub><sub><italic>k</italic></sub></sub>&#x02212;<italic>G</italic><sub><italic>Channe</italic><sub><italic>l</italic></sub><sub><italic>l</italic></sub></sub>||<sub>2</sub> signifies the Euclidean distance between <italic>G</italic><sub><italic>Channe</italic><sub><italic>l</italic></sub><sub><italic>k</italic></sub></sub> and <italic>G</italic><sub><italic>Channe</italic><sub><italic>l</italic></sub><sub><italic>l</italic></sub></sub>. In <xref ref-type="disp-formula" rid="E9">Equation (9)</xref>, <italic>Max</italic>(<italic>DM</italic>) and <italic>Min</italic>(<italic>DM</italic>) denote the maximum and minimum distances between channels. <italic>Location</italic><sub><italic>k</italic></sub> &#x0003D; 1 denotes that <italic>Channel</italic><sub><italic>k</italic></sub> is located in the left hemisphere, while <italic>Location</italic><sub><italic>k</italic></sub> &#x0003D; &#x02212;1 indicates that <italic>Channel</italic><sub><italic>k</italic></sub> is located in the right hemisphere. If <italic>Channel</italic><sub><italic>k</italic></sub> is positioned in the inter-hemispheric junction area of the brain, as illustrated by the dotted circles in <xref ref-type="fig" rid="F2">Figure 2</xref>, then <italic>Location</italic><sub><italic>k</italic></sub> will be set to 0. 
As indicated in <xref ref-type="disp-formula" rid="E9">Equation (9)</xref>, two channels located in different cerebral hemispheres have higher <italic>Score</italic> values compared to channels situated in the same hemisphere. Additionally, channels that are farther apart have higher <italic>Score</italic> values. In <xref ref-type="disp-formula" rid="E9">Equation (9)</xref>, <italic>R</italic> is the preset channel radius, whose value has been investigated in Section 4.3. For <italic>Channel</italic><sub><italic>k</italic></sub> and <italic>Channel</italic><sub><italic>l</italic></sub>, the larger the <italic>Score</italic> value, the easier it is for the corresponding element in <italic>Mask</italic> to be 0 (Lines 13&#x02013;17) and the easier it is for the correlation coefficient of <italic>Channel</italic><sub><italic>k</italic></sub> and <italic>Channel</italic><sub><italic>l</italic></sub> to be 0 after filtering.</p>
<disp-formula id="E8"><label>(8)</label><mml:math id="M16"><mml:mrow><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>l</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:msub><mml:mo>&#x02212;</mml:mo><mml:msub><mml:mi>G</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></disp-formula><disp-formula id="E9"><label>(9)</label><mml:math id="M17"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable columnalign='left'><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:mfrac><mml:mrow><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x02212;</mml:mo><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo stretchy='false'>(</mml:mo><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mo 
stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mtd><mml:mtd columnalign='left'><mml:mrow><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:mfrac><mml:mrow><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo stretchy='false'>(</mml:mo><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mtd><mml:mtd columnalign='left'><mml:mrow><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo>&#x02260;</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>l</mml:mi></mml:msub><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo 
stretchy='false'>)</mml:mo><mml:mo>&#x0003C;</mml:mo><mml:mi>R</mml:mi></mml:mrow></mml:mtd></mml:mtr><mml:mtr columnalign='left'><mml:mtd columnalign='left'><mml:mrow><mml:mfrac><mml:mrow><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo stretchy='false'>(</mml:mo><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mtd><mml:mtd columnalign='left'><mml:mrow><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:mo>&#x02260;</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mi>l</mml:mi></mml:msub><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>D</mml:mi><mml:mi>M</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x02265;</mml:mo><mml:mi>R</mml:mi></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
</sec>
<sec>
<title>3.4 <italic>Score</italic>-based mutation operator</title>
<p>As explained in Section 3.2, the output population from the early stage in TS-MOEA serves as the input population for the late stage. Hence, the quality of the population acquired in the early stage significantly impacts the ultimate performance of the proposed TS-MOEA. To effectively leverage the problem-domain knowledge to steer the search process of TS-MOEA, a <italic>Score</italic>-based mutation operator, which utilizes the <italic>Score</italic> values of decision variables, is introduced for <italic>Dec</italic> vectors in the early stage of TS-MOEA. <xref ref-type="table" rid="T14">Algorithm 4</xref> presents the detailed procedure of the <italic>Score</italic>-based mutation operator.</p>
<table-wrap position="float" id="T14">
<label>Algorithm 4</label>
<caption><p>Score-based mutation operator.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0004.tif"/>
</table-wrap>
<p>For <italic>o</italic>.<italic>Dec</italic> (the <italic>Dec</italic> vector of individual <italic>o</italic>), if <italic>sc</italic><sub><italic>j</italic></sub> is large, i.e., the corresponding two channels are close to each other, then <italic>o</italic>.<italic>Dec</italic><sub><italic>j</italic></sub> will have a greater probability to be a large value after mutation (Lines 4, 8 in <xref ref-type="table" rid="T14">Algorithm 4</xref>). In this case, the two channels that are related to <italic>Dec</italic><sub><italic>j</italic></sub> tend to be regarded as uncorrelated after filtering (Line 5 in <xref ref-type="table" rid="T14">Algorithm 4</xref>). In <xref ref-type="table" rid="T14">Algorithm 4</xref>, &#x003B1; controls the magnitude of mutation and is set to 0.1 empirically.</p>
</sec></sec>
<sec sec-type="results" id="s4">
<title>4 Results</title>
<sec>
<title>4.1 EEG data and parameter settings</title>
<p>The unprocessed EEG signals were gathered from a group of 9 participants aged between 21 and 30 during a fatigue detection task. Throughout the data collection phase, participants underwent a wake-sleep-wake cycle post-lunch, a period when many people typically experience fatigue symptoms. For experiment integrity, all volunteers were required to wake up before 8:30 a.m. and abstain from alcohol and drugs. During the fatigue detection task, participants lay on a bed with closed eyes, responding to auditory cues via their headsets by promptly opening their eyes. Volunteers were considered alert if they responded within 2 seconds; otherwise, they were classified as fatigued. Further details regarding the processing of the acquired EEG signals are elaborated in Section 2.1.</p>
<p>This paper utilizes the Hypervolume (HV) metric, as described by While et al. (<xref ref-type="bibr" rid="B47">2006</xref>), to evaluate the efficiency of the proposed algorithm. HV measures an algorithm&#x00027;s convergence and diversity by calculating the hypercube&#x00027;s volume, which is formed by the non-dominated solutions obtained by the evaluated algorithm and a predetermined reference point. The reference point is typically chosen to be worse than the function values of each solution in the current evaluated solution set. Therefore, a greater HV value signifies better performance of the assessed algorithm. Since the optimization problem in this paper is a two-objective problem that requires maximization, the reference point <italic>z</italic> is generated according to the following <xref ref-type="disp-formula" rid="E10">Equation (10)</xref>, where <italic>P</italic> represents the Pareto-optimal set obtained by the evaluated algorithm.</p>
<disp-formula id="E10"><label>(10)</label><mml:math id="M18"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mi>z</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mi>min</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mi>P</mml:mi></mml:mrow></mml:munder><mml:msub><mml:mi>f</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x02212;</mml:mo><mml:mn>0.1</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mi>min</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mi>P</mml:mi></mml:mrow></mml:munder><mml:msub><mml:mi>f</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x02212;</mml:mo><mml:mn>0.1</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>As depicted in <xref ref-type="fig" rid="F6">Figure 6</xref>, when conducting channel selection for a BCI system comprising 62 channels, the number of decision variables is 1,891. Consequently, the channel selection problem addressed in this paper qualifies as a large-scale MOP. Moreover, owing to the sparsity of the correlation matrix of channels, the channel selection problem discussed in this paper also falls within the category of sparse large-scale MOPs. To assess the effectiveness of the proposed algorithm, TS-MOEA is compared with several advanced large-scale MOEAs, including SparseEA2 (Zhang et al., <xref ref-type="bibr" rid="B51">2021</xref>), SLMEA (Tian et al., <xref ref-type="bibr" rid="B40">2023</xref>), S-ECSO (Wang et al., <xref ref-type="bibr" rid="B46">2022</xref>), CMMO (Ming et al., <xref ref-type="bibr" rid="B27">2023</xref>), and S-NSGA-II (Kropp et al., <xref ref-type="bibr" rid="B17">2023</xref>). Among these comparison algorithms, SparseEA2 is an effective sparse multi-objective optimization algorithm, whereas S-NSGA-II and S-ECSO are specialized for large-scale multi-objective optimization tasks. CMMO, a newly introduced algorithm, excels in finding an optimal equilibrium between diversity and convergence for multi-objective optimization problems. SLMEA is specialized for super-large-scale sparse multi-objective problems. For fair comparisons, all algorithms adopt the maximum number of function evaluations (<italic>MaxFE</italic>) of 20,000 and the population size (<italic>N</italic>) of 200. The detailed settings of algorithms are given in <xref ref-type="table" rid="T1">Table 1</xref>, and <italic>D</italic> is the number of decision variables.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Algorithm configuration parameters.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Algorithm</bold></th>
<th valign="top" align="left"><bold>Parameter setting</bold></th>
<th valign="top" align="left"><bold>References</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">SparseEA2</td>
<td valign="top" align="left">The crossover probability is 1, while the mutation probability is 1/<italic>D</italic>;</td>
<td valign="top" align="left">Zhang et al., <xref ref-type="bibr" rid="B51">2021</xref></td>
</tr>
 <tr>
<td valign="top" align="left" rowspan="2"></td>
<td valign="top" align="left">Both crossover and mutation have a distribution index of 20.</td>
<td/>
</tr> <tr>
<td valign="top" align="left">SLMEA</td>
<td valign="top" align="left">The crossover probability is 1, while the mutation probability is 1/<italic>D</italic>;</td>
<td valign="top" align="left">Tian et al., <xref ref-type="bibr" rid="B40">2023</xref></td>
</tr>
 <tr>
<td valign="top" align="left" rowspan="2"></td>
<td valign="top" align="left">Both crossover and mutation have a distribution index of 20.</td>
<td/>
</tr> <tr>
<td valign="top" align="left">S-ECSO</td>
<td valign="top" align="left">Inertia weight <italic>w</italic> is 0.7968; &#x003BC;<sub>initial</sub> is set to 0.35;</td>
<td valign="top" align="left">Wang et al., <xref ref-type="bibr" rid="B46">2022</xref></td>
</tr>
 <tr>
<td valign="top" align="left" rowspan="2"></td>
<td valign="top" align="left">The learning factors <italic>C</italic><sub>1</sub> and <italic>C</italic><sub>2</sub> are both assigned the value of 1.4962.</td>
<td/>
</tr> <tr>
<td valign="top" align="left">CMMO</td>
<td valign="top" align="left">The crossover probability is 1, while the mutation probability is 1/<italic>D</italic>;</td>
<td valign="top" align="left">Ming et al., <xref ref-type="bibr" rid="B27">2023</xref></td>
</tr>
 <tr>
<td valign="top" align="left" rowspan="2"></td>
<td valign="top" align="left">Both crossover and mutation have a distribution index of 20; &#x003BC;, &#x003C4; and &#x003B8; are set to 0.1.</td>
<td/>
</tr> <tr>
<td valign="top" align="left">S-NSGA-II</td>
<td valign="top" align="left">The crossover probability is 1, while the mutation probability is 1/<italic>D</italic>;</td>
<td valign="top" align="left">Kropp et al., <xref ref-type="bibr" rid="B17">2023</xref></td>
</tr>
 <tr>
<td valign="top" align="left" rowspan="2"></td>
<td valign="top" align="left">Both crossover and mutation have a distribution index of 20.</td>
<td/>
</tr> <tr>
<td valign="top" align="left" rowspan="1"></td>
<td valign="top" align="left">The crossover probability is 1, while the mutation probability is 1/<italic>D</italic>;</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">TS-MOEA</td>
<td valign="top" align="left">Both crossover and mutation have a distribution index of 20.</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left" rowspan="1"></td>
<td valign="top" align="left"><italic>R</italic>, &#x003BC; and <italic>s</italic> are set to 0.1, 0.2 and 40, respectively.</td>
<td/>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec>
<title>4.2 Statistical results and analysis</title>
<p><xref ref-type="table" rid="T2">Table 2</xref> provides the statistical results of all six algorithms over 30 independent runs, measured in terms of HV. In this table, the best average HV values are highlighted in bold. Symbols &#x0201C;&#x0002B;,&#x0201D; &#x0201C;&#x02013;,&#x0201D; and &#x0201C;&#x02248;&#x0201D; indicate that, according to the Wilcoxon rank-sum test (Yaman et al., <xref ref-type="bibr" rid="B48">2021</xref>) at a 5% significance level, the performance of the compared algorithm is significantly better than, worse than, or similar to that of the proposed TS-MOEA, respectively.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Average HV values achieved by TS-MOEA and other comparative MOEAs.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Subject</bold></th>
<th valign="top" align="left"><bold>SparseEA2</bold></th>
<th valign="top" align="left"><bold>SLMEA</bold></th>
<th valign="top" align="left"><bold>S-ECSO</bold></th>
<th valign="top" align="left"><bold>CMMO</bold></th>
<th valign="top" align="left"><bold>S-NSGA-II</bold></th>
<th valign="top" align="left"><bold>TS-MOEA</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="left">2.79E&#x0002B;01 -</td>
<td valign="top" align="left">4.46E&#x0002B;00 -</td>
<td valign="top" align="left">1.36E&#x0002B;01 -</td>
<td valign="top" align="left">8.86E&#x0002B;00 -</td>
<td valign="top" align="left">5.13E&#x0002B;00 -</td>
<td valign="top" align="left"><bold>6.16E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">2.42E&#x0002B;01 -</td>
<td valign="top" align="left">1.25E&#x0002B;01 -</td>
<td valign="top" align="left">1.81E&#x0002B;01 -</td>
<td valign="top" align="left">2.15E&#x0002B;01 -</td>
<td valign="top" align="left">1.16E&#x0002B;02 -</td>
<td valign="top" align="left"><bold>6.63E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">3</td>
<td valign="top" align="left">4.51E&#x0002B;01 -</td>
<td valign="top" align="left">7.01E&#x0002B;00 -</td>
<td valign="top" align="left">1.99E&#x0002B;01 -</td>
<td valign="top" align="left">1.26E&#x0002B;01 -</td>
<td valign="top" align="left">1.17E&#x0002B;02 -</td>
<td valign="top" align="left"><bold>7.41E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">4</td>
<td valign="top" align="left">2.11E&#x0002B;01 -</td>
<td valign="top" align="left">4.63E&#x02212;01 -</td>
<td valign="top" align="left">9.35E&#x0002B;00 -</td>
<td valign="top" align="left">6.58E&#x0002B;01 -</td>
<td valign="top" align="left">1.04E&#x0002B;02 -</td>
<td valign="top" align="left"><bold>4.29E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">5</td>
<td valign="top" align="left">2.93E&#x0002B;01 -</td>
<td valign="top" align="left">3.02E&#x0002B;01 -</td>
<td valign="top" align="left">2.42E&#x0002B;01 -</td>
<td valign="top" align="left">2.03E&#x0002B;01 -</td>
<td valign="top" align="left">3.13E&#x0002B;01 -</td>
<td valign="top" align="left"><bold>7.33E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">6</td>
<td valign="top" align="left">2.21E&#x0002B;01 -</td>
<td valign="top" align="left">1.30E&#x0002B;01 -</td>
<td valign="top" align="left">9.70E&#x0002B;00 -</td>
<td valign="top" align="left">7.36E&#x0002B;00 -</td>
<td valign="top" align="left">1.32E&#x0002B;01 -</td>
<td valign="top" align="left"><bold>6.28E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">7</td>
<td valign="top" align="left">2.02E&#x0002B;01 -</td>
<td valign="top" align="left">9.73E&#x0002B;00 -</td>
<td valign="top" align="left">1.13E&#x0002B;01 -</td>
<td valign="top" align="left">6.21E&#x0002B;00 -</td>
<td valign="top" align="left">1.68E&#x0002B;01 -</td>
<td valign="top" align="left"><bold>7.27E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">8</td>
<td valign="top" align="left">2.73E&#x02212;01 -</td>
<td valign="top" align="left">9.99E&#x0002B;00 -</td>
<td valign="top" align="left">5.20E&#x0002B;01 -</td>
<td valign="top" align="left">1.44E&#x0002B;00 -</td>
<td valign="top" align="left">1.13E&#x0002B;02 -</td>
<td valign="top" align="left"><bold>3.81E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">9</td>
<td valign="top" align="left">2.08E&#x0002B;01 -</td>
<td valign="top" align="left">1.28E&#x0002B;01 -</td>
<td valign="top" align="left">1.13E&#x0002B;01 -</td>
<td valign="top" align="left">5.13E&#x0002B;00 -</td>
<td valign="top" align="left">1.06E&#x0002B;02 -</td>
<td valign="top" align="left"><bold>5.37E&#x0002B;02</bold></td>
</tr> <tr>
<td valign="top" align="left">&#x0002B;/-/&#x02248;</td>
<td valign="top" align="left">0/9/0</td>
<td valign="top" align="left">0/9/0</td>
<td valign="top" align="left">0/9/0</td>
<td valign="top" align="left">0/9/0</td>
<td valign="top" align="left">0/9/0</td>
<td/>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T2">Table 2</xref> presents the statistical HV values obtained by TS-MOEA and other MOEAs. The numbers in bold are the best results achieved by algorithms and bold numbers in other tables also indicate the best results. The primary distinction between TS-MOEA and the comparative algorithms lies in TS-MOEA&#x00027;s utilization of a two-stage framework. Within this framework, the early stage is focused on discovering a diverse and well-distributed population. This is achieved by employing a two-objective problem model that is highly sensitive to the deletion status of channels. The late stage in TS-MOEA directly uses the number of deleted channels as an optimization objective, thereby striking a balance between the number of deleted channels and task accuracy. Furthermore, domain-specific knowledge is utilized to guide the evolutionary process in TS-MOEA. It can be observed from <xref ref-type="table" rid="T2">Table 2</xref> that TS-MOEA outperforms other algorithms in terms of HV for all 9 subjects, which indicates the effectiveness of the proposed two-stage framework.</p>
<p>Among the five comparative algorithms, there are algorithms that are specifically designed for sparse large-scale optimization problems. However, these algorithms still perform worse than the proposed algorithm for the channel selection problem. This is mainly because none of these comparative algorithms utilize knowledge related to the problem domain. The statistical results indicate that incorporating domain-specific knowledge into algorithms can effectively enhance their performance when solving specific problems. <xref ref-type="fig" rid="F8">Figure 8</xref> displays the Pareto fronts generated by all algorithms for Subject 2. In <xref ref-type="fig" rid="F8">Figure 8</xref>, it is evident that compared to other algorithms, TS-MOEA obtains the best Pareto front, which also verifies the efficiency of the proposed algorithm.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>Pareto frontiers generated by all algorithms for Subject 2. <bold>(A&#x02013;F)</bold> give the Pareto fronts generated by SparseEA2, SLMEA, S-ECSO, CMMO, S-NSGA-II, and TS-MOEA, respectively.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0008.tif"/>
</fig>
<p><xref ref-type="table" rid="T3">Table 3</xref> gives the average classification accuracies achieved by SVM using all channels and partial channels selected by TS-MOEA for all subjects. TS-MOEA provides a set of Pareto optimal solutions, which includes a variety of different electrode selection schemes. To save space, <xref ref-type="table" rid="T3">Table 3</xref> displays several representative channel selection schemes along with their corresponding classification accuracies. As can be seen from <xref ref-type="table" rid="T3">Table 3</xref>, the classification accuracy decreases as the number of selected channels decreases. However, in some cases where only a subset of channels is chosen (such as selecting 60 or 52 channels), the classification accuracy is either better than or slightly lower than when all channels are selected. This indicates that the proposed TS-MOEA can effectively reduce the number of channels used in BCIs while maintaining acceptable classification accuracy.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Average classification accuracies achieved by SVM using all channels and partial channels selected by TS-MOEA.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left" colspan="2"><bold>SVM using channels selected by TS-MOEA</bold></th>
<th valign="top" align="left"><bold>SVM using all channels</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td valign="top" align="left"><bold>Number of selected channels</bold></td>
<td valign="top" align="left"><bold>Accuracy rate</bold></td>
<td/>
</tr> <tr>
<td valign="top" align="left">60</td>
<td valign="top" align="left">98.62%</td>
<td valign="top" align="left">98.53%</td>
</tr>
 <tr>
<td valign="top" align="left">52</td>
<td valign="top" align="left">95.18%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">42</td>
<td valign="top" align="left">88.02%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">80.55%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">70.27%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">57.09%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">53.23%</td>
<td/>
</tr></tbody>
</table>
</table-wrap>
<p>For further comparison of the proposed TS-MOEA with other state-of-the-art MOEAs, including SparseEA2, SLMEA, S-ECSO, CMMO, and S-NSGA-II, <xref ref-type="table" rid="T4">Table 4</xref> presents the average classification accuracies achieved for all subjects based on the varying number of channels selected by the algorithm. It can be observed from <xref ref-type="table" rid="T4">Table 4</xref> that SLMEA, S-ECSO, and CMMO fail to provide classification accuracies in most cases. This is due to the poor diversity of these three algorithms (which can also be observed in <xref ref-type="fig" rid="F8">Figures 8B</xref>&#x02013;<xref ref-type="fig" rid="F8">D</xref>), which results in their inability to obtain the Pareto-optimal solutions for the corresponding number of selected channels. SparseEA2 and S-NSGA-II are capable of obtaining well-distributed Pareto optimal solution sets, similar to the proposed TS-MOEA. However, in terms of classification accuracy, TS-MOEA achieves the best results. Therefore, the statistical results in <xref ref-type="table" rid="T4">Table 4</xref> validate that the proposed algorithm indeed strikes a good balance between classification accuracy and the number of selected channels compared to the algorithms under comparison.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Average classification accuracies obtained by all algorithms.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Number of selected channels</bold></th>
<th valign="top" align="left"><bold>SparseEA2</bold></th>
<th valign="top" align="left"><bold>SLMEA</bold></th>
<th valign="top" align="left"><bold>S-ECSO</bold></th>
<th valign="top" align="left"><bold>CMMO</bold></th>
<th valign="top" align="left"><bold>S-NSGA-II</bold></th>
<th valign="top" align="left"><bold>TS-MOEA</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">60</td>
<td valign="top" align="left">91.50%</td>
<td valign="top" align="left">97.48%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">60.36%</td>
<td valign="top" align="left"><bold>98.62%</bold></td>
</tr> <tr>
<td valign="top" align="left">52</td>
<td valign="top" align="left">76.78%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">58.22%</td>
<td valign="top" align="left"><bold>95.18%</bold></td>
</tr> <tr>
<td valign="top" align="left">42</td>
<td valign="top" align="left">60.37%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">56.47%</td>
<td valign="top" align="left"><bold>88.02%</bold></td>
</tr> <tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">56.92%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">56.47%</td>
<td valign="top" align="left"><bold>80.55%</bold></td>
</tr> <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">56.47%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">52.19%</td>
<td valign="top" align="left"><bold>70.27%</bold></td>
</tr> <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">55.40%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">51.43%</td>
<td valign="top" align="left"><bold>57.09%</bold></td>
</tr> <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">45.98%</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">-</td>
<td valign="top" align="left">51.43%</td>
<td valign="top" align="left"><bold>53.23%</bold></td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F9">Figure 9</xref> demonstrates the convergence of six comparative algorithms on 9 subjects. It can be observed from <xref ref-type="fig" rid="F9">Figure 9</xref> that TS-MOEA exhibits faster convergence compared to the other algorithms. This is mainly because the problem model adopted in the early stage of TS-MOEA can effectively prevent stagnation in the search process. Additionally, the sparse initialization and <italic>Score</italic>-based mutation operators can also accelerate the convergence speed of TS-MOEA.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Convergence of 6 algorithms on 9 subjects. <bold>(A&#x02013;I)</bold> demonstrate the convergence of all algorithms for Subject 1&#x02013;Subject 9, respectively.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0009.tif"/>
</fig>
<p>In summary, there are two reasons for the superior performance of the proposed algorithm. First, TS-MOEA adopts a two-stage framework with a different optimization model for each stage, which maintains population diversity and avoids premature convergence of the algorithm. It can be observed from <xref ref-type="fig" rid="F8">Figure 8</xref> that TS-MOEA has obtained a Pareto front with a better distribution. Second, in TS-MOEA, operators related to the problem domain are used to improve the effectiveness of the searching process. Specifically, when assigning scores to each decision variable, the position of the channels and the distance between them are considered. As shown in <xref ref-type="fig" rid="F9">Figure 9</xref>, TS-MOEA demonstrates the best convergence, which also indicates the effectiveness of the domain-related operators used in TS-MOEA.</p>
<p><xref ref-type="fig" rid="F10">Figure 10</xref> presents the average execution time of all algorithms tested on Subject 1. It can be found from <xref ref-type="fig" rid="F10">Figure 10</xref> that S-NSGA-II has the least running time, followed by S-ECSO and the proposed TS-MOEA. SparseEA2 has the highest average running time. This is because the strategy used in SparseEA2 to obtain the <italic>Score</italic> values of decision variables incurs an increased computational cost, especially when there are a significant number of decision variables. In S-NSGA-II, operators designed for efficient handling of large-scale sparse multi-objective optimization problems are introduced. These operators enable S-NSGA-II to achieve high efficiency when dealing with channel selection problems with a significant number of decision variables. For S-ECSO, the algorithm employs a three-party competition mechanism to guide its evolutionary process. Compared to commonly used genetic operators such as crossover and mutation, the three-party competition mechanism is simpler and consumes less computational cost.</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p>Average running times for all algorithms on Subject 1.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0010.tif"/>
</fig></sec>
<sec>
<title>4.3 Statistical results and analysis on the DEAP dataset</title>
<p>The DEAP (Koelstra et al., <xref ref-type="bibr" rid="B15">2011</xref>) dataset was collected from a group of 32 participants, specifically for human emotion recognition. The dataset consisted of 32 channels of EEG signals and 8 channels of peripheral physiological signals (PPS). The signals were sampled at a rate of 512 Hz. During the data collection process, the participants watched 40 1-min music videos while their physiological signals were recorded. The data set for each trial consisted of a 3-second pre-trial time and a 60-second video viewing trial time. At the end of the trial, participants self-assessed themselves based on arousal, valence, dominance, and liking, using discrete 9-point scales for each dimension. In this section, the arousal dimension from the DEAP dataset has been utilized as a categorical label for binary classification to validate the effectiveness of the proposed algorithm.</p>
<p><xref ref-type="table" rid="T5">Table 5</xref> demonstrates the average classification accuracy of the proposed algorithm on DEAP. As can be seen from <xref ref-type="table" rid="T5">Table 5</xref>, when using 30, 22, 17, and 12 channels, the average classification accuracy is close to the classification accuracy using all channels, which further demonstrates the effectiveness of the proposed algorithm. At the same time, the results also show that channel selection is not simply a matter of reducing the number of channels to improve the classification performance, and that it is necessary to find the optimal combination of channels.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Average classification accuracies on DEAP achieved by SVM using all channels and partial channels selected by TS-MOEA.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left" colspan="2"><bold>SVM using channels selected by TS-MOEA</bold></th>
<th valign="top" align="left"><bold>SVM using all channels</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td valign="top" align="left"><bold>Number of selected channels</bold></td>
<td valign="top" align="left"><bold>Accuracy rate</bold></td>
<td/>
</tr> <tr>
<td valign="top" align="left">30</td>
<td valign="top" align="left">72.08%</td>
<td valign="top" align="left">73.18%</td>
</tr>
 <tr>
<td valign="top" align="left">27</td>
<td valign="top" align="left">69.38%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">71.88%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">17</td>
<td valign="top" align="left">71.63%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">71.25%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">7</td>
<td valign="top" align="left">66.09%</td>
<td/>
</tr>
 <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">60.01%</td>
<td/>
</tr></tbody>
</table>
</table-wrap></sec></sec>
<sec sec-type="discussion" id="s5">
<title>5 Discussion</title>
<sec>
<title>5.1 Investigation of the zero assignment parameter <italic>s</italic></title>
<p>As shown in Line 10 of <xref ref-type="table" rid="T11">Algorithm 1</xref>, <italic>s</italic> modulates the difficulty level in deleting channels. Specifically, for the <italic>k</italic><sup><italic>th</italic></sup> sample, if the correlation coefficients between the <italic>j</italic><sup><italic>th</italic></sup> channel and more than <italic>s</italic> other channels are 0 in the filtered correlation matrix, then the <italic>j</italic><sup><italic>th</italic></sup> channel can be deleted. In this case, setting <italic>s</italic> to a large value makes it challenging to meet the channel deletion criteria, potentially leading the algorithm into stagnation. Conversely, a small <italic>s</italic> might result in erroneous deletion of channels.</p>
<p>In this section, Subjects 1, 5, 6, 7, and 9 are chosen to analyze the impact of different <italic>s</italic> values on the effectiveness of the proposed TS-MOEA. <italic>s</italic> takes values within the range of [35, 55] with a step size of 5. The average HV values across 30 independent runs for different <italic>s</italic> on the selected five subjects can be found in <xref ref-type="table" rid="T6">Table 6</xref>. It can be observed that <italic>s</italic> &#x0003D; 40 achieves the highest average HV values. Taking the number of selected channels as 2, 12, 22, 32, 42, 52, and 60 as examples, <xref ref-type="table" rid="T7">Table 7</xref> provides the average classification accuracies of TS-MOEA for different values of s. As shown in <xref ref-type="table" rid="T7">Table 7</xref>, when <italic>s</italic> &#x0003D; 40, the algorithm achieves the optimal classification accuracy for most of the lead selection schemes. Hence, in this paper, <italic>s</italic> takes the value of 40.</p>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Average HV values for different <italic>s</italic>.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0005.tif"/></th>
<th valign="top" align="left"><bold>35</bold></th>
<th valign="top" align="left"><bold>40</bold></th>
<th valign="top" align="left"><bold>45</bold></th>
<th valign="top" align="left"><bold>50</bold></th>
<th valign="top" align="left"><bold>55</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Subject 1</td>
<td valign="top" align="left"><bold>6.62E&#x0002B;02</bold></td>
<td valign="top" align="left"><bold>6.62E&#x0002B;02</bold></td>
<td valign="top" align="left">5.25E&#x0002B;02</td>
<td valign="top" align="left">4.05E&#x0002B;02</td>
<td valign="top" align="left">6.39E&#x0002B;02</td>
</tr> <tr>
<td valign="top" align="left">Subject 5</td>
<td valign="top" align="left">6.02E&#x0002B;02</td>
<td valign="top" align="left"><bold>6.56E&#x0002B;02</bold></td>
<td valign="top" align="left">6.02E&#x0002B;02</td>
<td valign="top" align="left">3.37E&#x0002B;02</td>
<td valign="top" align="left">3.94E&#x0002B;02</td>
</tr> <tr>
<td valign="top" align="left">Subject 6</td>
<td valign="top" align="left">6.28E&#x0002B;02</td>
<td valign="top" align="left"><bold>6.95E&#x0002B;02</bold></td>
<td valign="top" align="left">6.26E&#x0002B;02</td>
<td valign="top" align="left">5.05E&#x0002B;02</td>
<td valign="top" align="left">4.86E&#x0002B;02</td>
</tr> <tr>
<td valign="top" align="left">Subject 7</td>
<td valign="top" align="left">5.70E&#x0002B;02</td>
<td valign="top" align="left"><bold>6.39E&#x0002B;02</bold></td>
<td valign="top" align="left">5.94E&#x0002B;02</td>
<td valign="top" align="left">4.61E&#x0002B;02</td>
<td valign="top" align="left">5.28E&#x0002B;02</td>
</tr> <tr>
<td valign="top" align="left">Subject 9</td>
<td valign="top" align="left">4.47E&#x0002B;02</td>
<td valign="top" align="left">4.45E&#x0002B;02</td>
<td valign="top" align="left"><bold>4.82E&#x0002B;02</bold></td>
<td valign="top" align="left">3.27E&#x0002B;02</td>
<td valign="top" align="left">3.74E&#x0002B;02</td>
</tr> <tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="left">5.82E&#x0002B;02</td>
<td valign="top" align="left"><bold>6.14E&#x0002B;02</bold></td>
<td valign="top" align="left">5.66E&#x0002B;02</td>
<td valign="top" align="left">4.07E&#x0002B;02</td>
<td valign="top" align="left">3.69E&#x0002B;02</td>
</tr></tbody>
</table>
</table-wrap><table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Average classification accuracies of different <italic>s</italic> for all subjects.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Number of selected channels</bold></th>
<th valign="top" align="left" colspan="5"><bold>s</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td/>
<td valign="top" align="left"><bold>35</bold></td>
<td valign="top" align="left"><bold>40</bold></td>
<td valign="top" align="left"><bold>45</bold></td>
<td valign="top" align="left"><bold>50</bold></td>
<td valign="top" align="left"><bold>55</bold></td>
</tr> <tr>
<td valign="top" align="left">60</td>
<td valign="top" align="left">98.40%</td>
<td valign="top" align="left"><bold>99.20%</bold></td>
<td valign="top" align="left">98.14%</td>
<td valign="top" align="left">98.78%</td>
<td valign="top" align="left">99.18%</td>
</tr> <tr>
<td valign="top" align="left">52</td>
<td valign="top" align="left">98.40%</td>
<td valign="top" align="left"><bold>98.50%</bold></td>
<td valign="top" align="left">97.53%</td>
<td valign="top" align="left">97.56%</td>
<td valign="top" align="left">97.32%</td>
</tr> <tr>
<td valign="top" align="left">42</td>
<td valign="top" align="left">96.70%</td>
<td valign="top" align="left">97.30%</td>
<td valign="top" align="left">95.61%</td>
<td valign="top" align="left">95.90%</td>
<td valign="top" align="left"><bold>97.87%</bold></td>
</tr> <tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">95.59%</td>
<td valign="top" align="left"><bold>95.73%</bold></td>
<td valign="top" align="left">95.12%</td>
<td valign="top" align="left">93.85%</td>
<td valign="top" align="left">95.67%</td>
</tr> <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">91.92%</td>
<td valign="top" align="left">88.63%</td>
<td valign="top" align="left">84.88%</td>
<td valign="top" align="left">90.18%</td>
<td valign="top" align="left"><bold>93.07%</bold></td>
</tr> <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">77.89%</td>
<td valign="top" align="left"><bold>79.13%</bold></td>
<td valign="top" align="left">77.00%</td>
<td valign="top" align="left">77.10%</td>
<td valign="top" align="left">78.46%</td>
</tr> <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">77.27%</td>
<td valign="top" align="left"><bold>78.13%</bold></td>
<td valign="top" align="left">77.00%</td>
<td valign="top" align="left">76.98%</td>
<td valign="top" align="left">77.23%</td>
</tr></tbody>
</table>
</table-wrap></sec><sec>
<title>5.2 Investigation of the transition control parameter &#x003BC;</title>
<p>It can be observed in <xref ref-type="table" rid="T12">Algorithm 2</xref> that the parameter &#x003BC; governs the transition from the early stage to the late stage in TS-MOEA. As described in Line 4 of <xref ref-type="table" rid="T12">Algorithm 2</xref>, the algorithm will shift from the early stage to the late stage if the number of function evaluations consumed by the early stage exceeds the predefined threshold &#x003BC; &#x000D7; <italic>MaxFE</italic>, or if the obtained Pareto-optimal solutions satisfy the diversity requirement. Therefore, if &#x003BC; is set too large, the algorithm may exhaust a significant number of function evaluations in the early stage, potentially leading to unnecessary computational waste. Conversely, if &#x003BC; is too small, the early stage might fail to produce solutions with a good distribution. Consequently, the late stage may not achieve satisfactory optimization results, given that the solutions obtained in the early stage serve as initial solutions in the late stage.</p>
<p>In this section, the influence of different &#x003BC; on TS-MOEA&#x00027;s performance is investigated using five selected subjects: Subjects 1, 5, 6, 7, and 9. &#x003BC; is within the interval [0, 1] using an increment of 0.2. When &#x003BC; = 0, TS-MOEA exclusively executes the early stage, while &#x003BC; = 1 means that only the late stage is executed. <xref ref-type="table" rid="T8">Table 8</xref> provides the average HV values obtained from 30 independent runs for different &#x003BC; values across the selected five subjects. Statistical results indicate that the performance of TS-MOEA that operates in only one stage (&#x003BC; = 0 and &#x003BC; = 1) is inferior to that of the algorithm utilizing both stages concurrently (&#x003BC; = 1/5, &#x003BC; = 2/5, &#x003BC; = 3/5, and &#x003BC; = 4/5). As shown in <xref ref-type="table" rid="T8">Table 8</xref>, &#x003BC; = 1/5 achieves the best results for 2 out of 5 subjects. Moreover, &#x003BC; = 1/5 also obtains the optimal mean values across all selected subjects. <xref ref-type="table" rid="T9">Table 9</xref> gives the average classification accuracies of different &#x003BC; for all subjects and &#x003BC; = 1/5 achieves the best performance for most cases. Therefore, &#x003BC; is set to 1/5 in this paper.</p>
<table-wrap position="float" id="T8">
<label>Table 8</label>
<caption><p>Average HV values for different &#x003BC;.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-i0006.tif"/></th>
<th valign="top" align="left"><bold>0</bold></th>
<th valign="top" align="left"><bold>1/5</bold></th>
<th valign="top" align="left"><bold>2/5</bold></th>
<th valign="top" align="left"><bold>3/5</bold></th>
<th valign="top" align="left"><bold>4/5</bold></th>
<th valign="top" align="left"><bold>1</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Subject 1</td>
<td valign="top" align="left"><bold>7.85E&#x0002B;02</bold></td>
<td valign="top" align="left">6.21E&#x0002B;02</td>
<td valign="top" align="left">5.95E&#x0002B;02</td>
<td valign="top" align="left">6.34E&#x0002B;02</td>
<td valign="top" align="left">5.87E&#x0002B;02</td>
<td valign="top" align="left">2.12E&#x0002B;01</td>
</tr> <tr>
<td valign="top" align="left">Subject 5</td>
<td valign="top" align="left">1.61E&#x0002B;01</td>
<td valign="top" align="left"><bold>6.57E&#x0002B;02</bold></td>
<td valign="top" align="left">6.22E&#x0002B;02</td>
<td valign="top" align="left">6.40E&#x0002B;02</td>
<td valign="top" align="left">5.82E&#x0002B;02</td>
<td valign="top" align="left">1.51E&#x0002B;01</td>
</tr> <tr>
<td valign="top" align="left">Subject 6</td>
<td valign="top" align="left">1.64E&#x0002B;01</td>
<td valign="top" align="left">5.30E&#x0002B;02</td>
<td valign="top" align="left"><bold>5.69E&#x0002B;02</bold></td>
<td valign="top" align="left">5.39E&#x0002B;02</td>
<td valign="top" align="left">5.24E&#x0002B;02</td>
<td valign="top" align="left">1.88E&#x0002B;01</td>
</tr> <tr>
<td valign="top" align="left">Subject 7</td>
<td valign="top" align="left">3.70E&#x0002B;02</td>
<td valign="top" align="left">5.49E&#x0002B;02</td>
<td valign="top" align="left">5.60E&#x0002B;02</td>
<td valign="top" align="left"><bold>5.84E&#x0002B;02</bold></td>
<td valign="top" align="left">5.40E&#x0002B;02</td>
<td valign="top" align="left">1.69E&#x0002B;01</td>
</tr> <tr>
<td valign="top" align="left">Subject 9</td>
<td valign="top" align="left">1.22E&#x0002B;01</td>
<td valign="top" align="left"><bold>5.61E&#x0002B;02</bold></td>
<td valign="top" align="left">4.20E&#x0002B;02</td>
<td valign="top" align="left">4.77E&#x0002B;02</td>
<td valign="top" align="left">4.38E&#x0002B;02</td>
<td valign="top" align="left">1.18E&#x0002B;01</td>
</tr> <tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="left">2.40E&#x0002B;02</td>
<td valign="top" align="left"><bold>5.83E&#x0002B;02</bold></td>
<td valign="top" align="left">5.53E&#x0002B;02</td>
<td valign="top" align="left">5.75E&#x0002B;02</td>
<td valign="top" align="left">5.34E&#x0002B;02</td>
<td valign="top" align="left">1.68E&#x0002B;01</td>
</tr></tbody>
</table>
</table-wrap><table-wrap position="float" id="T9">
<label>Table 9</label>
<caption><p>Average classification accuracies of different &#x003BC; for all subjects.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Number of selected channels</bold></th>
<th valign="top" align="left" colspan="6">&#x003BC;</th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td/>
<td valign="top" align="left"><bold>0</bold></td>
<td valign="top" align="left"><bold>1/5</bold></td>
<td valign="top" align="left"><bold>2/5</bold></td>
<td valign="top" align="left"><bold>3/5</bold></td>
<td valign="top" align="left"><bold>4/5</bold></td>
<td valign="top" align="left"><bold>1</bold></td>
</tr> <tr>
<td valign="top" align="left">60</td>
<td valign="top" align="left">91.98%</td>
<td valign="top" align="left"><bold>92.75%</bold></td>
<td valign="top" align="left">91.66%</td>
<td valign="top" align="left">92.32%</td>
<td valign="top" align="left">90.98%</td>
<td valign="top" align="left">90.66%</td>
</tr> <tr>
<td valign="top" align="left">52</td>
<td valign="top" align="left">90.82%</td>
<td valign="top" align="left">90.47%</td>
<td valign="top" align="left">90.50%</td>
<td valign="top" align="left"><bold>90.86%</bold></td>
<td valign="top" align="left">89.20%</td>
<td valign="top" align="left">90.50%</td>
</tr> <tr>
<td valign="top" align="left">42</td>
<td valign="top" align="left"><bold>89.28%</bold></td>
<td valign="top" align="left">89.14%</td>
<td valign="top" align="left">88.25%</td>
<td valign="top" align="left">88.92%</td>
<td valign="top" align="left">87.97%</td>
<td valign="top" align="left">88.25%</td>
</tr> <tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">86.57%</td>
<td valign="top" align="left"><bold>88.57%</bold></td>
<td valign="top" align="left">87.62%</td>
<td valign="top" align="left">86.94%</td>
<td valign="top" align="left">86.05%</td>
<td valign="top" align="left">87.42%</td>
</tr> <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">85.46%</td>
<td valign="top" align="left"><bold>86.14%</bold></td>
<td valign="top" align="left">85.55%</td>
<td valign="top" align="left">86.57%</td>
<td valign="top" align="left">85.42%</td>
<td valign="top" align="left">85.55%</td>
</tr> <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">83.23%</td>
<td valign="top" align="left"><bold>84.36%</bold></td>
<td valign="top" align="left">83.47%</td>
<td valign="top" align="left">83.71%</td>
<td valign="top" align="left">84.02%</td>
<td valign="top" align="left">83.47%</td>
</tr> <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">70.15%</td>
<td valign="top" align="left"><bold>76.85%</bold></td>
<td valign="top" align="left">75.59%</td>
<td valign="top" align="left">76.29%</td>
<td valign="top" align="left">74.47%</td>
<td valign="top" align="left">75.59%</td>
</tr></tbody>
</table>
</table-wrap></sec><sec>
<title>5.3 Investigation of the distance radius <italic>R</italic></title>
<p>In TS-MOEA, the distance radius <italic>R</italic> is adopted to calculate the scores of decision variables as depicted in <xref ref-type="disp-formula" rid="E9">Equation (9)</xref>. If the distance between two channels is less than <italic>R</italic>, the score value assigned to these channels will be small, leading to a corresponding small value in the decision variable. Consequently, the correlation coefficient between the aforementioned channels is less likely to become 0 after filtering (<xref ref-type="table" rid="T14">Algorithm 4</xref>). If <italic>R</italic> is set to a small value, the probability of considering two channels as unrelated becomes low. In this case, the algorithm might retain channels that are useless to the specific task.</p>
<p>In this study, the distance radius <italic>R</italic> varies from 0.2 to 2 with increments of 0.2, as the maximum distance between two channels is 2. <xref ref-type="fig" rid="F11">Figure 11</xref> displays the average HV values obtained by TS-MOEA with different <italic>R</italic> across all subjects by 30 independent runs. It is evident from <xref ref-type="fig" rid="F11">Figure 11</xref> that the best performance is achieved when <italic>R</italic> = 1.0. <xref ref-type="table" rid="T10">Table 10</xref> gives the average classification accuracies of different R for all subjects and <italic>R</italic> &#x0003D; 1.0 achieves the best performance for most cases. Therefore, the distance radius <italic>R</italic> is set to 1.0 in this paper.</p>
<fig id="F11" position="float">
<label>Figure 11</label>
<caption><p>Average HV values of different <italic>R</italic>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0011.tif"/>
</fig><table-wrap position="float" id="T10">
<label>Table 10</label>
<caption><p>Average classification accuracies of different R for all subjects.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Number of selected channels</bold></th>
<th valign="top" align="left" colspan="9"><italic><bold>R</bold></italic></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td/>
<td valign="top" align="left"><bold>0.2</bold></td>
<td valign="top" align="left"><bold>0.4</bold></td>
<td valign="top" align="left"><bold>0.6</bold></td>
<td valign="top" align="left"><bold>0.8</bold></td>
<td valign="top" align="left"><bold>1</bold></td>
<td valign="top" align="left"><bold>1.2</bold></td>
<td valign="top" align="left"><bold>1.4</bold></td>
<td valign="top" align="left"><bold>1.6</bold></td>
<td valign="top" align="left"><bold>1.8</bold></td>
</tr> <tr>
<td valign="top" align="left">60</td>
<td valign="top" align="left">91.57%</td>
<td valign="top" align="left">92.55%</td>
<td valign="top" align="left">91.79%</td>
<td valign="top" align="left">92.18%</td>
<td valign="top" align="left"><bold>92.58%</bold></td>
<td valign="top" align="left">91.35%</td>
<td valign="top" align="left">92.07%</td>
<td valign="top" align="left">92.79%</td>
<td valign="top" align="left">92.05%</td>
</tr> <tr>
<td valign="top" align="left">52</td>
<td valign="top" align="left">90.13%</td>
<td valign="top" align="left">90.85%</td>
<td valign="top" align="left">90.86%</td>
<td valign="top" align="left">90.85%</td>
<td valign="top" align="left"><bold>91.82%</bold></td>
<td valign="top" align="left">90.91%</td>
<td valign="top" align="left">91.61%</td>
<td valign="top" align="left">91.40%</td>
<td valign="top" align="left">91.13%</td>
</tr> <tr>
<td valign="top" align="left">42</td>
<td valign="top" align="left">89.57%</td>
<td valign="top" align="left">90.05%</td>
<td valign="top" align="left">89.75%</td>
<td valign="top" align="left">89.94%</td>
<td valign="top" align="left"><bold>90.63%</bold></td>
<td valign="top" align="left">89.37%</td>
<td valign="top" align="left">90.36%</td>
<td valign="top" align="left">90.42%</td>
<td valign="top" align="left">89.15%</td>
</tr> <tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">87.94%</td>
<td valign="top" align="left">88.26%</td>
<td valign="top" align="left"><bold>88.67%</bold></td>
<td valign="top" align="left">87.86%</td>
<td valign="top" align="left">88.50%</td>
<td valign="top" align="left">88.19%</td>
<td valign="top" align="left">87.67%</td>
<td valign="top" align="left">88.17%</td>
<td valign="top" align="left">86.70%</td>
</tr> <tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left"><bold>86.40%</bold></td>
<td valign="top" align="left">85.40%</td>
<td valign="top" align="left">84.95%</td>
<td valign="top" align="left">84.68%</td>
<td valign="top" align="left">86.04%</td>
<td valign="top" align="left">84.63%</td>
<td valign="top" align="left">84.20%</td>
<td valign="top" align="left">84.63%</td>
<td valign="top" align="left">84.90%</td>
</tr> <tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left"><bold>82.52%</bold></td>
<td valign="top" align="left">80.66%</td>
<td valign="top" align="left">82.08%</td>
<td valign="top" align="left">79.98%</td>
<td valign="top" align="left">82.36%</td>
<td valign="top" align="left">80.97%</td>
<td valign="top" align="left">81.17%</td>
<td valign="top" align="left">81.13%</td>
<td valign="top" align="left">79.64%</td>
</tr> <tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">71.80%</td>
<td valign="top" align="left">63.43%</td>
<td valign="top" align="left">73.88%</td>
<td valign="top" align="left">65.57%</td>
<td valign="top" align="left"><bold>74.40%</bold></td>
<td valign="top" align="left">63.45%</td>
<td valign="top" align="left">63.40%</td>
<td valign="top" align="left">63.06%</td>
<td valign="top" align="left">65.93%</td>
</tr></tbody>
</table>
</table-wrap></sec><sec>
<title>5.4 Investigation of the selected channels</title>
<p>This section discusses the channels selected by the proposed TS-MOEA, using the fatigue detection task in Section 4.2 as an example. <xref ref-type="fig" rid="F12">Figure 12</xref> illustrates the scenarios with selected numbers of channels at 52, 42, 32, 22, 12, and 2 for all subjects, and <xref ref-type="table" rid="T3">Table 3</xref> shows the corresponding average classification accuracies for the six cases. As shown in <xref ref-type="fig" rid="F12">Figure 12</xref> and <xref ref-type="table" rid="T3">Table 3</xref>, the average classification accuracy gradually decreases as the number of deleted channels increases. It can be observed that the channels selected from <xref ref-type="fig" rid="F12">Figures 12A</xref>&#x02013;<xref ref-type="fig" rid="F12">D</xref> essentially include the frontal (Fpz, F3, Fz, F6), frontotemporal (FT7, Fc6, FT8), and central (C5, C6, T8) regions. Specific activity patterns in the frontotemporal region may be associated with dreams and cognitive activity during sleep. Activity in the central regions may be associated with motor inhibition and somatosensory information processing during sleep, and activity patterns in these regions may reflect changes in muscle relaxation and sensory information transfer during sleep. From <xref ref-type="fig" rid="F12">Figures 12D</xref>&#x02013;<xref ref-type="fig" rid="F12">F</xref>, the aforementioned ten channels were deleted, and there is a noticeable decline in classification accuracy, which can be seen in <xref ref-type="table" rid="T3">Table 3</xref>. Therefore, the removal of channels from regions closely related to the fatigue detection task will result in a sharp decline in classification accuracy. From this, it can be understood that incorporating prior knowledge of regions of interest (ROIs) related to specific tasks into the lead selection algorithm may be beneficial. 
For instance, in the context of fatigue detection tasks as discussed in Section 4.2, prioritizing the retention of channels from the frontal, frontotemporal, and central regions can help the channel selection algorithm strike a balance between the number of channels chosen and the classification accuracy.</p>
<fig id="F12" position="float">
<label>Figure 12</label>
<caption><p>The selected channels for all subjects by TS-MOEA, <bold>(A&#x02013;F)</bold> illustrate the scenarios with selected numbers of channels at 52, 42, 32, 22, 12, and 2, respectively (where the gray circles represent channels that have been deleted, and the white circles represent channels that have been selected).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-18-1400077-g0012.tif"/>
</fig></sec></sec>
<sec sec-type="conclusions" id="s6">
<title>6 Conclusions</title>
<p>This paper introduces a two-stage sparse multi-objective evolutionary algorithm (TS-MOEA) to solve channel selection problems within BCIs. In TS-MOEA, a two-stage framework with two different two-objective problem models has been adopted. Specifically, a two-objective problem which is sensitive to channel deletion is used in the early stage of TS-MOEA to prevent the algorithm from stalling. In the late stage of TS-MOEA, a two-objective problem model that can directly indicate the number of deleted channels is utilized. To strike a balance between convergence and population diversity in TS-MOEA, a transition condition has been devised. This condition takes into account both the number of consumed function evaluations and the distribution of the current population to control the transition between the early and late stages of the proposed algorithm. Moreover, due to the sparsity of the correlation matrix of channels, a sparse initialization operator is introduced to generate the initial population. Furthermore, a <italic>Score</italic>-based mutation operator has been integrated to enhance the search efficiency of the early stage in TS-MOEA. The experimental results of TS-MOEA and five other advanced MOEAs have demonstrated the efficiency of the proposed algorithm. However, as shown in <xref ref-type="fig" rid="F8">Figure 8</xref>, <xref ref-type="table" rid="T3">Tables 3</xref>, <xref ref-type="table" rid="T5">5</xref>, TS-MOEA provides a set of Pareto-optimal solutions, each offering a different channel selection scheme. Therefore, TS-MOEA does not directly yield a single optimal electrode selection scheme; in practical applications, the user must make a decision on which channel selection scheme to choose from the Pareto-optimal solution set. 
Additionally, although TS-MOEA takes into account knowledge relevant to the problem, such as channel positions and the distance matrix between channels, it does not consider the impact of regions of interest (ROI) on the performance of the algorithm.</p>
<p>As shown in Section 3.2, TS-MOEA incorporates the problem-domain knowledge, specifically the locations and distance matrix of channels, to enhance the algorithm&#x00027;s performance. However, the biological connections between brain regions, which could better capture and exploit correlations between different channels, were not considered. Therefore, how to combine the biological connections between brain regions in the design of critical operators to improve the search capabilities of the algorithm is one of the future works of this paper. Furthermore, as the number of commands increases, the number of brain wave patterns (or other physiological signals) that the BCI system needs to distinguish becomes larger, which increases the complexity of the classification task. Therefore, how to maintain or improve classification accuracy when the number of commands increases will be one of the future works of this paper.</p></sec>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The data analyzed in this study is subject to the following licenses/restrictions: the data that support the findings of this study are available from the corresponding author upon reasonable request. Requests to access these datasets should be directed to <email>liuty&#x00040;shmtu.edu.cn</email>.</p></sec>
<sec sec-type="ethics-statement" id="s8">
<title>Ethics statement</title>
<p>The studies involving human participants were reviewed and approved by the Medical and Life Science Ethics Committee of Tongji University. Written informed consent to participate in this study was provided by the participants.</p></sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>TL: Conceptualization, Formal analysis, Funding acquisition, Investigation, Methodology, Supervision, Validation, Writing&#x02014;original draft, Writing&#x02014;review &#x00026; editing. YW: Data curation, Methodology, Formal analysis, Software, Visualization, Writing&#x02014;review &#x00026; editing. AY: Conceptualization, Data curation, Methodology, Software, Visualization, Writing&#x02014;original draft, Writing&#x02014;review &#x00026; editing. LC: Data curation, Funding acquisition, Validation, Writing&#x02014;original draft. YC: Data curation, Writing&#x02014;review &#x00026; editing.</p></sec>
</body>
<back>
<sec sec-type="funding-information" id="s10">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was supported by the National Natural Science Foundation of China (grant Nos. 61806122 and 62102242).</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abdullah</surname></name> <name><surname>Faye</surname> <given-names>I.</given-names></name> <name><surname>Islam</surname> <given-names>M. R.</given-names></name></person-group> (<year>2022</year>). <article-title>EEG channel selection techniques in motor imagery applications: a review and new perspectives</article-title>. <source>Bioengineering</source> <volume>9</volume>:<fpage>726</fpage>. <pub-id pub-id-type="doi">10.3390/bioengineering9120726</pub-id><pub-id pub-id-type="pmid">36550932</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Almanza-Conejo</surname> <given-names>O.</given-names></name> <name><surname>Avina-Cervantes</surname> <given-names>J. G.</given-names></name> <name><surname>Garcia-Perez</surname> <given-names>A.</given-names></name> <name><surname>Ibarra-Manzano</surname> <given-names>M. A.</given-names></name></person-group> (<year>2023</year>). <article-title>A channel selection method to find the role of the amygdala in emotion recognition avoiding conflict learning in EEG signals</article-title>. <source>Eng. Applic. Artif. Intell</source>. <volume>126</volume>:<fpage>106971</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2023.106971</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alotaiby</surname> <given-names>T.</given-names></name> <name><surname>El-Samie</surname> <given-names>F. E. A.</given-names></name> <name><surname>Alshebeili</surname> <given-names>S. A.</given-names></name> <name><surname>Ahmad</surname> <given-names>I.</given-names></name></person-group> (<year>2015</year>). <article-title>A review of channel selection algorithms for EEG signal processing</article-title>. <source>EURASIP J. Adv. Signal Proc</source>. <volume>2015</volume>, <fpage>1</fpage>&#x02013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1186/s13634-015-0251-9</pub-id></citation>
</ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aydore</surname> <given-names>S.</given-names></name> <name><surname>Pantazis</surname> <given-names>D.</given-names></name> <name><surname>Leahy</surname> <given-names>R. M.</given-names></name></person-group> (<year>2013</year>). <article-title>A note on the phase locking value and its properties</article-title>. <source>NeuroImage</source> <volume>74</volume>, <fpage>231</fpage>&#x02013;<lpage>244</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.02.008</pub-id><pub-id pub-id-type="pmid">23435210</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Z.</given-names></name> <name><surname>Duan</surname> <given-names>S.</given-names></name> <name><surname>Peng</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>EEG-based emotion recognition by retargeted semi-supervised regression with robust weights</article-title>. <source>Systems</source> <volume>10</volume>:<fpage>236</fpage>. <pub-id pub-id-type="doi">10.3390/systems10060236</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deb</surname> <given-names>K.</given-names></name> <name><surname>Beyer</surname> <given-names>H.-G.</given-names></name></person-group> (<year>2001</year>). <article-title>Self-adaptive genetic algorithms with simulated binary crossover</article-title>. <source>Evolut. Comput</source>. <volume>9</volume>, <fpage>197</fpage>&#x02013;<lpage>221</lpage>. <pub-id pub-id-type="doi">10.1162/106365601750190406</pub-id><pub-id pub-id-type="pmid">11382356</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deb</surname> <given-names>K.</given-names></name> <name><surname>Pratap</surname> <given-names>A.</given-names></name> <name><surname>Agarwal</surname> <given-names>S.</given-names></name> <name><surname>Meyarivan</surname> <given-names>T.</given-names></name></person-group> (<year>2002</year>). <article-title>A fast and elitist multiobjective genetic algorithm: NSGA-II</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>6</volume>, <fpage>182</fpage>&#x02013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.1109/4235.996017</pub-id></citation>
</ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deist</surname> <given-names>T. M.</given-names></name> <name><surname>Grewal</surname> <given-names>M.</given-names></name> <name><surname>Dankers</surname> <given-names>F. J. W. M.</given-names></name> <name><surname>Alderliesten</surname> <given-names>T.</given-names></name> <name><surname>Bosman</surname> <given-names>P. A. N.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Multi-objective learning using hv maximization,&#x0201D;</article-title> in <source>Evolutionary Multi-Criterion Optimization</source>, eds. M. Emmerich, A. Deutz, H. Wang, A. V. Kononova, B. Naujoks, K. Li, et al. (Cham: Springer Nature Switzerland), <fpage>103</fpage>&#x02013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-031-27250-9_8</pub-id></citation>
</ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deng</surname> <given-names>W.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>X.</given-names></name> <name><surname>Chen</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>An enhanced fast non-dominated solution sorting genetic algorithm for multi-objective problems</article-title>. <source>Inf. Sci</source>. <volume>585</volume>, <fpage>441</fpage>&#x02013;<lpage>453</lpage>. <pub-id pub-id-type="doi">10.1016/j.ins.2021.11.052</pub-id></citation>
</ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>do Val Lopes</surname> <given-names>C. L.</given-names></name> <name><surname>Martins</surname> <given-names>F. V. C.</given-names></name> <name><surname>Wanner</surname> <given-names>E. F.</given-names></name> <name><surname>Deb</surname> <given-names>K.</given-names></name></person-group> (<year>2022</year>). <article-title>Analyzing dominance move (mip-dom) indicator for multiobjective and many-objective optimization</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>26</volume>, <fpage>476</fpage>&#x02013;<lpage>489</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2021.3096669</pub-id></citation>
</ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ganesh</surname> <given-names>N.</given-names></name> <name><surname>Shankar</surname> <given-names>R.</given-names></name> <name><surname>Kalita</surname> <given-names>K.</given-names></name> <name><surname>Jangir</surname> <given-names>P.</given-names></name> <name><surname>Oliva</surname> <given-names>D.</given-names></name> <name><surname>P&#x000E9;rez-Cisneros</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>A novel decomposition-based multi-objective symbiotic organism search optimization algorithm</article-title>. <source>Mathematics</source> <volume>11</volume>:<fpage>1898</fpage>. <pub-id pub-id-type="doi">10.3390/math11081898</pub-id></citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gong</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name></person-group> (<year>2024</year>). <article-title>A new filter feature selection algorithm for classification task by ensembling pearson correlation coefficient and mutual information</article-title>. <source>Eng. Applic. Artif. Intell</source>. <volume>131</volume>:<fpage>107865</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2024.107865</pub-id></citation>
</ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>L.</given-names></name> <name><surname>Chiong</surname> <given-names>R.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Budhi</surname> <given-names>G. S.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>A multiobjective evolutionary algorithm for achieving energy efficiency in production environments integrated with multiple automated guided vehicles</article-title>. <source>Knowl. Based Syst</source>. <volume>243</volume>:<fpage>108315</fpage>. <pub-id pub-id-type="doi">10.1016/j.knosys.2022.108315</pub-id></citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ishibuchi</surname> <given-names>H.</given-names></name> <name><surname>Imada</surname> <given-names>R.</given-names></name> <name><surname>Masuyama</surname> <given-names>N.</given-names></name> <name><surname>Nojima</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Comparison of hypervolume, igd and igd&#x0002B; from the viewpoint of optimal distributions of solutions,&#x0201D;</article-title> in <source>Evolutionary Multi-Criterion Optimization</source>, eds. K. Deb, E. Goodman, C. A. Coello Coello, K. Klamroth, K. Miettinen, S. Mostaghim, et al. (Cham: Springer International Publishing), <fpage>332</fpage>&#x02013;<lpage>345</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-12598-1_27</pub-id></citation>
</ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koelstra</surname> <given-names>S.</given-names></name> <name><surname>Muhl</surname> <given-names>C.</given-names></name> <name><surname>Soleymani</surname> <given-names>M.</given-names></name> <name><surname>Lee</surname> <given-names>J.-S.</given-names></name> <name><surname>Yazdani</surname> <given-names>A.</given-names></name> <name><surname>Ebrahimi</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>DEAP: a database for emotion analysis; using physiological signals</article-title>. <source>IEEE Trans. Affect. Comput</source>. <volume>3</volume>, <fpage>18</fpage>&#x02013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.1109/T-AFFC.2011.15</pub-id></citation>
</ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krishna Rao</surname> <given-names>E.</given-names></name> <name><surname>Yaswanthkumar Reddy</surname> <given-names>N.</given-names></name> <name><surname>Greeshma</surname> <given-names>B.</given-names></name> <name><surname>Vardhan Reddy</surname> <given-names>Y. S. S.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;EEG based smart wheelchair for disabled persons using non-invasive BCI,&#x0201D;</article-title> in <source>2022 International Conference on Computational Intelligence and Sustainable Engineering Solutions (CISES)</source>, 440&#x02013;446. <pub-id pub-id-type="doi">10.1109/CISES54857.2022.9844334</pub-id></citation>
</ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kropp</surname> <given-names>I.</given-names></name> <name><surname>Nejadhashemi</surname> <given-names>A. P.</given-names></name> <name><surname>Deb</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>Improved evolutionary operators for sparse large-scale multiobjective optimization problems</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>28</volume>, <fpage>460</fpage>&#x02013;<lpage>473</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2023.3256183</pub-id></citation>
</ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lavinas</surname> <given-names>Y.</given-names></name> <name><surname>Aranha</surname> <given-names>C.</given-names></name> <name><surname>Sakurai</surname> <given-names>T.</given-names></name> <name><surname>Ladeira</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;Experimental analysis of the tournament size on genetic algorithms,&#x0201D;</article-title> in <source>2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC)</source>, 3647&#x02013;3653. <pub-id pub-id-type="doi">10.1109/SMC.2018.00617</pub-id><pub-id pub-id-type="pmid">34623436</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Stephen</surname> <given-names>E. P.</given-names></name> <name><surname>Prerau</surname> <given-names>M. J.</given-names></name> <name><surname>Purdon</surname> <given-names>P. L.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Sparse multi-task inverse covariance estimation for connectivity analysis in EEG source space,&#x0201D;</article-title> in <source>2019 9th International IEEE/EMBS Conference on Neural Engineering (NER)</source>, 299&#x02013;302. <pub-id pub-id-type="doi">10.1109/NER.2019.8717043</pub-id><pub-id pub-id-type="pmid">31156761</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>H.-L.</given-names></name> <name><surname>Gu</surname> <given-names>F.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name></person-group> (<year>2014</year>). <article-title>Decomposition of a multiobjective optimization problem into a number of simple multiobjective subproblems</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>18</volume>, <fpage>450</fpage>&#x02013;<lpage>455</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2013.2281533</pub-id></citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>R.</given-names></name> <name><surname>Wang</surname> <given-names>R.</given-names></name> <name><surname>Bian</surname> <given-names>R.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Jiao</surname> <given-names>L.</given-names></name></person-group> (<year>2021</year>). <article-title>A decomposition-based evolutionary algorithm with correlative selection mechanism for many-objective optimization</article-title>. <source>Evolut. Comput</source>. <volume>29</volume>, <fpage>269</fpage>&#x02013;<lpage>304</lpage>. <pub-id pub-id-type="doi">10.1162/evco_a_00279</pub-id><pub-id pub-id-type="pmid">33047610</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>T.</given-names></name> <name><surname>Ye</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Domain knowledge-assisted multi-objective evolutionary algorithm for channel selection in brain-computer interface systems</article-title>. <source>Front. Neurosci</source>. <volume>17</volume>:<fpage>1251968</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2023.1251968</pub-id><pub-id pub-id-type="pmid">37746153</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ma</surname> <given-names>X.</given-names></name> <name><surname>Yu</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Qi</surname> <given-names>Y.</given-names></name> <name><surname>Zhu</surname> <given-names>Z.</given-names></name></person-group> (<year>2020</year>). <article-title>A survey of weight vector adjustment methods for decomposition-based multiobjective evolutionary algorithms</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>24</volume>, <fpage>634</fpage>&#x02013;<lpage>649</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2020.2978158</pub-id></citation>
</ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ma</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Tian</surname> <given-names>G.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Zhu</surname> <given-names>Z.</given-names></name></person-group> (<year>2018</year>). <article-title>On tchebycheff decomposition approaches for multiobjective evolutionary optimization</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>22</volume>, <fpage>226</fpage>&#x02013;<lpage>244</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2017.2704118</pub-id><pub-id pub-id-type="pmid">30040669</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maria</surname> <given-names>M. A.</given-names></name> <name><surname>Akhand</surname> <given-names>M. A. H.</given-names></name> <name><surname>Hossain</surname> <given-names>A. B. M. A.</given-names></name> <name><surname>Kamal</surname> <given-names>M. A. S.</given-names></name> <name><surname>Yamada</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>A comparative study on prominent connectivity features for emotion recognition from EEG</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>37809</fpage>&#x02013;<lpage>37831</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3264845</pub-id></citation>
</ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mart&#x000ED;nez-Cagigal</surname> <given-names>V.</given-names></name> <name><surname>Santamar&#x000ED;a-V&#x000ED;zquez</surname> <given-names>E.</given-names></name> <name><surname>Hornero</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Brain&#x02013;computer interface channel selection optimization using meta-heuristics and evolutionary algorithms</article-title>. <source>Appl. Soft Comput</source>. <volume>115</volume>:<fpage>108176</fpage>. <pub-id pub-id-type="doi">10.1016/j.asoc.2021.108176</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ming</surname> <given-names>F.</given-names></name> <name><surname>Gong</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Gao</surname> <given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>Balancing convergence and diversity in objective and decision spaces for multimodal multi-objective optimization</article-title>. <source>IEEE Trans. Emerg. Topics Comput. Intell</source>. <volume>7</volume>, <fpage>474</fpage>&#x02013;<lpage>486</lpage>. <pub-id pub-id-type="doi">10.1109/TETCI.2022.3221940</pub-id><pub-id pub-id-type="pmid">38660209</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moon</surname> <given-names>S.-E.</given-names></name> <name><surname>Chen</surname> <given-names>C.-J.</given-names></name> <name><surname>Hsieh</surname> <given-names>C.-J.</given-names></name> <name><surname>Wang</surname> <given-names>J.-L.</given-names></name> <name><surname>Lee</surname> <given-names>J.-S.</given-names></name></person-group> (<year>2020</year>). <article-title>Emotional EEG classification using connectivity features and convolutional neural networks</article-title>. <source>Neural Netw</source>. <volume>132</volume>, <fpage>96</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2020.08.009</pub-id><pub-id pub-id-type="pmid">32861918</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pearson</surname> <given-names>K.</given-names></name></person-group> (<year>1895</year>). <article-title>Vii. Note on regression and inheritance in the case of two parents</article-title>. <source>Proc. R. Soc. London</source> <volume>58</volume>, <fpage>240</fpage>&#x02013;<lpage>242</lpage>. <pub-id pub-id-type="doi">10.1098/rspl.1895.0041</pub-id></citation>
</ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pradhan</surname> <given-names>D.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Ali</surname> <given-names>S.</given-names></name> <name><surname>Yue</surname> <given-names>T.</given-names></name> <name><surname>Liaaen</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Cbga-es&#x0002B;: a cluster-based genetic algorithm with non-dominated elitist selection for supporting multi-objective test optimization</article-title>. <source>IEEE Trans. Softw. Eng</source>. <volume>47</volume>, <fpage>86</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1109/TSE.2018.2882176</pub-id></citation>
</ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Premkumar</surname> <given-names>M.</given-names></name> <name><surname>Jangir</surname> <given-names>P.</given-names></name> <name><surname>Sowmya</surname> <given-names>R.</given-names></name> <name><surname>Alhelou</surname> <given-names>H. H.</given-names></name> <name><surname>Heidari</surname> <given-names>A. A.</given-names></name> <name><surname>Chen</surname> <given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>Mosma: multi-objective slime mould algorithm based on elitist non-dominated sorting</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>3229</fpage>&#x02013;<lpage>3248</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3047936</pub-id></citation>
</ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qu</surname> <given-names>R.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>A.</given-names></name> <name><surname>Xu</surname> <given-names>G.</given-names></name></person-group> (<year>2023</year>). <article-title>Interictal electrophysiological source imaging based on realistic epilepsy head model in presurgical evaluation: a prospective study</article-title>. <source>Chin. J. Electr. Eng</source>. <volume>9</volume>, <fpage>61</fpage>&#x02013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.23919/CJEE.2023.000012</pub-id></citation>
</ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reznik</surname> <given-names>S. J.</given-names></name> <name><surname>Allen</surname> <given-names>J. J. B.</given-names></name></person-group> (<year>2018</year>). <article-title>Frontal asymmetry as a mediator and moderator of emotion: an updated review</article-title>. <source>Psychophysiology</source> <volume>55</volume>:<fpage>e12965</fpage>. <pub-id pub-id-type="doi">10.1111/psyp.12965</pub-id><pub-id pub-id-type="pmid">28776710</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rocha-Herrera</surname> <given-names>C. A.</given-names></name> <name><surname>D&#x000ED;az-Manr&#x000ED;quez</surname> <given-names>A.</given-names></name> <name><surname>Barron-Zambrano</surname> <given-names>J. H.</given-names></name> <name><surname>Elizondo-Leal</surname> <given-names>J. C.</given-names></name> <name><surname>Saldivar-Alonso</surname> <given-names>V. P.</given-names></name> <name><surname>Mart&#x000ED;nez-Angulo</surname> <given-names>J. R.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>EEG feature extraction using evolutionary algorithms for brain-computer interface development</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2022</volume>:<fpage>7571208</fpage>. <pub-id pub-id-type="doi">10.1155/2022/7571208</pub-id><pub-id pub-id-type="pmid">35814562</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scannella</surname> <given-names>S.</given-names></name> <name><surname>Pariente</surname> <given-names>J.</given-names></name> <name><surname>De Boissezon</surname> <given-names>X.</given-names></name> <name><surname>Castel-Lacanal</surname> <given-names>E.</given-names></name> <name><surname>Chauveau</surname> <given-names>N.</given-names></name> <name><surname>Causse</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>N270 sensitivity to conflict strength and working memory: a combined ERP and sLORETA study</article-title>. <source>Behav. Brain Res.</source> <volume>297</volume>, <fpage>231</fpage>&#x02013;<lpage>240</lpage>. <pub-id pub-id-type="doi">10.1016/j.bbr.2015.10.014</pub-id><pub-id pub-id-type="pmid">26477377</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schreiber</surname> <given-names>T.</given-names></name></person-group> (<year>2000</year>). <article-title>Measuring information transfer</article-title>. <source>Phys. Rev. Lett</source>. <volume>85</volume>:<fpage>461</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.85.461</pub-id><pub-id pub-id-type="pmid">10991308</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shang</surname> <given-names>K.</given-names></name> <name><surname>Ishibuchi</surname> <given-names>H.</given-names></name></person-group> (<year>2020</year>). <article-title>A new hypervolume-based evolutionary algorithm for many-objective optimization</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>24</volume>, <fpage>839</fpage>&#x02013;<lpage>852</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2020.2964705</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shang</surname> <given-names>K.</given-names></name> <name><surname>Ishibuchi</surname> <given-names>H.</given-names></name> <name><surname>Ni</surname> <given-names>X.</given-names></name></person-group> (<year>2020</year>). <article-title>R2-based hypervolume contribution approximation</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>24</volume>, <fpage>185</fpage>&#x02013;<lpage>192</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2019.2909271</pub-id></citation>
</ref>
<ref id="B39">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sibilano</surname> <given-names>E.</given-names></name> <name><surname>Suglia</surname> <given-names>V.</given-names></name> <name><surname>Brunetti</surname> <given-names>A.</given-names></name> <name><surname>Buongiorno</surname> <given-names>D.</given-names></name> <name><surname>Caporusso</surname> <given-names>N.</given-names></name> <name><surname>Guger</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2024</year>). <source>Brain-Computer Interfaces</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer US</publisher-name>, <fpage>203</fpage>&#x02013;<lpage>240</lpage>. <pub-id pub-id-type="doi">10.1007/978-1-0716-3545-2_10</pub-id></citation>
</ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Feng</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Sun</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>A fast clustering based evolutionary algorithm for super-large-scale sparse multi-objective optimization</article-title>. <source>IEEE/CAA J. Autom. Sinica</source> <volume>10</volume>, <fpage>1048</fpage>&#x02013;<lpage>1063</lpage>. <pub-id pub-id-type="doi">10.1109/JAS.2022.105437</pub-id></citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Lu</surname> <given-names>C.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Cheng</surname> <given-names>F.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>A pattern mining-based evolutionary algorithm for large-scale sparse multiobjective optimization problems</article-title>. <source>IEEE Trans. Cyber</source>. <volume>52</volume>, <fpage>6784</fpage>&#x02013;<lpage>6797</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2020.3041325</pub-id><pub-id pub-id-type="pmid">33378271</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Lu</surname> <given-names>C.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Tan</surname> <given-names>K. C.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name></person-group> (<year>2021a</year>). <article-title>Solving large-scale multiobjective optimization problems with sparse optimal solutions via unsupervised neural networks</article-title>. <source>IEEE Trans. Cybern</source>. <volume>51</volume>, <fpage>3115</fpage>&#x02013;<lpage>3128</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2020.2979930</pub-id><pub-id pub-id-type="pmid">32217494</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Si</surname> <given-names>L.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Cheng</surname> <given-names>R.</given-names></name> <name><surname>He</surname> <given-names>C.</given-names></name> <name><surname>Tan</surname> <given-names>K. C.</given-names></name> <etal/></person-group>. (<year>2021b</year>). <article-title>Evolutionary large-scale multi-objective optimization: a survey</article-title>. <source>ACM Comput. Surv</source>. <volume>54</volume>, <fpage>1</fpage>&#x02013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1145/3470971</pub-id></citation>
</ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name></person-group> (<year>2020</year>). <article-title>An evolutionary algorithm for large-scale sparse multiobjective optimization problems</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>24</volume>, <fpage>380</fpage>&#x02013;<lpage>393</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2019.2918140</pub-id></citation>
</ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van den Broek</surname> <given-names>S.</given-names></name> <name><surname>Reinders</surname> <given-names>F.</given-names></name> <name><surname>Donderwinkel</surname> <given-names>M.</given-names></name> <name><surname>Peters</surname> <given-names>M.</given-names></name></person-group> (<year>1998</year>). <article-title>Volume conduction effects in EEG and MEG</article-title>. <source>Electroencephalogr. Clin. Neurophysiol</source>. <volume>106</volume>, <fpage>522</fpage>&#x02013;<lpage>534</lpage>. <pub-id pub-id-type="doi">10.1016/S0013-4694(97)00147-8</pub-id><pub-id pub-id-type="pmid">9741752</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>K.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>An enhanced competitive swarm optimizer with strongly convex sparse operator for large-scale multiobjective optimization</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>26</volume>, <fpage>859</fpage>&#x02013;<lpage>871</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2021.3111209</pub-id></citation>
</ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>While</surname> <given-names>L.</given-names></name> <name><surname>Hingston</surname> <given-names>P.</given-names></name> <name><surname>Barone</surname> <given-names>L.</given-names></name> <name><surname>Huband</surname> <given-names>S.</given-names></name></person-group> (<year>2006</year>). <article-title>A faster algorithm for calculating hypervolume</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>10</volume>, <fpage>29</fpage>&#x02013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2005.851275</pub-id></citation>
</ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yaman</surname> <given-names>A.</given-names></name> <name><surname>Iacca</surname> <given-names>G.</given-names></name> <name><surname>Mocanu</surname> <given-names>D. C.</given-names></name> <name><surname>Coler</surname> <given-names>M.</given-names></name> <name><surname>Fletcher</surname> <given-names>G.</given-names></name> <name><surname>Pechenizkiy</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Evolving plasticity for autonomous learning under changing environmental conditions</article-title>. <source>Evolut. Comput</source>. <volume>29</volume>, <fpage>391</fpage>&#x02013;<lpage>414</lpage>. <pub-id pub-id-type="doi">10.1162/evco_a_00286</pub-id><pub-id pub-id-type="pmid">34467993</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>You</surname> <given-names>Q.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name> <name><surname>Pan</surname> <given-names>F.</given-names></name> <name><surname>Palade</surname> <given-names>V.</given-names></name> <name><surname>Ahmad</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>DMO-QPSO: a multi-objective quantum-behaved particle swarm optimization algorithm based on decomposition with diversity control</article-title>. <source>Mathematics</source> <volume>9</volume>:<fpage>1959</fpage>. <pub-id pub-id-type="doi">10.3390/math9161959</pub-id></citation>
</ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name></person-group> (<year>2007</year>). <article-title>MOEA/D: a multiobjective evolutionary algorithm based on decomposition</article-title>. <source>IEEE Trans. Evolut. Comput</source>. <volume>11</volume>, <fpage>712</fpage>&#x02013;<lpage>731</lpage>. <pub-id pub-id-type="doi">10.1109/TEVC.2007.892759</pub-id></citation>
</ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>Improved SparseEA for sparse large-scale multi-objective optimization problems</article-title>. <source>Complex Intell. Syst</source>. <volume>9</volume>, <fpage>1127</fpage>&#x02013;<lpage>1142</lpage>. <pub-id pub-id-type="doi">10.1007/s40747-021-00553-0</pub-id></citation>
</ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhassuzak</surname> <given-names>M.</given-names></name> <name><surname>Akhmet</surname> <given-names>M.</given-names></name> <name><surname>Amirgaliyev</surname> <given-names>Y.</given-names></name> <name><surname>Buribayev</surname> <given-names>Z.</given-names></name></person-group> (<year>2024</year>). <article-title>Application of genetic algorithms for periodicity recognition and finite sequences sorting</article-title>. <source>Algorithms</source> <volume>17</volume>:<fpage>101</fpage>. <pub-id pub-id-type="doi">10.3390/a17030101</pub-id></citation>
</ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>A.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Sendhoff</surname> <given-names>B.</given-names></name> <name><surname>Tsang</surname> <given-names>E.</given-names></name></person-group> (<year>2006</year>). <article-title>&#x0201C;Combining model-based and genetics-based offspring generation for multi-objective optimization using a convergence criterion,&#x0201D;</article-title> in <source>2006 IEEE International Conference on Evolutionary Computation</source>, <fpage>892</fpage>&#x02013;<lpage>899</lpage>.</citation>
</ref>
<ref id="B54">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zille</surname> <given-names>H.</given-names></name> <name><surname>Ishibuchi</surname> <given-names>H.</given-names></name> <name><surname>Mostaghim</surname> <given-names>S.</given-names></name> <name><surname>Nojima</surname> <given-names>Y.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Mutation operators based on variable grouping for multi-objective large-scale optimization,&#x0201D;</article-title> in <source>2016 IEEE Symposium Series on Computational Intelligence (SSCI)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1109/SSCI.2016.7850214</pub-id></citation>
</ref>
</ref-list>
</back>
</article>